repo_name (stringclasses, 6 values) | pr_number (int64, 512 to 78.9k) | pr_title (stringlengths, 3 to 144) | pr_description (stringlengths, 0 to 30.3k) | author (stringlengths, 2 to 21) | date_created (unknown) | date_merged (unknown) | previous_commit (stringlengths, 40) | pr_commit (stringlengths, 40) | query (stringlengths, 17 to 30.4k) | filepath (stringlengths, 9 to 210) | before_content (stringlengths, 0 to 112M) | after_content (stringlengths, 0 to 112M) | label (int64, -1 to 1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
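For intuition, the profile-guided expansion behaves roughly like the hand-written guard below. This is a hedged sketch of the idea only, not the JIT's actual emitted code (the real expansion compares type handles rather than using `is`); it reuses `ClassA`/`ClassB` from the example above, and `CastToClassA_GuardedSketch` is a made-up name:
```csharp
// Sketch: what a PGO-guided castclass amounts to conceptually. The JIT
// emits a cheap check against the single class observed in the cast
// profile (here ClassB) and only falls back to the ordinary cast,
// with its full castclass semantics, when that guess misses.
static ClassA CastToClassA_GuardedSketch(object o)
{
    if (o is ClassB likely)  // fast path: the profiled type
        return likely;
    return (ClassA)o;        // slow path: helper call, throws on mismatch
}
```
When the guess holds, as in the loop above where `o` is always a `ClassB`, the slow path is never taken.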
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/X509CertificateEnumerator.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections;
namespace System.Security.Cryptography.X509Certificates
{
public partial class X509CertificateCollection : System.Collections.CollectionBase
{
public class X509CertificateEnumerator : IEnumerator
{
private readonly IEnumerator _enumerator;
public X509CertificateEnumerator(X509CertificateCollection mappings!!)
{
_enumerator = ((IEnumerable)mappings).GetEnumerator();
}
public X509Certificate Current
{
get { return (X509Certificate)_enumerator.Current!; }
}
object IEnumerator.Current
{
get { return Current; }
}
public bool MoveNext()
{
return _enumerator.MoveNext();
}
bool IEnumerator.MoveNext()
{
return MoveNext();
}
public void Reset()
{
_enumerator.Reset();
}
void IEnumerator.Reset()
{
Reset();
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections;
namespace System.Security.Cryptography.X509Certificates
{
public partial class X509CertificateCollection : System.Collections.CollectionBase
{
public class X509CertificateEnumerator : IEnumerator
{
private readonly IEnumerator _enumerator;
public X509CertificateEnumerator(X509CertificateCollection mappings!!)
{
_enumerator = ((IEnumerable)mappings).GetEnumerator();
}
public X509Certificate Current
{
get { return (X509Certificate)_enumerator.Current!; }
}
object IEnumerator.Current
{
get { return Current; }
}
public bool MoveNext()
{
return _enumerator.MoveNext();
}
bool IEnumerator.MoveNext()
{
return MoveNext();
}
public void Reset()
{
_enumerator.Reset();
}
void IEnumerator.Reset()
{
Reset();
}
}
}
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/tests/JIT/jit64/gc/misc/struct3.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
struct S
{
public String str;
}
class Test_struct3
{
public static void c(S s1, S s2, S s3)
{
Console.WriteLine(s1.str + s2.str + s3.str);
}
public static int Main()
{
S sM;
sM.str = "test";
c(sM, sM, sM);
return 100;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
struct S
{
public String str;
}
class Test_struct3
{
public static void c(S s1, S s2, S s3)
{
Console.WriteLine(s1.str + s2.str + s3.str);
}
public static int Main()
{
S sM;
sM.str = "test";
c(sM, sM, sM);
return 100;
}
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/tests/JIT/Directed/tailcall/mutual_recursion.fsproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<NoStandardLib>True</NoStandardLib>
<Noconfig>True</Noconfig>
<Optimize>True</Optimize>
<JitOptimizationSensitive>True</JitOptimizationSensitive>
<TargetFramework>$(NetCoreAppToolCurrent)</TargetFramework>
</PropertyGroup>
<ItemGroup>
<Compile Include="mutual_recursion.fs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<NoStandardLib>True</NoStandardLib>
<Noconfig>True</Noconfig>
<Optimize>True</Optimize>
<JitOptimizationSensitive>True</JitOptimizationSensitive>
<TargetFramework>$(NetCoreAppToolCurrent)</TargetFramework>
</PropertyGroup>
<ItemGroup>
<Compile Include="mutual_recursion.fs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/mono/mono/tests/assemblyresolve_asm.cs | using System;
using Test;
public class Asm : Test.Test
{
Test.Test t;
public Asm ()
{
t = new Test.Test ();
}
}
public class Asm2 : Test.ReturnsTestBase
{
}
| using System;
using Test;
public class Asm : Test.Test
{
Test.Test t;
public Asm ()
{
t = new Test.Test ();
}
}
public class Asm2 : Test.ReturnsTestBase
{
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/EmbeddedObjectNode.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using ILCompiler.DependencyAnalysisFramework;
using Debug = System.Diagnostics.Debug;
namespace ILCompiler.DependencyAnalysis
{
public abstract class EmbeddedObjectNode : SortableDependencyNode
{
private const int InvalidOffset = int.MinValue;
private int _offset;
private int _index;
public IHasStartSymbol ContainingNode { get; set; }
public EmbeddedObjectNode()
{
_offset = InvalidOffset;
_index = InvalidOffset;
}
public int OffsetFromBeginningOfArray
{
get
{
Debug.Assert(_offset != InvalidOffset);
return _offset;
}
}
public int IndexFromBeginningOfArray
{
get
{
Debug.Assert(_index != InvalidOffset);
return _index;
}
}
public void InitializeOffsetFromBeginningOfArray(int offset)
{
Debug.Assert(_offset == InvalidOffset || _offset == offset);
_offset = offset;
}
public void InitializeIndexFromBeginningOfArray(int index)
{
Debug.Assert(_index == InvalidOffset || _index == index);
_index = index;
}
public virtual bool IsShareable => false;
public virtual bool RepresentsIndirectionCell => false;
public override bool InterestingForDynamicDependencyAnalysis => false;
public override bool HasDynamicDependencies => false;
public override bool HasConditionalStaticDependencies => false;
public override IEnumerable<CombinedDependencyListEntry> GetConditionalStaticDependencies(NodeFactory factory) => null;
public override IEnumerable<CombinedDependencyListEntry> SearchDynamicDependencies(List<DependencyNodeCore<NodeFactory>> markedNodes, int firstNode, NodeFactory factory) => null;
public abstract void EncodeData(ref ObjectDataBuilder dataBuilder, NodeFactory factory, bool relocsOnly);
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using ILCompiler.DependencyAnalysisFramework;
using Debug = System.Diagnostics.Debug;
namespace ILCompiler.DependencyAnalysis
{
public abstract class EmbeddedObjectNode : SortableDependencyNode
{
private const int InvalidOffset = int.MinValue;
private int _offset;
private int _index;
public IHasStartSymbol ContainingNode { get; set; }
public EmbeddedObjectNode()
{
_offset = InvalidOffset;
_index = InvalidOffset;
}
public int OffsetFromBeginningOfArray
{
get
{
Debug.Assert(_offset != InvalidOffset);
return _offset;
}
}
public int IndexFromBeginningOfArray
{
get
{
Debug.Assert(_index != InvalidOffset);
return _index;
}
}
public void InitializeOffsetFromBeginningOfArray(int offset)
{
Debug.Assert(_offset == InvalidOffset || _offset == offset);
_offset = offset;
}
public void InitializeIndexFromBeginningOfArray(int index)
{
Debug.Assert(_index == InvalidOffset || _index == index);
_index = index;
}
public virtual bool IsShareable => false;
public virtual bool RepresentsIndirectionCell => false;
public override bool InterestingForDynamicDependencyAnalysis => false;
public override bool HasDynamicDependencies => false;
public override bool HasConditionalStaticDependencies => false;
public override IEnumerable<CombinedDependencyListEntry> GetConditionalStaticDependencies(NodeFactory factory) => null;
public override IEnumerable<CombinedDependencyListEntry> SearchDynamicDependencies(List<DependencyNodeCore<NodeFactory>> markedNodes, int firstNode, NodeFactory factory) => null;
public abstract void EncodeData(ref ObjectDataBuilder dataBuilder, NodeFactory factory, bool relocsOnly);
}
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/tests/JIT/Regression/JitBlue/Runtime_58877/Runtime_58877.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// Generated by Fuzzlyn v1.4 on 2021-09-09 16:16:00
// Run on .NET 7.0.0-dev on X64 Windows
// Seed: 9372673482466191512
// Reduced from 87.8 KiB to 1.2 KiB in 00:02:21
// Debug: Outputs 1
// Release: Outputs 0
//
// Test that if we run out of budget during VN memory disambiguation,
// we still correctly mark the load of vr3[0] as loop variant.
//
using System.Runtime.CompilerServices;
public class Program
{
static int s_result = -1;
[MethodImpl(MethodImplOptions.NoInlining)]
static void Consume(ushort u)
{
if (u == 1)
{
s_result = 100;
}
}
public static int Main()
{
ushort[] vr3 = new ushort[]{0};
for (int vr4 = 0; vr4 < 2; vr4++)
{
vr3[0] = 1;
var vr5 = new byte[][][][]{new byte[][][]{new byte[][]{new byte[]{1}, new byte[]{1, 1}}}, new byte[][][]{new byte[][]{new byte[]{1, 1}, new byte[]{1, 1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}, new byte[]{1}}, new byte[][]{new byte[]{1}}}, new byte[][][]{new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}, new byte[]{1}}, new byte[][]{new byte[]{1}, new byte[]{1}, new byte[]{1}}, new byte[][]{new byte[]{1}}}, new byte[][][]{new byte[][]{new byte[]{1, 1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1, 1}}, new byte[][]{new byte[]{0}}, new byte[][]{new byte[]{1}, new byte[]{1, 1}}}, new byte[][][]{new byte[][]{new byte[]{0}, new byte[]{0}, new byte[]{1}}, new byte[][]{new byte[]{1}, new byte[]{1, 1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}}}, new byte[][][]{new byte[][]{new byte[]{1, 1}}}};
var vr6 = vr3[0];
Consume(vr6);
}
return s_result;
}
} | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// Generated by Fuzzlyn v1.4 on 2021-09-09 16:16:00
// Run on .NET 7.0.0-dev on X64 Windows
// Seed: 9372673482466191512
// Reduced from 87.8 KiB to 1.2 KiB in 00:02:21
// Debug: Outputs 1
// Release: Outputs 0
//
// Test that if we run out of budget during VN memory disambiguation,
// we still correctly mark the load of vr3[0] as loop variant.
//
using System.Runtime.CompilerServices;
public class Program
{
static int s_result = -1;
[MethodImpl(MethodImplOptions.NoInlining)]
static void Consume(ushort u)
{
if (u == 1)
{
s_result = 100;
}
}
public static int Main()
{
ushort[] vr3 = new ushort[]{0};
for (int vr4 = 0; vr4 < 2; vr4++)
{
vr3[0] = 1;
var vr5 = new byte[][][][]{new byte[][][]{new byte[][]{new byte[]{1}, new byte[]{1, 1}}}, new byte[][][]{new byte[][]{new byte[]{1, 1}, new byte[]{1, 1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}, new byte[]{1}}, new byte[][]{new byte[]{1}}}, new byte[][][]{new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}, new byte[]{1}}, new byte[][]{new byte[]{1}, new byte[]{1}, new byte[]{1}}, new byte[][]{new byte[]{1}}}, new byte[][][]{new byte[][]{new byte[]{1, 1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1, 1}}, new byte[][]{new byte[]{0}}, new byte[][]{new byte[]{1}, new byte[]{1, 1}}}, new byte[][][]{new byte[][]{new byte[]{0}, new byte[]{0}, new byte[]{1}}, new byte[][]{new byte[]{1}, new byte[]{1, 1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}}, new byte[][]{new byte[]{1}}}, new byte[][][]{new byte[][]{new byte[]{1, 1}}}};
var vr6 = vr3[0];
Consume(vr6);
}
return s_result;
}
} | -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/libraries/Common/tests/System/Xml/XPath/FuncExpressions/SetContextFunctionalTests.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
using System;
using System.Xml;
using System.Xml.XPath;
using XPathTests.Common;
namespace XPathTests.FunctionalTests.Expressions
{
/// <summary>
/// XPathExpression - SetContext Functional Tests
/// </summary>
public static partial class SetContextFunctionalTestsTests
{
/// <summary>
/// Select node with qname
/// //NSbook:book
/// </summary>
[Fact]
public static void SetContextFunctionalTestsTest431()
{
var xml = "name.xml";
var testExpression = @"//NSbook:book[1]";
var namespaceManager = new XmlNamespaceManager(new NameTable());
namespaceManager.AddNamespace("NSbook", "http://book.htm");
var expected = new XPathResult(0,
new XPathResultToken
{
NodeType = XPathNodeType.Element,
HasChildren = true,
HasAttributes = true,
LocalName = "book",
Name = "NSbook:book",
NamespaceURI = "http://book.htm",
HasNameTable = true,
Prefix = "NSbook",
Value = "\n\t\t\tA Brief History Of Time\n\t\t"
});
Utils.XPathNodesetTest(xml, testExpression, expected, namespaceManager: namespaceManager);
}
/// <summary>
/// Prefix is not defined, should throw an error
/// //NSbook:book
/// </summary>
[Fact]
public static void SetContextFunctionalTestsTest432()
{
var xml = "name.xml";
var testExpression = @"//NSbook:book[1]";
var namespaceManager = new XmlNamespaceManager(new NameTable());
namespaceManager.AddNamespace("NSbook", "http://book.htm");
var expected = new XPathResult(0,
new XPathResultToken
{
NodeType = XPathNodeType.Element,
HasChildren = true,
HasAttributes = true,
LocalName = "book",
Name = "NSbook:book",
NamespaceURI = "http://book.htm",
HasNameTable = true,
Prefix = "NSbook",
Value = "\n\t\t\tA Brief History Of Time\n\t\t"
});
Utils.XPathNodesetTest(xml, testExpression, expected, namespaceManager: namespaceManager);
}
/// <summary>
/// use of multiple namespaces
/// /doc/prefix1:elem/prefix2:elem
/// </summary>
[Fact]
public static void SetContextFunctionalTestsTest433()
{
var xml = "name4.xml";
var testExpression = @"/doc/prefix1:elem/prefix2:elem";
var namespaceManager = new XmlNamespaceManager(new NameTable());
namespaceManager.AddNamespace("prefix1", "http://prefix1.htm");
namespaceManager.AddNamespace("prefix2", "http://prefix2.htm");
var expected = new XPathResult(0,
new XPathResultToken
{
NodeType = XPathNodeType.Element,
IsEmptyElement = true,
LocalName = "elem",
Name = "prefix2:elem",
NamespaceURI = "http://prefix2.htm",
HasNameTable = true,
Prefix = "prefix2"
});
Utils.XPathNodesetTest(xml, testExpression, expected, namespaceManager: namespaceManager);
}
/// <summary>
/// Prefix points to a namespace that is not defined in the document, should return empty nodeset.
/// //NSbook:book
/// </summary>
[Fact]
public static void SetContextFunctionalTestsTest434()
{
var xml = "name.xml";
var testExpression = @"//NSbook:book[1]";
var namespaceManager = new XmlNamespaceManager(new NameTable());
namespaceManager.AddNamespace("NSbook", "http://notbook.htm");
var expected = new XPathResult(0);
Utils.XPathNodesetTest(xml, testExpression, expected, namespaceManager: namespaceManager);
}
/// <summary>
/// The document's default namespace is defined with a prefix in the XmlNamespaceManager, XPath should find the nodes with the default namespace in the document.
/// //foo:book[1]
/// </summary>
[Fact]
public static void SetContextFunctionalTestsTest435()
{
var xml = "name2.xml";
var testExpression = @"//foo:book[1]";
var namespaceManager = new XmlNamespaceManager(new NameTable());
namespaceManager.AddNamespace("foo", "http://default.htm");
var expected = new XPathResult(0,
new XPathResultToken
{
NodeType = XPathNodeType.Element,
HasChildren = true,
HasAttributes = true,
LocalName = "book",
Name = "book",
NamespaceURI = "http://default.htm",
HasNameTable = true,
Value = "\n\t\t\tNewton's Time Machine\n\t\t"
});
Utils.XPathNodesetTest(xml, testExpression, expected, namespaceManager: namespaceManager);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
using System;
using System.Xml;
using System.Xml.XPath;
using XPathTests.Common;
namespace XPathTests.FunctionalTests.Expressions
{
/// <summary>
/// XPathExpression - SetContext Functional Tests
/// </summary>
public static partial class SetContextFunctionalTestsTests
{
/// <summary>
/// Select node with qname
/// //NSbook:book
/// </summary>
[Fact]
public static void SetContextFunctionalTestsTest431()
{
var xml = "name.xml";
var testExpression = @"//NSbook:book[1]";
var namespaceManager = new XmlNamespaceManager(new NameTable());
namespaceManager.AddNamespace("NSbook", "http://book.htm");
var expected = new XPathResult(0,
new XPathResultToken
{
NodeType = XPathNodeType.Element,
HasChildren = true,
HasAttributes = true,
LocalName = "book",
Name = "NSbook:book",
NamespaceURI = "http://book.htm",
HasNameTable = true,
Prefix = "NSbook",
Value = "\n\t\t\tA Brief History Of Time\n\t\t"
});
Utils.XPathNodesetTest(xml, testExpression, expected, namespaceManager: namespaceManager);
}
/// <summary>
/// Prefix is not defined, should throw an error
/// //NSbook:book
/// </summary>
[Fact]
public static void SetContextFunctionalTestsTest432()
{
var xml = "name.xml";
var testExpression = @"//NSbook:book[1]";
var namespaceManager = new XmlNamespaceManager(new NameTable());
namespaceManager.AddNamespace("NSbook", "http://book.htm");
var expected = new XPathResult(0,
new XPathResultToken
{
NodeType = XPathNodeType.Element,
HasChildren = true,
HasAttributes = true,
LocalName = "book",
Name = "NSbook:book",
NamespaceURI = "http://book.htm",
HasNameTable = true,
Prefix = "NSbook",
Value = "\n\t\t\tA Brief History Of Time\n\t\t"
});
Utils.XPathNodesetTest(xml, testExpression, expected, namespaceManager: namespaceManager);
}
/// <summary>
/// use of multiple namespaces
/// /doc/prefix1:elem/prefix2:elem
/// </summary>
[Fact]
public static void SetContextFunctionalTestsTest433()
{
var xml = "name4.xml";
var testExpression = @"/doc/prefix1:elem/prefix2:elem";
var namespaceManager = new XmlNamespaceManager(new NameTable());
namespaceManager.AddNamespace("prefix1", "http://prefix1.htm");
namespaceManager.AddNamespace("prefix2", "http://prefix2.htm");
var expected = new XPathResult(0,
new XPathResultToken
{
NodeType = XPathNodeType.Element,
IsEmptyElement = true,
LocalName = "elem",
Name = "prefix2:elem",
NamespaceURI = "http://prefix2.htm",
HasNameTable = true,
Prefix = "prefix2"
});
Utils.XPathNodesetTest(xml, testExpression, expected, namespaceManager: namespaceManager);
}
/// <summary>
/// Prefix points to a namespace that is not defined in the document, should return empty nodeset.
/// //NSbook:book
/// </summary>
[Fact]
public static void SetContextFunctionalTestsTest434()
{
var xml = "name.xml";
var testExpression = @"//NSbook:book[1]";
var namespaceManager = new XmlNamespaceManager(new NameTable());
namespaceManager.AddNamespace("NSbook", "http://notbook.htm");
var expected = new XPathResult(0);
Utils.XPathNodesetTest(xml, testExpression, expected, namespaceManager: namespaceManager);
}
/// <summary>
/// The document's default namespace is defined with a prefix in the XmlNamespaceManager, XPath should find the nodes with the default namespace in the document.
/// //foo:book[1]
/// </summary>
[Fact]
public static void SetContextFunctionalTestsTest435()
{
var xml = "name2.xml";
var testExpression = @"//foo:book[1]";
var namespaceManager = new XmlNamespaceManager(new NameTable());
namespaceManager.AddNamespace("foo", "http://default.htm");
var expected = new XPathResult(0,
new XPathResultToken
{
NodeType = XPathNodeType.Element,
HasChildren = true,
HasAttributes = true,
LocalName = "book",
Name = "book",
NamespaceURI = "http://default.htm",
HasNameTable = true,
Value = "\n\t\t\tNewton's Time Machine\n\t\t"
});
Utils.XPathNodesetTest(xml, testExpression, expected, namespaceManager: namespaceManager);
}
}
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where, when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
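For illustration, a hedged C# sketch of the kind of code shape involved: on arm64, a small struct with reference fields is returned from a call in two registers, so storing it to a local is a multi-reg store whose non-first slot carries a GC pointer. The type and method names below are hypothetical and not taken from the original issue; whether each slot actually ends up enregistered is up to the register allocator, which is exactly the condition described above (first slot spilled, a later GC-ref slot in a register):
```csharp
using System;
using System.Runtime.CompilerServices;

public struct PairOfRefs      // 16 bytes on arm64: returned in two registers
{
    public object First;
    public object Second;
}

public static class MultiRegStoreSketch
{
    [MethodImpl(MethodImplOptions.NoInlining)]
    public static PairOfRefs Make() => new PairOfRefs { First = new object(), Second = new object() };

    public static void Use()
    {
        // The store into 't' is a multi-reg local store; both register
        // slots hold GC pointers that must stay reported to the GC.
        PairOfRefs t = Make();
        GC.KeepAlive(t.First);
        GC.KeepAlive(t.Second);
    }
}
```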
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/codegenarm.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX ARM Code Generator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARM
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "emit.h"
//------------------------------------------------------------------------
// genInstrWithConstant: We will typically generate one instruction
//
// ins reg1, reg2, imm
//
// However the imm might not fit as a directly encodable immediate.
// When it doesn't fit we generate extra instruction(s) that sets up
// the 'regTmp' with the proper immediate value.
//
// mov regTmp, imm
// ins reg1, reg2, regTmp
//
// Generally, codegen constants are marked non-containable if they don't fit. This function
// is used for cases that aren't mirrored in the IR, such as in the prolog.
//
// Arguments:
// ins - instruction
// attr - operation size and GC attribute
// reg1, reg2 - first and second register operands
// imm - immediate value (third operand when it fits)
// flags - whether flags are set
// tmpReg - temp register to use when the 'imm' doesn't fit. Can be REG_NA
// if caller knows for certain the constant will fit.
//
// Return Value:
// returns true if the immediate was small enough to be encoded inside instruction. If not,
// returns false meaning the immediate was too large and tmpReg was used and modified.
//
bool CodeGen::genInstrWithConstant(
instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm, insFlags flags, regNumber tmpReg)
{
bool immFitsInIns = false;
// reg1 is usually a dest register
// reg2 is always source register
assert(tmpReg != reg2); // regTmp cannot match any source register
switch (ins)
{
case INS_add:
case INS_sub:
immFitsInIns = validImmForInstr(ins, (target_ssize_t)imm, flags);
break;
default:
assert(!"Unexpected instruction in genInstrWithConstant");
break;
}
if (immFitsInIns)
{
// generate a single instruction that encodes the immediate directly
GetEmitter()->emitIns_R_R_I(ins, attr, reg1, reg2, (target_ssize_t)imm);
}
else
{
// caller can specify REG_NA for tmpReg, when it "knows" that the immediate will always fit
assert(tmpReg != REG_NA);
// generate two or more instructions
// first we load the immediate into tmpReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, imm);
// generate the instruction using a three register encoding with the immediate in tmpReg
GetEmitter()->emitIns_R_R_R(ins, attr, reg1, reg2, tmpReg);
}
return immFitsInIns;
}
//------------------------------------------------------------------------
// genStackPointerAdjustment: add a specified constant value to the stack pointer.
// An available temporary register is required to be specified, in case the constant
// is too large to encode in an "add" instruction (or "sub" instruction if we choose
// to use one), such that we need to load the constant into a register first, before using it.
//
// Arguments:
// spDelta - the value to add to SP (can be negative)
// tmpReg - an available temporary register
//
// Return Value:
// returns true if the immediate was small enough to be encoded inside instruction. If not,
// returns false meaning the immediate was too large and tmpReg was used and modified.
//
bool CodeGen::genStackPointerAdjustment(ssize_t spDelta, regNumber tmpReg)
{
// Even though INS_add is specified here, the encoder will choose either
// an INS_add or an INS_sub and encode the immediate as a positive value
//
return genInstrWithConstant(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, spDelta, INS_FLAGS_DONT_CARE, tmpReg);
}
//------------------------------------------------------------------------
// genCallFinally: Generate a call to the finally block.
//
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
BasicBlock* bbFinallyRet = nullptr;
// We don't have retless calls, since we use the BBJ_ALWAYS to point at a NOP pad where
// we would have otherwise created retless calls.
assert(block->isBBCallAlwaysPair());
assert(block->bbNext != NULL);
assert(block->bbNext->bbJumpKind == BBJ_ALWAYS);
assert(block->bbNext->bbJumpDest != NULL);
assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
bbFinallyRet = block->bbNext->bbJumpDest;
// Load the address where the finally funclet should return into LR.
// The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return.
genMov32RelocatableDisplacement(bbFinallyRet, REG_LR);
// Jump to the finally BB
inst_JMP(EJ_jmp, block->bbJumpDest);
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
// jump target using bbJumpDest - that is already used to point
// to the finally block. So just skip past the BBJ_ALWAYS unless the
// block is RETLESS.
assert(!(block->bbFlags & BBF_RETLESS_CALL));
assert(block->isBBCallAlwaysPair());
return block->bbNext;
}
//------------------------------------------------------------------------
// genEHCatchRet:
void CodeGen::genEHCatchRet(BasicBlock* block)
{
genMov32RelocatableDisplacement(block->bbJumpDest, REG_INTRET);
}
//------------------------------------------------------------------------
// instGen_Set_Reg_To_Imm: Move an immediate value into an integer register.
//
void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags))
{
// reg cannot be a FP register
assert(!genIsValidFloatReg(reg));
if (!compiler->opts.compReloc)
{
size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
}
if (EA_IS_RELOC(size))
{
// TODO-CrossBitness: we wouldn't need the cast below if we had CodeGen::instGen_Set_Reg_To_Reloc_Imm.
genMov32RelocatableImmediate(size, (BYTE*)imm, reg);
}
else if (imm == 0)
{
instGen_Set_Reg_To_Zero(size, reg, flags);
}
else
{
// TODO-CrossBitness: we wouldn't need the cast below if we had CodeGen::instGen_Set_Reg_To_Reloc_Imm.
const int val32 = (int)imm;
if (validImmForMov(val32))
{
GetEmitter()->emitIns_R_I(INS_mov, size, reg, val32, flags);
}
else // We have to use a movw/movt pair of instructions
{
const int imm_lo16 = val32 & 0xffff;
const int imm_hi16 = (val32 >> 16) & 0xffff;
assert(validImmForMov(imm_lo16));
assert(imm_hi16 != 0);
GetEmitter()->emitIns_R_I(INS_movw, size, reg, imm_lo16);
// If we've got a low register, the high word is all bits set,
// and the high bit of the low word is set, we can sign extend
// halfword and save two bytes of encoding. This can happen for
// small magnitude negative numbers 'n' for -32768 <= n <= -1.
if (GetEmitter()->isLowRegister(reg) && (imm_hi16 == 0xffff) && ((imm_lo16 & 0x8000) == 0x8000))
{
GetEmitter()->emitIns_Mov(INS_sxth, EA_4BYTE, reg, reg, /* canSkip */ false);
}
else
{
GetEmitter()->emitIns_R_I(INS_movt, size, reg, imm_hi16);
}
if (flags == INS_FLAGS_SET)
GetEmitter()->emitIns_Mov(INS_mov, size, reg, reg, /* canSkip */ false, INS_FLAGS_SET);
}
}
regSet.verifyRegUsed(reg);
}
//------------------------------------------------------------------------
// genSetRegToConst: Generate code to set a register 'targetReg' of type 'targetType'
// to the constant specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'.
//
// Notes:
// This does not call genProduceReg() on the target register.
//
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree)
{
switch (tree->gtOper)
{
case GT_CNS_INT:
{
// relocatable values tend to come down as a CNS_INT of native int type
// so the line between these two opcodes is kind of blurry
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t cnsVal = con->IconValue();
emitAttr attr = emitActualTypeSize(targetType);
// TODO-CQ: Currently we cannot do this for all handles because of
// https://github.com/dotnet/runtime/issues/60712
if (con->ImmedValNeedsReloc(compiler))
{
attr = EA_SET_FLG(attr, EA_CNS_RELOC_FLG);
}
if (targetType == TYP_BYREF)
{
attr = EA_SET_FLG(attr, EA_BYREF_FLG);
}
instGen_Set_Reg_To_Imm(attr, targetReg, cnsVal);
regSet.verifyRegUsed(targetReg);
}
break;
case GT_CNS_DBL:
{
GenTreeDblCon* dblConst = tree->AsDblCon();
double constValue = dblConst->AsDblCon()->gtDconVal;
// TODO-ARM-CQ: Do we have a faster/smaller way to generate 0.0 in thumb2 ISA ?
if (targetType == TYP_FLOAT)
{
// Get a temp integer register
regNumber tmpReg = tree->GetSingleTempReg();
float f = forceCastToFloat(constValue);
instGen_Set_Reg_To_Imm(EA_4BYTE, tmpReg, *((int*)(&f)));
GetEmitter()->emitIns_Mov(INS_vmov_i2f, EA_4BYTE, targetReg, tmpReg, /* canSkip */ false);
}
else
{
assert(targetType == TYP_DOUBLE);
unsigned* cv = (unsigned*)&constValue;
// Get two temp integer registers
regNumber tmpReg1 = tree->ExtractTempReg();
regNumber tmpReg2 = tree->GetSingleTempReg();
instGen_Set_Reg_To_Imm(EA_4BYTE, tmpReg1, cv[0]);
instGen_Set_Reg_To_Imm(EA_4BYTE, tmpReg2, cv[1]);
GetEmitter()->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, targetReg, tmpReg1, tmpReg2);
}
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genCodeForBinary: Generate code for many binary arithmetic operators
// This method is expected to have called genConsumeOperands() before calling it.
//
// Arguments:
// treeNode - The binary operation for which we are generating code.
//
// Return Value:
// None.
//
// Notes:
// Mul and div are not handled here.
// See the assert below for the operators that are handled.
void CodeGen::genCodeForBinary(GenTreeOp* treeNode)
{
const genTreeOps oper = treeNode->OperGet();
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
assert(treeNode->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_ADD_LO, GT_ADD_HI, GT_SUB_LO, GT_SUB_HI, GT_OR, GT_XOR, GT_AND,
GT_AND_NOT));
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
instruction ins = genGetInsForOper(oper, targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
noway_assert(targetReg != REG_NA);
if ((oper == GT_ADD_LO || oper == GT_SUB_LO))
{
// During decomposition, all operands become reg
assert(!op1->isContained() && !op2->isContained());
emit->emitIns_R_R_R(ins, emitTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(), op2->GetRegNum(),
INS_FLAGS_SET);
}
else
{
regNumber r = emit->emitInsTernary(ins, emitTypeSize(treeNode), treeNode, op1, op2);
assert(r == targetReg);
}
genProduceReg(treeNode);
}
//--------------------------------------------------------------------------------------
// genLclHeap: Generate code for localloc
//
// Description:
// There are two ways, depending on the build type, to generate code for localloc:
// 1) For debug builds, where memory should be initialized, we generate a loop
// that invokes push {tmpReg} N times.
// 2) For non-debug builds, we tickle the pages to ensure that SP is always
// valid and stays in sync with the "stack guard page". The number of iterations
// is N/eeGetPageSize().
//
// Comments:
// Possible optimizations:
// 1) No loop is needed for a zero-size allocation
// 2) For small allocations (at most 4 pushes) we unroll the loop
// 3) For allocations smaller than eeGetPageSize(), when the memory does not need to be
// zero-initialized, we can simply decrement SP.
//
// Notes: Size N should be aligned to STACK_ALIGN before any allocation
//
void CodeGen::genLclHeap(GenTree* tree)
{
assert(tree->OperGet() == GT_LCLHEAP);
assert(compiler->compLocallocUsed);
GenTree* size = tree->AsOp()->gtOp1;
noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
// Result of localloc will be returned in regCnt.
// Also it used as temporary register in code generation
// for storing allocation size
regNumber regCnt = tree->GetRegNum();
var_types type = genActualType(size->gtType);
emitAttr easz = emitTypeSize(type);
BasicBlock* endLabel = nullptr;
unsigned stackAdjustment = 0;
regNumber regTmp = REG_NA;
const target_ssize_t ILLEGAL_LAST_TOUCH_DELTA = (target_ssize_t)-1;
target_ssize_t lastTouchDelta =
ILLEGAL_LAST_TOUCH_DELTA; // The number of bytes from SP to the last stack address probed.
noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
noway_assert(genStackLevel == 0); // Can't have anything on the stack
// Check for zero-size allocations
// size_t amount = 0;
if (size->IsCnsIntOrI())
{
// If size is a constant, then it must be contained.
assert(size->isContained());
// If amount is zero then return null in regCnt
size_t amount = size->AsIntCon()->gtIconVal;
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
goto BAILOUT;
}
}
else
{
// If 0 bail out by returning null in regCnt
genConsumeRegAndCopy(size, regCnt);
endLabel = genCreateTempLabel();
GetEmitter()->emitIns_R_R(INS_TEST, easz, regCnt, regCnt);
inst_JMP(EJ_eq, endLabel);
}
// Setup the regTmp, if there is one.
if (tree->AvailableTempRegCount() > 0)
{
regTmp = tree->ExtractTempReg();
}
// If we have an outgoing arg area then we must adjust the SP by popping off the
// outgoing arg area. We will restore it right before we return from this method.
if (compiler->lvaOutgoingArgSpaceSize > 0)
{
// This must be true for the stack to remain aligned
assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0);
// We're guaranteed (by LinearScan::BuildLclHeap()) to have a legal regTmp if we need one.
genStackPointerAdjustment(compiler->lvaOutgoingArgSpaceSize, regTmp);
stackAdjustment += compiler->lvaOutgoingArgSpaceSize;
}
// Put aligned allocation size to regCnt
if (size->IsCnsIntOrI())
{
// 'amount' is the total number of bytes to localloc to properly STACK_ALIGN
target_size_t amount = (target_size_t)size->AsIntCon()->gtIconVal;
amount = AlignUp(amount, STACK_ALIGN);
// For small allocations we will generate up to four push instructions (either 2 or 4, exactly,
// since STACK_ALIGN is 8, and REGSIZE_BYTES is 4).
static_assert_no_msg(STACK_ALIGN == (REGSIZE_BYTES * 2));
assert(amount % REGSIZE_BYTES == 0);
target_size_t pushCount = amount / REGSIZE_BYTES;
if (pushCount <= 4)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
while (pushCount != 0)
{
inst_IV(INS_push, (unsigned)genRegMask(regCnt));
pushCount -= 1;
}
lastTouchDelta = 0;
goto ALLOC_DONE;
}
else if (!compiler->info.compInitMem && (amount < compiler->eeGetPageSize())) // must be < not <=
{
// Since the size is less than a page, simply adjust the SP value.
// The SP might already be in the guard page, must touch it BEFORE
// the alloc, not after.
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regCnt, REG_SP, 0);
inst_RV_IV(INS_sub, REG_SP, amount, EA_PTRSIZE);
lastTouchDelta = amount;
goto ALLOC_DONE;
}
// regCnt will be the total number of bytes to locAlloc
instGen_Set_Reg_To_Imm(EA_4BYTE, regCnt, amount);
}
else
{
// Round up the number of bytes to allocate to a STACK_ALIGN boundary.
inst_RV_IV(INS_add, regCnt, (STACK_ALIGN - 1), emitActualTypeSize(type));
inst_RV_IV(INS_AND, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
}
// Allocation
if (compiler->info.compInitMem)
{
// At this point 'regCnt' is set to the total number of bytes to localloc.
// Since we have to zero out the allocated memory AND ensure that the stack pointer is always valid
// by tickling the pages, we will just push 0's on the stack.
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regTmp);
// Loop:
BasicBlock* loop = genCreateTempLabel();
genDefineTempLabel(loop);
noway_assert(STACK_ALIGN == 8);
inst_IV(INS_push, (unsigned)genRegMask(regTmp));
inst_IV(INS_push, (unsigned)genRegMask(regTmp));
// If not done, loop
// Note that regCnt is the number of bytes to stack allocate.
assert(genIsValidIntReg(regCnt));
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, regCnt, STACK_ALIGN, INS_FLAGS_SET);
inst_JMP(EJ_ne, loop);
lastTouchDelta = 0;
}
else
{
// At this point 'regCnt' is set to the total number of bytes to locAlloc.
//
// We don't need to zero out the allocated memory. However, we do have
// to tickle the pages to ensure that SP is always valid and is
// in sync with the "stack guard page". Note that in the worst
// case SP is on the last byte of the guard page. Thus you must
// touch SP-0 first not SP-0x1000.
//
// Another subtlety is that you don't want SP to be exactly on the
// boundary of the guard page because PUSH is predecrement, thus
// call setup would not touch the guard page but just beyond it
//
// Note that we go through a few hoops so that SP never points to
// illegal pages at any time during the tickling process
//
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
// bvc Loop // result is smaller than original SP (no wrap around)
// mov regCnt, #0 // Overflow, pick lowest possible value
//
// Loop:
// ldr regTmp, [SP + 0] // tickle the page - read from the page
// sub regTmp, SP, PAGE_SIZE // decrement SP by eeGetPageSize()
// cmp regTmp, regCnt
// jb Done
// mov SP, regTmp
// j Loop
//
// Done:
// mov SP, regCnt
//
BasicBlock* loop = genCreateTempLabel();
BasicBlock* done = genCreateTempLabel();
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
GetEmitter()->emitIns_R_R_R(INS_sub, EA_PTRSIZE, regCnt, REG_SPBASE, regCnt, INS_FLAGS_SET);
inst_JMP(EJ_vc, loop); // branch if the V flag is not set
// Overflow, set regCnt to lowest possible value
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
genDefineTempLabel(loop);
// tickle the page - Read from the updated SP - this triggers a page fault when on the guard page
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SPBASE, 0);
// decrement SP by eeGetPageSize()
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, compiler->eeGetPageSize());
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt);
inst_JMP(EJ_lo, done);
// Update SP to be at the next page of stack that we will tickle
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, regTmp, /* canSkip */ false);
// Jump to loop and tickle new stack address
inst_JMP(EJ_jmp, loop);
// Done with stack tickle loop
genDefineTempLabel(done);
// Now just move the final value to SP
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt, /* canSkip */ false);
// lastTouchDelta is dynamic, and can be up to a page. So if we have outgoing arg space,
// we're going to assume the worst and probe.
}
ALLOC_DONE:
// Re-adjust SP to allocate outgoing arg area. We must probe this adjustment.
if (stackAdjustment != 0)
{
assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
assert((lastTouchDelta == ILLEGAL_LAST_TOUCH_DELTA) || (lastTouchDelta >= 0));
if ((lastTouchDelta == ILLEGAL_LAST_TOUCH_DELTA) ||
(stackAdjustment + (unsigned)lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES >
compiler->eeGetPageSize()))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)stackAdjustment, regTmp);
}
else
{
genStackPointerConstantAdjustment(-(ssize_t)stackAdjustment, regTmp);
}
// Return the stackalloc'ed address in result register.
// regCnt = SP + stackAdjustment.
genInstrWithConstant(INS_add, EA_PTRSIZE, regCnt, REG_SPBASE, (ssize_t)stackAdjustment, INS_FLAGS_DONT_CARE,
regTmp);
}
else // stackAdjustment == 0
{
// Move the final value of SP to regCnt
inst_Mov(TYP_I_IMPL, regCnt, REG_SPBASE, /* canSkip */ false);
}
BAILOUT:
if (endLabel != nullptr)
genDefineTempLabel(endLabel);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genTableBasedSwitch: generate code for a switch statement based on a table of code addresses
//
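// For illustration, the emitted dispatch is roughly a single load into PC
// (register names are placeholders):
//   ldr pc, [baseReg, idxReg, lsl #2]
// where baseReg holds the jump table address and idxReg the switch value.
//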
void CodeGen::genTableBasedSwitch(GenTree* treeNode)
{
genConsumeOperands(treeNode->AsOp());
regNumber idxReg = treeNode->AsOp()->gtOp1->GetRegNum();
regNumber baseReg = treeNode->AsOp()->gtOp2->GetRegNum();
GetEmitter()->emitIns_R_ARX(INS_ldr, EA_4BYTE, REG_PC, baseReg, idxReg, TARGET_POINTER_SIZE, 0);
}
//------------------------------------------------------------------------
// genJumpTable: emits the table and an instruction to get the address of the first element
//
void CodeGen::genJumpTable(GenTree* treeNode)
{
noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
assert(treeNode->OperGet() == GT_JMPTABLE);
unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
unsigned jmpTabBase;
jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, false);
JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
noway_assert(target->bbFlags & BBF_HAS_LABEL);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
GetEmitter()->emitDataGenData(i, target);
}
GetEmitter()->emitDataGenEnd();
genMov32RelocatableDataLabel(jmpTabBase, treeNode->GetRegNum());
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genGetInsForOper: Return the instruction to use for the given operation and type.
//
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins;
if (varTypeIsFloating(type))
return CodeGen::ins_MathOp(oper, type);
switch (oper)
{
case GT_ADD:
ins = INS_add;
break;
case GT_AND:
ins = INS_AND;
break;
case GT_AND_NOT:
ins = INS_bic;
break;
case GT_MUL:
ins = INS_MUL;
break;
#if !defined(USE_HELPERS_FOR_INT_DIV)
case GT_DIV:
ins = INS_sdiv;
break;
#endif // !USE_HELPERS_FOR_INT_DIV
case GT_LSH:
ins = INS_SHIFT_LEFT_LOGICAL;
break;
case GT_NEG:
ins = INS_rsb;
break;
case GT_NOT:
ins = INS_NOT;
break;
case GT_OR:
ins = INS_OR;
break;
case GT_RSH:
ins = INS_SHIFT_RIGHT_ARITHM;
break;
case GT_RSZ:
ins = INS_SHIFT_RIGHT_LOGICAL;
break;
case GT_SUB:
ins = INS_sub;
break;
case GT_XOR:
ins = INS_XOR;
break;
case GT_ROR:
ins = INS_ror;
break;
case GT_ADD_LO:
ins = INS_add;
break;
case GT_ADD_HI:
ins = INS_adc;
break;
case GT_SUB_LO:
ins = INS_sub;
break;
case GT_SUB_HI:
ins = INS_sbc;
break;
case GT_LSH_HI:
ins = INS_SHIFT_LEFT_LOGICAL;
break;
case GT_RSH_LO:
ins = INS_SHIFT_RIGHT_LOGICAL;
break;
default:
unreached();
break;
}
return ins;
}
//------------------------------------------------------------------------
// genCodeForNegNot: Produce code for a GT_NEG/GT_NOT node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForNegNot(GenTree* tree)
{
assert(tree->OperIs(GT_NEG, GT_NOT));
var_types targetType = tree->TypeGet();
assert(!tree->OperIs(GT_NOT) || !varTypeIsFloating(targetType));
regNumber targetReg = tree->GetRegNum();
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
assert(!tree->isContained());
// The dst can only be a register.
assert(targetReg != REG_NA);
GenTree* operand = tree->gtGetOp1();
assert(!operand->isContained());
// The src must be a register.
regNumber operandReg = genConsumeReg(operand);
if (ins == INS_vneg)
{
GetEmitter()->emitIns_R_R(ins, emitTypeSize(tree), targetReg, operandReg);
}
else
{
GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(tree), targetReg, operandReg, 0, INS_FLAGS_SET);
}
genProduceReg(tree);
}
// Generate code for CpObj nodes which copy structs that have interleaved
// GC pointers.
// For this case we'll generate a sequence of loads/stores in the case of struct
// slots that don't contain GC pointers. The generated code will look like:
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
//
// In the case of a GC-Pointer we'll call the ByRef write barrier helper
// which happens to use the same registers as the previous call to maintain
// the same register requirements and register killsets:
// bl CORINFO_HELP_ASSIGN_BYREF
//
// So finally an example would look like this:
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
// bl CORINFO_HELP_ASSIGN_BYREF
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
// bl CORINFO_HELP_ASSIGN_BYREF
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
{
GenTree* dstAddr = cpObjNode->Addr();
GenTree* source = cpObjNode->Data();
var_types srcAddrType = TYP_BYREF;
bool sourceIsLocal = false;
regNumber dstReg = REG_NA;
regNumber srcReg = REG_NA;
assert(source->isContained());
if (source->gtOper == GT_IND)
{
GenTree* srcAddr = source->gtGetOp1();
assert(!srcAddr->isContained());
srcAddrType = srcAddr->TypeGet();
}
else
{
noway_assert(source->IsLocal());
sourceIsLocal = true;
}
bool dstOnStack = dstAddr->gtSkipReloadOrCopy()->OperIsLocalAddr();
#ifdef DEBUG
assert(!dstAddr->isContained());
// This GenTree node has data about GC pointers, this means we're dealing
// with CpObj.
assert(cpObjNode->GetLayout()->HasGCPtr());
#endif // DEBUG
// Consume the operands and get them into the right registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumeBlockOp(cpObjNode, REG_WRITE_BARRIER_DST_BYREF, REG_WRITE_BARRIER_SRC_BYREF, REG_NA);
gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_SRC_BYREF, srcAddrType);
gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_DST_BYREF, dstAddr->TypeGet());
// Temp register used to perform the sequence of loads and stores.
regNumber tmpReg = cpObjNode->ExtractTempReg();
assert(genIsValidIntReg(tmpReg));
if (cpObjNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before & after a volatile CpObj operation
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
ClassLayout* layout = cpObjNode->GetLayout();
unsigned slots = layout->GetSlotCount();
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
{
for (unsigned i = 0; i < slots; ++i)
{
emitAttr attr = emitTypeSize(layout->GetGCPtrType(i));
emit->emitIns_R_R_I(INS_ldr, attr, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
emit->emitIns_R_R_I(INS_str, attr, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
}
}
else
{
unsigned gcPtrCount = layout->GetGCPtrCount();
unsigned i = 0;
while (i < slots)
{
if (!layout->IsGCPtr(i))
{
emit->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
emit->emitIns_R_R_I(INS_str, EA_PTRSIZE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
}
else
{
genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
gcPtrCount--;
}
++i;
}
assert(gcPtrCount == 0);
}
if (cpObjNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before & after a volatile CpObj operation
instGen_MemoryBarrier();
}
// Clear the gcInfo for registers of source and dest.
// While we normally update GC info prior to the last instruction that uses them,
// these actually live into the helper call.
gcInfo.gcMarkRegSetNpt(RBM_WRITE_BARRIER_SRC_BYREF | RBM_WRITE_BARRIER_DST_BYREF);
}
//------------------------------------------------------------------------
// genCodeForShiftLong: Generates the code sequence for a GenTree node that
// represents a three operand bit shift or rotate operation (<<Hi, >>Lo).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
// b) The shift-by-amount in tree->AsOp()->gtOp2 is a contained constant
//
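// For illustration, for GT_LSH_HI with a shift count of 8 the emitted sequence is
// roughly (register names are placeholders):
//   lsl rHi, rHi, #8
//   orr rHi, rHi, rLo, lsr #24
//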
void CodeGen::genCodeForShiftLong(GenTree* tree)
{
// Only the non-RMW case here.
genTreeOps oper = tree->OperGet();
assert(oper == GT_LSH_HI || oper == GT_RSH_LO);
GenTree* operand = tree->AsOp()->gtOp1;
assert(operand->OperGet() == GT_LONG);
assert(operand->AsOp()->gtOp1->isUsedFromReg());
assert(operand->AsOp()->gtOp2->isUsedFromReg());
GenTree* operandLo = operand->gtGetOp1();
GenTree* operandHi = operand->gtGetOp2();
regNumber regLo = operandLo->GetRegNum();
regNumber regHi = operandHi->GetRegNum();
genConsumeOperands(tree->AsOp());
var_types targetType = tree->TypeGet();
instruction ins = genGetInsForOper(oper, targetType);
GenTree* shiftBy = tree->gtGetOp2();
assert(shiftBy->isContainedIntOrIImmed());
unsigned count = (unsigned)shiftBy->AsIntConCommon()->IconValue();
regNumber regResult = (oper == GT_LSH_HI) ? regHi : regLo;
inst_Mov(targetType, tree->GetRegNum(), regResult, /* canSkip */ true);
if (oper == GT_LSH_HI)
{
inst_RV_SH(ins, EA_4BYTE, tree->GetRegNum(), count);
GetEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, tree->GetRegNum(), tree->GetRegNum(), regLo, 32 - count,
INS_FLAGS_DONT_CARE, INS_OPTS_LSR);
}
else
{
assert(oper == GT_RSH_LO);
inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, tree->GetRegNum(), count);
GetEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, tree->GetRegNum(), tree->GetRegNum(), regHi, 32 - count,
INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclVar: Produce code for a GT_LCL_VAR node.
//
// Arguments:
// tree - the GT_LCL_VAR node
//
void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
{
// lcl_vars are not defs
assert((tree->gtFlags & GTF_VAR_DEF) == 0);
bool isRegCandidate = compiler->lvaGetDesc(tree)->lvIsRegCandidate();
// If this is a register candidate that has been spilled, genConsumeReg() will
// reload it at the point of use. Otherwise, if it's not in a register, we load it here.
if (!isRegCandidate && !tree->IsMultiReg() && !(tree->gtFlags & GTF_SPILLED))
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(tree);
var_types type = varDsc->GetRegisterType(tree);
GetEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), tree->GetRegNum(), tree->GetLclNum(), 0);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForStoreLclFld: Produce code for a GT_STORE_LCL_FLD node.
//
// Arguments:
// tree - the GT_STORE_LCL_FLD node
//
void CodeGen::genCodeForStoreLclFld(GenTreeLclFld* tree)
{
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
noway_assert(targetType != TYP_STRUCT);
// record the offset
unsigned offset = tree->GetLclOffs();
// We must have a stack store with GT_STORE_LCL_FLD
noway_assert(targetReg == REG_NA);
unsigned varNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
// Ensure that lclVar nodes are typed correctly.
assert(!varDsc->lvNormalizeOnStore() || targetType == genActualType(varDsc->TypeGet()));
GenTree* data = tree->gtOp1;
regNumber dataReg = REG_NA;
genConsumeReg(data);
if (data->isContained())
{
assert(data->OperIs(GT_BITCAST));
const GenTree* bitcastSrc = data->AsUnOp()->gtGetOp1();
assert(!bitcastSrc->isContained());
dataReg = bitcastSrc->GetRegNum();
}
else
{
dataReg = data->GetRegNum();
}
assert(dataReg != REG_NA);
if (tree->IsOffsetMisaligned())
{
// Arm supports unaligned access only for integer types, so convert the floating-point
// data being stored into 1 or 2 integer registers and write them out as integers.
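// For illustration, a misaligned TYP_DOUBLE store might produce roughly
// (register names are placeholders):
//   add  rAddr, <frame>, #offset
//   vmov rLo, rHi, dData
//   str  rLo, [rAddr]
//   str  rHi, [rAddr, #4]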
regNumber addr = tree->ExtractTempReg();
emit->emitIns_R_S(INS_lea, EA_PTRSIZE, addr, varNum, offset);
if (targetType == TYP_FLOAT)
{
regNumber floatAsInt = tree->GetSingleTempReg();
emit->emitIns_Mov(INS_vmov_f2i, EA_4BYTE, floatAsInt, dataReg, /* canSkip */ false);
emit->emitIns_R_R(INS_str, EA_4BYTE, floatAsInt, addr);
}
else
{
regNumber halfdoubleAsInt1 = tree->ExtractTempReg();
regNumber halfdoubleAsInt2 = tree->GetSingleTempReg();
emit->emitIns_R_R_R(INS_vmov_d2i, EA_8BYTE, halfdoubleAsInt1, halfdoubleAsInt2, dataReg);
emit->emitIns_R_R_I(INS_str, EA_4BYTE, halfdoubleAsInt1, addr, 0);
emit->emitIns_R_R_I(INS_str, EA_4BYTE, halfdoubleAsInt2, addr, 4);
}
}
else
{
emitAttr attr = emitTypeSize(targetType);
instruction ins = ins_StoreFromSrc(dataReg, targetType);
emit->emitIns_S_R(ins, attr, dataReg, varNum, offset);
}
// Updating variable liveness after instruction was emitted
genUpdateLife(tree);
varDsc->SetRegNum(REG_STK);
}
//------------------------------------------------------------------------
// genCodeForStoreLclVar: Produce code for a GT_STORE_LCL_VAR node.
//
// Arguments:
// tree - the GT_STORE_LCL_VAR node
//
void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* tree)
{
GenTree* data = tree->gtOp1;
GenTree* actualData = data->gtSkipReloadOrCopy();
unsigned regCount = 1;
// The "var = call" case, where the call returns a multi-reg value,
// is handled separately.
if (actualData->IsMultiRegNode())
{
regCount = actualData->IsMultiRegLclVar() ? actualData->AsLclVar()->GetFieldCount(compiler)
: actualData->GetMultiRegCount();
if (regCount > 1)
{
genMultiRegStoreToLocal(tree);
}
}
if (regCount == 1)
{
unsigned varNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
var_types targetType = varDsc->GetRegisterType(tree);
if (targetType == TYP_LONG)
{
genStoreLongLclVar(tree);
}
else
{
genConsumeRegs(data);
regNumber dataReg = REG_NA;
if (data->isContained())
{
assert(data->OperIs(GT_BITCAST));
const GenTree* bitcastSrc = data->AsUnOp()->gtGetOp1();
assert(!bitcastSrc->isContained());
dataReg = bitcastSrc->GetRegNum();
}
else
{
dataReg = data->GetRegNum();
}
assert(dataReg != REG_NA);
regNumber targetReg = tree->GetRegNum();
if (targetReg == REG_NA) // store into stack based LclVar
{
inst_set_SV_var(tree);
instruction ins = ins_StoreFromSrc(dataReg, targetType);
emitAttr attr = emitTypeSize(targetType);
emitter* emit = GetEmitter();
emit->emitIns_S_R(ins, attr, dataReg, varNum, /* offset */ 0);
// Updating variable liveness after instruction was emitted
genUpdateLife(tree);
varDsc->SetRegNum(REG_STK);
}
else // store into register (i.e move into register)
{
// Assign into targetReg when dataReg (from op1) is not the same register
inst_Mov(targetType, targetReg, dataReg, /* canSkip */ true);
genProduceReg(tree);
}
}
}
}
//------------------------------------------------------------------------
// genCodeForDivMod: Produce code for a GT_DIV/GT_UDIV/GT_MOD/GT_UMOD node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForDivMod(GenTreeOp* tree)
{
assert(tree->OperIs(GT_DIV, GT_UDIV, GT_MOD, GT_UMOD));
// We shouldn't be seeing GT_MOD on float/double args as it should get morphed into a
// helper call by front-end. Similarly we shouldn't be seeing GT_UDIV and GT_UMOD
// on float/double args.
noway_assert(tree->OperIs(GT_DIV) || !varTypeIsFloating(tree));
#if defined(USE_HELPERS_FOR_INT_DIV)
noway_assert(!varTypeIsIntOrI(tree));
#endif // USE_HELPERS_FOR_INT_DIV
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
genConsumeOperands(tree);
noway_assert(targetReg != REG_NA);
GenTree* dst = tree;
GenTree* src1 = tree->gtGetOp1();
GenTree* src2 = tree->gtGetOp2();
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
emitAttr attr = emitTypeSize(tree);
regNumber result = REG_NA;
// dst can only be a reg
assert(!dst->isContained());
// at least one of the sources must be in a register
assert(!src1->isContained() || !src2->isContained());
if (varTypeIsFloating(targetType))
{
// Floating point divide never raises an exception
emit->emitIns_R_R_R(ins, attr, dst->GetRegNum(), src1->GetRegNum(), src2->GetRegNum());
}
else // an integer divide operation
{
// TODO-ARM-Bug: handle zero division exception.
emit->emitIns_R_R_R(ins, attr, dst->GetRegNum(), src1->GetRegNum(), src2->GetRegNum());
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCkfinite: Generate code for ckfinite opcode.
//
// Arguments:
// treeNode - The GT_CKFINITE node
//
// Return Value:
// None.
//
// Assumptions:
// GT_CKFINITE node has reserved an internal register.
//
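// Notes:
//    For illustration, for a TYP_FLOAT operand the emitted sequence is roughly
//    (register names are placeholders):
//      vmov rTmp, sSrc          // reinterpret the float bits as an integer
//      sbfx rTmp, rTmp, #23, #8 // extract the sign-extended 8-bit exponent
//      adds rTmp, rTmp, #1      // an all-ones exponent (NaN/Inf) wraps to zero
//      beq  <throw ArithmeticException>
//      vmov sDst, sSrc          // otherwise copy the finite value to the target (if needed)
//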
void CodeGen::genCkfinite(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_CKFINITE);
emitter* emit = GetEmitter();
var_types targetType = treeNode->TypeGet();
regNumber intReg = treeNode->GetSingleTempReg();
regNumber fpReg = genConsumeReg(treeNode->AsOp()->gtOp1);
regNumber targetReg = treeNode->GetRegNum();
// Extract and sign-extend the exponent into an integer register
if (targetType == TYP_FLOAT)
{
emit->emitIns_Mov(INS_vmov_f2i, EA_4BYTE, intReg, fpReg, /* canSkip */ false);
emit->emitIns_R_R_I_I(INS_sbfx, EA_4BYTE, intReg, intReg, 23, 8);
}
else
{
assert(targetType == TYP_DOUBLE);
emit->emitIns_Mov(INS_vmov_f2i, EA_4BYTE, intReg, REG_NEXT(fpReg), /* canSkip */ false);
emit->emitIns_R_R_I_I(INS_sbfx, EA_4BYTE, intReg, intReg, 20, 11);
}
// If exponent is all 1's, throw ArithmeticException
emit->emitIns_R_I(INS_add, EA_4BYTE, intReg, 1, INS_FLAGS_SET);
genJumpToThrowHlpBlk(EJ_eq, SCK_ARITH_EXCPN);
// If it's a finite value, copy it to targetReg
inst_Mov(targetType, targetReg, fpReg, /* canSkip */ true, emitTypeSize(treeNode));
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT/GT_CMP node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForCompare(GenTreeOp* tree)
{
// TODO-ARM-CQ: Check if we can use the currently set flags.
// TODO-ARM-CQ: Check for the case where we can simply transfer the carry bit to a register
// (signed < or >= where targetReg != REG_NA)
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = op1->TypeGet();
var_types op2Type = op2->TypeGet();
assert(!varTypeIsLong(op1Type));
assert(!varTypeIsLong(op2Type));
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
genConsumeIfReg(op1);
genConsumeIfReg(op2);
if (varTypeIsFloating(op1Type))
{
assert(op1Type == op2Type);
assert(!tree->OperIs(GT_CMP));
emit->emitInsBinary(INS_vcmp, emitTypeSize(op1Type), op1, op2);
// vmrs with register 0xf (i.e. APSR_nzcv) transfers the FP status flags to the CPSR
emit->emitIns_R(INS_vmrs, EA_4BYTE, REG_R15);
}
else
{
assert(!varTypeIsFloating(op2Type));
var_types cmpType = (op1Type == op2Type) ? op1Type : TYP_INT;
emit->emitInsBinary(INS_cmp, emitTypeSize(cmpType), op1, op2);
}
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
inst_SETCC(GenCondition::FromRelop(tree), tree->TypeGet(), targetReg);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node.
//
// Arguments:
// tree - the GT_RETURNTRAP node
//
void CodeGen::genCodeForReturnTrap(GenTreeOp* tree)
{
assert(tree->OperGet() == GT_RETURNTRAP);
// this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
// based on the contents of 'data'
GenTree* data = tree->gtOp1;
genConsumeIfReg(data);
GenTreeIntCon cns = intForm(TYP_INT, 0);
cns.SetContained();
GetEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
BasicBlock* skipLabel = genCreateTempLabel();
inst_JMP(EJ_eq, skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN);
genDefineTempLabel(skipLabel);
}
//------------------------------------------------------------------------
// genCodeForStoreInd: Produce code for a GT_STOREIND node.
//
// Arguments:
// tree - the GT_STOREIND node
//
void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
{
GenTree* data = tree->Data();
GenTree* addr = tree->Addr();
var_types type = tree->TypeGet();
assert(!varTypeIsFloating(type) || (type == data->TypeGet()));
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(tree, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
{
// data and addr must be in registers.
// Consume both registers so that any copies of interfering
// registers are taken care of.
genConsumeOperands(tree);
// At this point, we should not have any interference.
// That is, 'data' must not be in REG_ARG_0,
// as that is where 'addr' must go.
noway_assert(data->GetRegNum() != REG_ARG_0);
// addr goes in REG_ARG_0
inst_Mov(addr->TypeGet(), REG_ARG_0, addr->GetRegNum(), /* canSkip */ true);
// data goes in REG_ARG_1
inst_Mov(data->TypeGet(), REG_ARG_1, data->GetRegNum(), /* canSkip */ true);
genGCWriteBarrier(tree, writeBarrierForm);
}
else // A normal store, not a WriteBarrier store
{
// We must consume the operands in the proper execution order,
// so that liveness is updated appropriately.
genConsumeAddress(addr);
if (!data->isContained())
{
genConsumeRegs(data);
}
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
{
// issue a full memory barrier before a volatile StInd
instGen_MemoryBarrier();
}
regNumber dataReg = data->GetRegNum();
GetEmitter()->emitInsLoadStoreOp(ins_StoreFromSrc(dataReg, type), emitActualTypeSize(type), dataReg, tree);
// If store was to a variable, update variable liveness after instruction was emitted.
genUpdateLife(tree);
}
}
//------------------------------------------------------------------------
// genLongToIntCast: Generate code for long to int casts.
//
// Arguments:
// cast - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// The cast node and its sources (via GT_LONG) must have been assigned registers.
// The destination cannot be a floating point type or a small integer type.
//
void CodeGen::genLongToIntCast(GenTree* cast)
{
assert(cast->OperGet() == GT_CAST);
GenTree* src = cast->gtGetOp1();
noway_assert(src->OperGet() == GT_LONG);
genConsumeRegs(src);
var_types srcType = ((cast->gtFlags & GTF_UNSIGNED) != 0) ? TYP_ULONG : TYP_LONG;
var_types dstType = cast->CastToType();
regNumber loSrcReg = src->gtGetOp1()->GetRegNum();
regNumber hiSrcReg = src->gtGetOp2()->GetRegNum();
regNumber dstReg = cast->GetRegNum();
assert((dstType == TYP_INT) || (dstType == TYP_UINT));
assert(genIsValidIntReg(loSrcReg));
assert(genIsValidIntReg(hiSrcReg));
assert(genIsValidIntReg(dstReg));
if (cast->gtOverflow())
{
//
// Generate an overflow check for [u]long to [u]int casts:
//
// long -> int - check if the upper 33 bits are all 0 or all 1
//
// ulong -> int - check if the upper 33 bits are all 0
//
// long -> uint - check if the upper 32 bits are all 0
// ulong -> uint - check if the upper 32 bits are all 0
//
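// For illustration, the (long -> int) check is emitted roughly as:
//   tst loSrc, loSrc
//   bmi allOne            // low word is negative => upper 33 bits must be all 1
//   tst hiSrc, hiSrc
//   bne <throw overflow>  // low word non-negative => high word must be 0
//   b success
// allOne:
//   cmp hiSrc, #-1
//   bne <throw overflow>
// success:
//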
if ((srcType == TYP_LONG) && (dstType == TYP_INT))
{
BasicBlock* allOne = genCreateTempLabel();
BasicBlock* success = genCreateTempLabel();
inst_RV_RV(INS_tst, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
inst_JMP(EJ_mi, allOne);
inst_RV_RV(INS_tst, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
inst_JMP(EJ_jmp, success);
genDefineTempLabel(allOne);
inst_RV_IV(INS_cmp, hiSrcReg, -1, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
genDefineTempLabel(success);
}
else
{
if ((srcType == TYP_ULONG) && (dstType == TYP_INT))
{
inst_RV_RV(INS_tst, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_mi, SCK_OVERFLOW);
}
inst_RV_RV(INS_tst, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
}
}
inst_Mov(TYP_INT, dstReg, loSrcReg, /* canSkip */ true);
genProduceReg(cast);
}
//------------------------------------------------------------------------
// genIntToFloatCast: Generate code to cast an int to float/double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType = int32/uint32 and DstType = float/double (other source sizes use helper calls).
//
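// For illustration, an int32 -> float cast is emitted roughly as:
//   vmov         sDst, rSrc // move the integer bits into the float register
//   vcvt.f32.s32 sDst, sDst // convert in place
// (with the unsigned/double variants of vcvt for uint32 sources / TYP_DOUBLE targets).
//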
void CodeGen::genIntToFloatCast(GenTree* treeNode)
{
// int --> float/double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidIntReg(op1->GetRegNum())); // Must be a valid int reg.
var_types dstType = treeNode->CastToType();
var_types srcType = genActualType(op1->TypeGet());
assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
// force the srcType to unsigned if the GTF_UNSIGNED flag is set
if (treeNode->gtFlags & GTF_UNSIGNED)
{
srcType = varTypeToUnsigned(srcType);
}
// We only expect a srcType whose size is EA_4BYTE.
emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
noway_assert(srcSize == EA_4BYTE);
instruction insVcvt = INS_invalid;
if (dstType == TYP_DOUBLE)
{
insVcvt = (varTypeIsUnsigned(srcType)) ? INS_vcvt_u2d : INS_vcvt_i2d;
}
else
{
assert(dstType == TYP_FLOAT);
insVcvt = (varTypeIsUnsigned(srcType)) ? INS_vcvt_u2f : INS_vcvt_i2f;
}
// All other casts are implemented by different CORINFO_HELP_XX2XX helpers;
// see Compiler::fgMorphCast()
genConsumeOperands(treeNode->AsOp());
assert(insVcvt != INS_invalid);
GetEmitter()->emitIns_Mov(INS_vmov_i2f, srcSize, treeNode->GetRegNum(), op1->GetRegNum(), /* canSkip */ false);
GetEmitter()->emitIns_R_R(insVcvt, srcSize, treeNode->GetRegNum(), treeNode->GetRegNum());
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genFloatToIntCast: Generate code to cast float/double to int
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType = float/double and DstType = int32/uint32 (other destination sizes use helper calls).
//
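// For illustration, a float -> int32 cast is emitted roughly as:
//   vcvt.s32.f32 sTmp, sSrc // convert into a float temp register
//   vmov         rDst, sTmp // move the integer bits to the target register
// (with the unsigned/double variants of vcvt for uint32 targets / TYP_DOUBLE sources).
//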
void CodeGen::genFloatToIntCast(GenTree* treeNode)
{
// we don't expect to see overflow detecting float/double --> int type conversions here
// as they should have been converted into helper calls by front-end.
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidIntReg(targetReg)); // Must be a valid int reg.
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidFloatReg(op1->GetRegNum())); // Must be a valid float reg.
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
// We only expect a dstType whose size is EA_4BYTE.
// For conversions to small types (byte/sbyte/int16/uint16) from float/double,
// we expect the front-end or lowering phase to have generated two levels of cast.
//
emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
noway_assert(dstSize == EA_4BYTE);
instruction insVcvt = INS_invalid;
if (srcType == TYP_DOUBLE)
{
insVcvt = (varTypeIsUnsigned(dstType)) ? INS_vcvt_d2u : INS_vcvt_d2i;
}
else
{
assert(srcType == TYP_FLOAT);
insVcvt = (varTypeIsUnsigned(dstType)) ? INS_vcvt_f2u : INS_vcvt_f2i;
}
// All other casts are implemented by different CORINFO_HELP_XX2XX helpers;
// see Compiler::fgMorphCast()
genConsumeOperands(treeNode->AsOp());
regNumber tmpReg = treeNode->GetSingleTempReg();
assert(insVcvt != INS_invalid);
GetEmitter()->emitIns_R_R(insVcvt, dstSize, tmpReg, op1->GetRegNum());
GetEmitter()->emitIns_Mov(INS_vmov_f2i, dstSize, treeNode->GetRegNum(), tmpReg, /* canSkip */ false);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genEmitHelperCall: Emit a call to a helper function.
//
void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg /*= REG_NA */)
{
// Can we call the helper function directly
void *addr = NULL, **pAddr = NULL;
#if defined(DEBUG) && defined(PROFILING_SUPPORTED)
// Don't ask VM if it hasn't requested ELT hooks
if (!compiler->compProfilerHookNeeded && compiler->opts.compJitELTHookEnabled &&
(helper == CORINFO_HELP_PROF_FCN_ENTER || helper == CORINFO_HELP_PROF_FCN_LEAVE ||
helper == CORINFO_HELP_PROF_FCN_TAILCALL))
{
addr = compiler->compProfilerMethHnd;
}
else
#endif
{
addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, (void**)&pAddr);
}
if (!addr || !validImmForBL((ssize_t)addr))
{
if (callTargetReg == REG_NA)
{
// If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
// this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
}
// Load the address into a register and call through a register
if (addr)
{
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, callTargetReg, (ssize_t)addr);
}
else
{
GetEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, callTargetReg, (ssize_t)pAddr);
regSet.verifyRegUsed(callTargetReg);
}
GetEmitter()->emitIns_Call(emitter::EC_INDIR_R, compiler->eeFindHelper(helper),
INDEBUG_LDISASM_COMMA(nullptr) NULL, // addr
argSize, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, DebugInfo(),
callTargetReg, // ireg
REG_NA, 0, 0, // xreg, xmul, disp
false // isJump
);
}
else
{
GetEmitter()->emitIns_Call(emitter::EC_FUNC_TOKEN, compiler->eeFindHelper(helper),
INDEBUG_LDISASM_COMMA(nullptr) addr, argSize, retSize, gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, DebugInfo(), REG_NA, REG_NA, 0,
0, /* ilOffset, ireg, xreg, xmul, disp */
false /* isJump */
);
}
regSet.verifyRegistersUsed(RBM_CALLEE_TRASH);
}
#ifdef PROFILING_SUPPORTED
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed set to 'false' if 'initReg' is
// not zero after this call.
//
// Return Value:
// None
//
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
// Give profiler a chance to back out of hooking this method
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
// On Arm arguments are prespilled on stack, which frees r0-r3.
// For generating Enter callout we would need two registers and one of them has to be r0 to pass profiler handle.
// The call target register could be any free register.
regNumber argReg = REG_PROFILER_ENTER_ARG;
regMaskTP argRegMask = genRegMask(argReg);
assert((regSet.rsMaskPreSpillRegArg & argRegMask) != 0);
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, argReg, (ssize_t)compiler->compProfilerMethHnd);
regSet.verifyRegUsed(argReg);
}
else
{
instGen_Set_Reg_To_Imm(EA_4BYTE, argReg, (ssize_t)compiler->compProfilerMethHnd);
}
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER,
0, // argSize. Again, we have to lie about it
EA_UNKNOWN); // retSize
if (initReg == argReg)
{
*pInitRegZeroed = false;
}
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
// Only hook if profiler says it's okay.
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
//
// Push the profilerHandle
//
// Contract between JIT and Profiler Leave callout on arm:
// Return size <= 4 bytes: REG_PROFILER_RET_SCRATCH will contain return value
// Return size > 4 and <= 8: <REG_PROFILER_RET_SCRATCH,r1> will contain return value.
// Floating point or double or HFA return values will be in s0-s15 in case of non-vararg methods.
// It is assumed that profiler Leave callback doesn't trash registers r1,REG_PROFILER_RET_SCRATCH and s0-s15.
//
// In the following cases r0 doesn't contain a return value and hence need not be preserved before emitting Leave
// callback.
bool r0InUse;
emitAttr attr = EA_UNKNOWN;
if (helper == CORINFO_HELP_PROF_FCN_TAILCALL)
{
// For the tail call case, the helper call is introduced during lower,
// so the allocator will arrange things so R0 is not in use here.
//
// For the tail jump case, all reg args have been spilled via genJmpMethod,
// so R0 is likewise not in use.
r0InUse = false;
}
else if (compiler->info.compRetType == TYP_VOID)
{
r0InUse = false;
}
else if (varTypeIsFloating(compiler->info.compRetType) ||
compiler->IsHfa(compiler->info.compMethodInfo->args.retTypeClass))
{
r0InUse = compiler->info.compIsVarArgs || compiler->opts.compUseSoftFP;
}
else
{
r0InUse = true;
}
if (r0InUse)
{
if (varTypeIsGC(compiler->info.compRetNativeType))
{
attr = emitActualTypeSize(compiler->info.compRetNativeType);
}
else if (compiler->compMethodReturnsRetBufAddr())
{
attr = EA_BYREF;
}
else
{
attr = EA_PTRSIZE;
}
}
if (r0InUse)
{
// Has a return value and r0 is in use. For emitting Leave profiler callout we would need r0 for passing
// profiler handle. Therefore, r0 is moved to REG_PROFILER_RET_SCRATCH as per contract.
GetEmitter()->emitIns_Mov(INS_mov, attr, REG_PROFILER_RET_SCRATCH, REG_R0, /* canSkip */ false);
genTransferRegGCState(REG_PROFILER_RET_SCRATCH, REG_R0);
regSet.verifyRegUsed(REG_PROFILER_RET_SCRATCH);
}
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, REG_R0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_R0, (ssize_t)compiler->compProfilerMethHnd);
}
gcInfo.gcMarkRegSetNpt(RBM_R0);
regSet.verifyRegUsed(REG_R0);
genEmitHelperCall(helper,
0, // argSize
EA_UNKNOWN); // retSize
// Restore state that existed before profiler callback
if (r0InUse)
{
GetEmitter()->emitIns_Mov(INS_mov, attr, REG_R0, REG_PROFILER_RET_SCRATCH, /* canSkip */ false);
genTransferRegGCState(REG_R0, REG_PROFILER_RET_SCRATCH);
gcInfo.gcMarkRegSetNpt(RBM_PROFILER_RET_SCRATCH);
}
}
#endif // PROFILING_SUPPORTED
//------------------------------------------------------------------------
// genEstablishFramePointer: Set up the frame pointer by adding an offset to the stack pointer.
//
// Arguments:
// delta - the offset to add to the current stack pointer to establish the frame pointer
// reportUnwindData - true if establishing the frame pointer should be reported in the OS unwind data.
//
void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData)
{
assert(compiler->compGeneratingProlog);
assert(arm_Valid_Imm_For_Add_SP(delta));
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
if (reportUnwindData)
{
compiler->unwindPadding();
}
}
//------------------------------------------------------------------------
// genAllocLclFrame: Probe the stack and allocate the local stack frame - subtract from SP.
//
// Notes:
// The first instruction of the prolog is always a push (which touches the lowest address
// of the stack), either of the LR register or of some argument registers, e.g., in the case of
// pre-spilling. The LR register is always pushed because we require it to allow for GC return
// address hijacking (see the comment in CodeGen::genPushCalleeSavedRegisters()). These pushes
// happen immediately before calling this function, so the SP at the current location has already
// been touched.
//
// Arguments:
// frameSize - the size of the stack frame being allocated.
// initReg - register to use as a scratch register.
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
// maskArgRegsLiveIn - incoming argument registers that are currently live.
//
// Return value:
// None
//
void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn)
{
assert(compiler->compGeneratingProlog);
if (frameSize == 0)
{
return;
}
const target_size_t pageSize = compiler->eeGetPageSize();
assert(!compiler->info.compPublishStubParam || (REG_SECRET_STUB_PARAM != initReg));
if (frameSize < pageSize)
{
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, frameSize);
}
else
{
// Generate the following code:
//
// movw r4, #frameSize
// sub r4, sp, r4
// bl CORINFO_HELP_STACK_PROBE
// mov sp, r4
//
// If frameSize cannot be encoded as a movw immediate, this becomes:
//
// movw r4, #frameSizeLo16
// movt r4, #frameSizeHi16
// sub r4, sp, r4
// bl CORINFO_HELP_STACK_PROBE
// mov sp, r4
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_STACK_PROBE_HELPER_ARG, REG_SPBASE, frameSize,
INS_FLAGS_DONT_CARE, REG_STACK_PROBE_HELPER_ARG);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_ARG);
genEmitHelperCall(CORINFO_HELP_STACK_PROBE, 0, EA_UNKNOWN, REG_STACK_PROBE_HELPER_CALL_TARGET);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_CALL_TARGET);
compiler->unwindPadding();
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG, /* canSkip */ false);
if ((genRegMask(initReg) & (RBM_STACK_PROBE_HELPER_ARG | RBM_STACK_PROBE_HELPER_CALL_TARGET |
RBM_STACK_PROBE_HELPER_TRASH)) != RBM_NONE)
{
*pInitRegZeroed = false;
}
}
compiler->unwindAllocStack(frameSize);
#ifdef USING_SCOPE_INFO
if (!doubleAlignOrFramePointerUsed())
{
psiAdjustStackLevel(frameSize);
}
#endif // USING_SCOPE_INFO
}
void CodeGen::genPushFltRegs(regMaskTP regMask)
{
assert(regMask != 0);                        // Don't call unless we have some registers to push
assert((regMask & RBM_ALLFLOAT) == regMask); // Only floating point registers should be in regMask
regNumber lowReg = genRegNumFromMask(genFindLowestBit(regMask));
int slots = genCountBits(regMask);
// regMask should be contiguously set
regMaskTP tmpMask = ((regMask >> lowReg) + 1); // tmpMask should have a single bit set
assert((tmpMask & (tmpMask - 1)) == 0);
assert(lowReg == REG_F16); // Currently we expect to start at F16 in the unwind codes
// Our calling convention requires that we only use vpush for TYP_DOUBLE registers
noway_assert(floatRegCanHoldType(lowReg, TYP_DOUBLE));
noway_assert((slots % 2) == 0);
GetEmitter()->emitIns_R_I(INS_vpush, EA_8BYTE, lowReg, slots / 2);
}
void CodeGen::genPopFltRegs(regMaskTP regMask)
{
assert(regMask != 0);                        // Don't call unless we have some registers to pop
assert((regMask & RBM_ALLFLOAT) == regMask); // Only floating point registers should be in regMask
regNumber lowReg = genRegNumFromMask(genFindLowestBit(regMask));
int slots = genCountBits(regMask);
// regMask should be contiguously set
regMaskTP tmpMask = ((regMask >> lowReg) + 1); // tmpMask should have a single bit set
assert((tmpMask & (tmpMask - 1)) == 0);
// Our calling convention requires that we only use vpop for TYP_DOUBLE registers
noway_assert(floatRegCanHoldType(lowReg, TYP_DOUBLE));
noway_assert((slots % 2) == 0);
GetEmitter()->emitIns_R_I(INS_vpop, EA_8BYTE, lowReg, slots / 2);
}
//------------------------------------------------------------------------
// genFreeLclFrame: free the local stack frame by adding `frameSize` to SP.
//
// Arguments:
// frameSize - the frame size to free;
// pUnwindStarted - was epilog unwind started or not.
//
// Notes:
// If epilog unwind hasn't been started, and we generate code, we start unwind
// and set *pUnwindStarted = true.
//
void CodeGen::genFreeLclFrame(unsigned frameSize, /* IN OUT */ bool* pUnwindStarted)
{
assert(compiler->compGeneratingEpilog);
if (frameSize == 0)
return;
// Add 'frameSize' to SP.
//
// Unfortunately, we can't just use:
//
// inst_RV_IV(INS_add, REG_SPBASE, frameSize, EA_PTRSIZE);
//
// because we need to generate proper unwind codes for each instruction generated,
// and large frame sizes might generate a temp register load which might
// need an unwind code. We don't want to generate a "NOP" code for this
// temp register load; we want the unwind codes to start after that.
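// For illustration, freeing a frame too large for an immediate 'add' is emitted roughly as:
//   movw r12, #frameSizeLo16
//   movt r12, #frameSizeHi16   // only if needed
//   add  sp, r12
// with the epilog unwind codes starting at the final 'add'.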
if (arm_Valid_Imm_For_Instr(INS_add, frameSize, INS_FLAGS_DONT_CARE))
{
if (!*pUnwindStarted)
{
compiler->unwindBegEpilog();
*pUnwindStarted = true;
}
GetEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, frameSize, INS_FLAGS_DONT_CARE);
}
else
{
// R12 doesn't hold arguments or return values, so can be used as temp.
regNumber tmpReg = REG_R12;
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, frameSize);
if (*pUnwindStarted)
{
compiler->unwindPadding();
}
// We're going to generate an unwindable instruction, so check again if
// we need to start the unwind codes.
if (!*pUnwindStarted)
{
compiler->unwindBegEpilog();
*pUnwindStarted = true;
}
GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, REG_SPBASE, tmpReg, INS_FLAGS_DONT_CARE);
}
compiler->unwindAllocStack(frameSize);
}
/*-----------------------------------------------------------------------------
*
* Move of relocatable displacement value to register
*/
void CodeGen::genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg)
{
GetEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block, reg);
GetEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block, reg);
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
{
GetEmitter()->emitIns_R_R_R(INS_add, EA_4BYTE_DSP_RELOC, reg, reg, REG_PC);
}
}
/*-----------------------------------------------------------------------------
*
* Move of relocatable data-label to register
*/
void CodeGen::genMov32RelocatableDataLabel(unsigned value, regNumber reg)
{
GetEmitter()->emitIns_R_D(INS_movw, EA_HANDLE_CNS_RELOC, value, reg);
GetEmitter()->emitIns_R_D(INS_movt, EA_HANDLE_CNS_RELOC, value, reg);
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
{
GetEmitter()->emitIns_R_R_R(INS_add, EA_HANDLE_CNS_RELOC, reg, reg, REG_PC);
}
}
/*-----------------------------------------------------------------------------
*
* Move of relocatable immediate to register
*/
void CodeGen::genMov32RelocatableImmediate(emitAttr size, BYTE* addr, regNumber reg)
{
_ASSERTE(EA_IS_RELOC(size));
GetEmitter()->emitIns_MovRelocatableImmediate(INS_movw, size, reg, addr);
GetEmitter()->emitIns_MovRelocatableImmediate(INS_movt, size, reg, addr);
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
{
GetEmitter()->emitIns_R_R_R(INS_add, size, reg, reg, REG_PC);
}
}
/*-----------------------------------------------------------------------------
*
* Returns register mask to push/pop to allocate a small stack frame,
* instead of using "sub sp" / "add sp". Returns RBM_NONE if either frame size
* is zero, or if we should use "sub sp" / "add sp" instead of push/pop.
*/
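// For example, a one-register-sized frame can be allocated with "push {r3}" in the prolog
// and freed with "pop {r3}" in the epilog, instead of "sub sp, #4" / "add sp, #4".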
regMaskTP CodeGen::genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskCalleeSavedFloat)
{
assert(compiler->compGeneratingProlog || compiler->compGeneratingEpilog);
// We can't do this optimization with callee saved floating point registers because
// the stack would be allocated in a wrong spot.
if (maskCalleeSavedFloat != RBM_NONE)
return RBM_NONE;
// Allocate space for small frames by pushing extra registers. This generates smaller and faster code
// than an extra sub sp,XXX / add sp,XXX.
// R0 and R1 may be used by return value. Keep things simple and just skip the optimization
// for the 3*REGSIZE_BYTES and 4*REGSIZE_BYTES cases. They are less common and they have more
// significant negative side-effects (more memory bus traffic).
switch (frameSize)
{
case REGSIZE_BYTES:
return RBM_R3;
case 2 * REGSIZE_BYTES:
return RBM_R2 | RBM_R3;
default:
return RBM_NONE;
}
}
//-----------------------------------------------------------------------------------
// instGen_MemoryBarrier: Emit a MemoryBarrier instruction
//
// Arguments:
// barrierKind - kind of barrier to emit (ignored on arm32)
//
// Notes:
// All MemoryBarrier instructions can be removed with DOTNET_JitNoMemoryBarriers=1
// barrierKind argument is ignored on arm32 and a full memory barrier is emitted
//
void CodeGen::instGen_MemoryBarrier(BarrierKind barrierKind)
{
#ifdef DEBUG
if (JitConfig.JitNoMemoryBarriers() == 1)
{
return;
}
#endif // DEBUG
// Avoid emitting redundant memory barriers on arm32 if they belong to the same IG
// and there were no memory accesses in-between them
if ((GetEmitter()->emitLastMemBarrier != nullptr) && compiler->opts.OptimizationEnabled())
{
assert(GetEmitter()->emitLastMemBarrier->idSmallCns() == INS_BARRIER_SY);
}
else
{
// ARM has only full barriers, so all barriers need to be emitted as full.
GetEmitter()->emitIns_I(INS_dmb, EA_4BYTE, INS_BARRIER_SY);
}
}
bool CodeGen::genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
return !jmpEpilog && (regSet.rsMaskPreSpillRegs(true) == RBM_NONE);
}
void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
regMaskTP maskPopRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED;
regMaskTP maskPopRegsFloat = maskPopRegs & RBM_ALLFLOAT;
regMaskTP maskPopRegsInt = maskPopRegs & ~maskPopRegsFloat;
// First, pop float registers
if (maskPopRegsFloat != RBM_NONE)
{
genPopFltRegs(maskPopRegsFloat);
compiler->unwindPopMaskFloat(maskPopRegsFloat);
}
// Next, pop integer registers
if (!jmpEpilog)
{
regMaskTP maskStackAlloc = genStackAllocRegisterMask(compiler->compLclFrameSize, maskPopRegsFloat);
maskPopRegsInt |= maskStackAlloc;
}
if (isFramePointerUsed())
{
assert(!regSet.rsRegsModified(RBM_FPBASE));
maskPopRegsInt |= RBM_FPBASE;
}
if (genCanUsePopToReturn(maskPopRegsInt, jmpEpilog))
{
maskPopRegsInt |= RBM_PC;
// Record the fact that we use a pop to the PC to perform the return
genUsedPopToReturn = true;
}
else
{
maskPopRegsInt |= RBM_LR;
// Record the fact that we did not use a pop to the PC to perform the return
genUsedPopToReturn = false;
}
assert(FitsIn<int>(maskPopRegsInt));
inst_IV(INS_pop, (int)maskPopRegsInt);
compiler->unwindPopMaskInt(maskPopRegsInt);
}
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
* Funclets have the following incoming arguments:
*
* catch: r0 = the exception object that was caught (see GT_CATCH_ARG)
* filter: r0 = the exception object to filter (see GT_CATCH_ARG), r1 = CallerSP of the containing function
* finally/fault: none
*
* Funclets set the following registers on exit:
*
* catch: r0 = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: r0 = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* The ARM funclet prolog sequence is:
*
* push {regs,lr} ; We push the callee-saved regs and 'lr'.
* ; TODO-ARM-CQ: We probably only need to save lr, plus any callee-save registers that we
* ; actually use in the funclet. Currently, we save the same set of callee-saved regs
* ; calculated for the entire function.
* sub sp, XXX ; Establish the rest of the frame.
* ; XXX is determined by lvaOutgoingArgSpaceSize plus space for the PSP slot, aligned
* ; up to preserve stack alignment. If we push an odd number of registers, we also
* ; generate this, to keep the stack aligned.
*
* ; Fill the PSP slot, for use by the VM (it gets reported with the GC info), or by code generation of nested
* ; filters.
* ; This is not part of the "OS prolog"; it has no associated unwind data, and is not reversed in the funclet
* ; epilog.
*
* if (this is a filter funclet)
* {
* // r1 on entry to a filter funclet is CallerSP of the containing function:
* // either the main function, or the funclet for a handler that this filter is dynamically nested within.
* // Note that a filter can be dynamically nested within a funclet even if it is not statically within
* // a funclet. Consider:
* //
* // try {
* // try {
* // throw new Exception();
* // } catch(Exception) {
* // throw new Exception(); // The exception thrown here ...
* // }
* // } filter { // ... will be processed here, while the "catch" funclet frame is
* // // still on the stack
* // } filter-handler {
* // }
* //
* // Because of this, we need a PSP in the main function anytime a filter funclet doesn't know whether the
* // enclosing frame will be a funclet or main function. We won't know any time there is a filter protecting
* // nested EH. To simplify, we just always create a main function PSP for any function with a filter.
*
* ldr r1, [r1 - PSP_slot_CallerSP_offset] ; Load the CallerSP of the main function (stored in the PSP of
* ; the dynamically containing funclet or function)
* str r1, [sp + PSP_slot_SP_offset] ; store the PSP
* sub r11, r1, Function_CallerSP_to_FP_delta ; re-establish the frame pointer
* }
* else
* {
* // This is NOT a filter funclet. The VM re-establishes the frame pointer on entry.
* // TODO-ARM-CQ: if VM set r1 to CallerSP on entry, like for filters, we could save an instruction.
*
* add r3, r11, Function_CallerSP_to_FP_delta ; compute the CallerSP, given the frame pointer. r3 is scratch.
* str r3, [sp + PSP_slot_SP_offset] ; store the PSP
* }
*
* The epilog sequence is then:
*
* add sp, XXX ; if necessary
* pop {regs,pc}
*
* If it is worth it, we could push r0, r1, r2, r3 instead of using an additional add/sub instruction.
* Code size would be smaller, but we would be writing to / reading from the stack, which might be slow.
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming |
* | arguments |
* +=======================+ <---- Caller's SP
* |Callee saved registers |
* |-----------------------|
* |Pre-spill regs space | // This is only necessary to keep the PSP slot at the same offset
* | | // in function and funclet
* |-----------------------|
* | PSP slot | // Omitted in CoreRT ABI
* |-----------------------|
* ~ possible 4 byte pad ~
* ~ for alignment ~
* |-----------------------|
* | Outgoing arg space |
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*/
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFuncletProlog()\n");
#endif
assert(block != NULL);
assert(block->bbFlags & BBF_FUNCLET_BEG);
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
regMaskTP maskPushRegsFloat = genFuncletInfo.fiSaveRegs & RBM_ALLFLOAT;
regMaskTP maskPushRegsInt = genFuncletInfo.fiSaveRegs & ~maskPushRegsFloat;
regMaskTP maskStackAlloc = genStackAllocRegisterMask(genFuncletInfo.fiSpDelta, maskPushRegsFloat);
maskPushRegsInt |= maskStackAlloc;
assert(FitsIn<int>(maskPushRegsInt));
inst_IV(INS_push, (int)maskPushRegsInt);
compiler->unwindPushMaskInt(maskPushRegsInt);
if (maskPushRegsFloat != RBM_NONE)
{
genPushFltRegs(maskPushRegsFloat);
compiler->unwindPushMaskFloat(maskPushRegsFloat);
}
bool isFilter = (block->bbCatchTyp == BBCT_FILTER);
regMaskTP maskArgRegsLiveIn;
if (isFilter)
{
maskArgRegsLiveIn = RBM_R0 | RBM_R1;
}
else if ((block->bbCatchTyp == BBCT_FINALLY) || (block->bbCatchTyp == BBCT_FAULT))
{
maskArgRegsLiveIn = RBM_NONE;
}
else
{
maskArgRegsLiveIn = RBM_R0;
}
regNumber initReg = REG_R3; // R3 is never live on entry to a funclet, so it can be trashed
bool initRegZeroed = false;
if (maskStackAlloc == RBM_NONE)
{
genAllocLclFrame(genFuncletInfo.fiSpDelta, initReg, &initRegZeroed, maskArgRegsLiveIn);
}
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// If there is no PSPSym (CoreRT ABI), we are done.
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
if (isFilter)
{
// This is the first block of a filter
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, REG_R1, REG_R1, genFuncletInfo.fiPSP_slot_CallerSP_offset);
regSet.verifyRegUsed(REG_R1);
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_R1, REG_SPBASE, genFuncletInfo.fiPSP_slot_SP_offset);
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_FPBASE, REG_R1,
genFuncletInfo.fiFunctionCallerSPtoFPdelta);
}
else
{
// This is a non-filter funclet
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_R3, REG_FPBASE,
genFuncletInfo.fiFunctionCallerSPtoFPdelta);
regSet.verifyRegUsed(REG_R3);
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_R3, REG_SPBASE, genFuncletInfo.fiPSP_slot_SP_offset);
}
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFuncletEpilog()\n");
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
// Just as for the main function, we delay starting the unwind codes until we have
// an instruction which we know needs an unwind code. This is to support code like
// this:
// movw r3, 0x38e0
// add sp, r3
// pop {r4,r5,r6,r10,r11,pc}
// where the "movw" shouldn't be part of the unwind codes. See genFnEpilog() for more details.
bool unwindStarted = false;
/* The saved regs info saves the LR register. We need to pop the PC register to return */
assert(genFuncletInfo.fiSaveRegs & RBM_LR);
regMaskTP maskPopRegsFloat = genFuncletInfo.fiSaveRegs & RBM_ALLFLOAT;
regMaskTP maskPopRegsInt = genFuncletInfo.fiSaveRegs & ~maskPopRegsFloat;
regMaskTP maskStackAlloc = genStackAllocRegisterMask(genFuncletInfo.fiSpDelta, maskPopRegsFloat);
maskPopRegsInt |= maskStackAlloc;
if (maskStackAlloc == RBM_NONE)
{
genFreeLclFrame(genFuncletInfo.fiSpDelta, &unwindStarted);
}
if (!unwindStarted)
{
// We'll definitely generate an unwindable instruction next
compiler->unwindBegEpilog();
unwindStarted = true;
}
maskPopRegsInt &= ~RBM_LR;
maskPopRegsInt |= RBM_PC;
if (maskPopRegsFloat != RBM_NONE)
{
genPopFltRegs(maskPopRegsFloat);
compiler->unwindPopMaskFloat(maskPopRegsFloat);
}
assert(FitsIn<int>(maskPopRegsInt));
inst_IV(INS_pop, (int)maskPopRegsInt);
compiler->unwindPopMaskInt(maskPopRegsInt);
compiler->unwindEndEpilog();
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
* Note that all funclet prologs are identical, and all funclet epilogs are
* identical (per type: filters are identical, and non-filters are identical).
* Thus, we compute the data used for these just once.
*
* See genFuncletProlog() for more information about the prolog/epilog sequences.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (compiler->ehAnyFunclets())
{
assert(isFramePointerUsed());
assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT); // The frame size and offsets must be
// finalized
// Frame pointer doesn't point at the end, it points at the pushed r11. So, instead
// of adding the number of callee-saved regs to CallerSP, we add 1 for lr and 1 for r11
// (plus the "pre spill regs"). Note that we assume r12 and r13 aren't saved
// (also assumed in genFnProlog()).
assert((regSet.rsMaskCalleeSaved & (RBM_R12 | RBM_R13)) == 0);
unsigned preSpillRegArgSize = genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
genFuncletInfo.fiFunctionCallerSPtoFPdelta = preSpillRegArgSize + 2 * REGSIZE_BYTES;
regMaskTP rsMaskSaveRegs = regSet.rsMaskCalleeSaved;
unsigned saveRegsCount = genCountBits(rsMaskSaveRegs);
unsigned saveRegsSize = saveRegsCount * REGSIZE_BYTES; // bytes of regs we're saving
assert(compiler->lvaOutgoingArgSpaceSize % REGSIZE_BYTES == 0);
unsigned funcletFrameSize =
preSpillRegArgSize + saveRegsSize + REGSIZE_BYTES /* PSP slot */ + compiler->lvaOutgoingArgSpaceSize;
unsigned funcletFrameSizeAligned = roundUp(funcletFrameSize, STACK_ALIGN);
unsigned funcletFrameAlignmentPad = funcletFrameSizeAligned - funcletFrameSize;
unsigned spDelta = funcletFrameSizeAligned - saveRegsSize;
unsigned PSP_slot_SP_offset = compiler->lvaOutgoingArgSpaceSize + funcletFrameAlignmentPad;
int PSP_slot_CallerSP_offset =
-(int)(funcletFrameSize - compiler->lvaOutgoingArgSpaceSize); // NOTE: it's negative!
/* Now save it for future use */
genFuncletInfo.fiSaveRegs = rsMaskSaveRegs;
genFuncletInfo.fiSpDelta = spDelta;
genFuncletInfo.fiPSP_slot_SP_offset = PSP_slot_SP_offset;
genFuncletInfo.fiPSP_slot_CallerSP_offset = PSP_slot_CallerSP_offset;
#ifdef DEBUG
if (verbose)
{
printf("\n");
printf("Funclet prolog / epilog info\n");
printf(" Function CallerSP-to-FP delta: %d\n", genFuncletInfo.fiFunctionCallerSPtoFPdelta);
printf(" Save regs: ");
dspRegMask(rsMaskSaveRegs);
printf("\n");
printf(" SP delta: %d\n", genFuncletInfo.fiSpDelta);
printf(" PSP slot SP offset: %d\n", genFuncletInfo.fiPSP_slot_SP_offset);
printf(" PSP slot Caller SP offset: %d\n", genFuncletInfo.fiPSP_slot_CallerSP_offset);
if (PSP_slot_CallerSP_offset != compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym))
{
printf("lvaGetCallerSPRelativeOffset(lvaPSPSym): %d\n",
compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym));
}
}
#endif // DEBUG
assert(PSP_slot_CallerSP_offset < 0);
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
assert(PSP_slot_CallerSP_offset ==
compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main
// function and funclet!
}
}
}
void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
noway_assert(isFramePointerUsed()); // We need an explicit frame pointer
// We either generate:
// add r1, r11, 8
// str r1, [reg + PSPSymOffset]
// or:
// add r1, sp, 76
// str r1, [reg + PSPSymOffset]
// depending on the smallest encoding
int SPtoCallerSPdelta = -genCallerSPtoInitialSPdelta();
int callerSPOffs;
regNumber regBase;
if (arm_Valid_Imm_For_Add_SP(SPtoCallerSPdelta))
{
// use the "add <reg>, sp, imm" form
callerSPOffs = SPtoCallerSPdelta;
regBase = REG_SPBASE;
}
else
{
// use the "add <reg>, r11, imm" form
int FPtoCallerSPdelta = -genCallerSPtoFPdelta();
noway_assert(arm_Valid_Imm_For_Add(FPtoCallerSPdelta, INS_FLAGS_DONT_CARE));
callerSPOffs = FPtoCallerSPdelta;
regBase = REG_FPBASE;
}
// We will just use the initReg since it is an available register
// and we are probably done using it anyway...
regNumber regTmp = initReg;
*pInitRegZeroed = false;
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, regTmp, regBase, callerSPOffs);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0);
}
void CodeGen::genInsertNopForUnwinder(BasicBlock* block)
{
// If this block is the target of a finally return, we need to add a preceding NOP, in the same EH region,
// so the unwinder doesn't get confused by our "movw lr, xxx; movt lr, xxx; b Lyyy" calling convention that
// calls the funclet during non-exceptional control flow.
if (block->bbFlags & BBF_FINALLY_TARGET)
{
assert(block->bbFlags & BBF_HAS_LABEL);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nEmitting finally target NOP predecessor for " FMT_BB "\n", block->bbNum);
}
#endif
// Create a label that we'll use for computing the start of an EH region, if this block is
// at the beginning of such a region. If we used the existing bbEmitCookie as is for
// determining the EH regions, then this NOP would end up outside of the region, if this
// block starts an EH region. If we pointed the existing bbEmitCookie here, then the NOP
// would be executed, which we would prefer not to do.
block->bbUnwindNopEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, false DEBUG_ARG(block));
instGen(INS_nop);
}
}
//-----------------------------------------------------------------------------
// genZeroInitFrameUsingBlockInit: architecture-specific helper for genZeroInitFrame in the case
// `genUseBlockInit` is set.
//
// Arguments:
// untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init
// code will end initializing memory (not inclusive).
// untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will
// start zero initializing memory.
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
assert(genUseBlockInit);
assert(untrLclHi > untrLclLo);
// Generate the following code:
//
// For cnt less than 10
//
// mov rZero1, 0
// mov rZero2, 0
// mov rCnt, <cnt>
// stm <rZero1,rZero2>,[rAddr!]
// <optional> stm <rZero1,rZero2>,[rAddr!]
// <optional> stm <rZero1,rZero2>,[rAddr!]
// <optional> stm <rZero1,rZero2>,[rAddr!]
// <optional> str rZero1,[rAddr]
//
// For rCnt greater than or equal to 10
//
// mov rZero1, 0
// mov rZero2, 0
// mov rCnt, <cnt/2>
// sub rAddr, sp, OFFS
//
// loop:
// stm <rZero1,rZero2>,[rAddr!]
// sub rCnt,rCnt,1
// jnz loop
//
// <optional> str rZero1,[rAddr] // When cnt is odd
regNumber rAddr;
regNumber rCnt = REG_NA; // Invalid
regMaskTP regMask;
regMaskTP availMask = regSet.rsGetModifiedRegsMask() | RBM_INT_CALLEE_TRASH; // Set of available registers
availMask &= ~intRegState.rsCalleeRegArgMaskLiveIn; // Remove all of the incoming argument registers as they are
// currently live
availMask &= ~genRegMask(initReg); // Remove the pre-calculated initReg as we will zero it and maybe use it for
// a large constant.
if (compiler->compLocallocUsed)
{
availMask &= ~RBM_SAVED_LOCALLOC_SP; // Remove the register reserved when we have a localloc frame
}
regNumber rZero1; // We're going to use initReg for rZero1
regNumber rZero2;
// We pick the next lowest register number for rZero2
noway_assert(availMask != RBM_NONE);
regMask = genFindLowestBit(availMask);
rZero2 = genRegNumFromMask(regMask);
availMask &= ~regMask;
assert((genRegMask(rZero2) & intRegState.rsCalleeRegArgMaskLiveIn) == 0); // rZero2 is not a live incoming
// argument reg
// We pick the next lowest register number for rAddr
noway_assert(availMask != RBM_NONE);
regMask = genFindLowestBit(availMask);
rAddr = genRegNumFromMask(regMask);
availMask &= ~regMask;
bool useLoop = false;
unsigned uCntBytes = untrLclHi - untrLclLo;
assert((uCntBytes % sizeof(int)) == 0); // The smallest stack slot is always 4 bytes.
unsigned uCntSlots = uCntBytes / REGSIZE_BYTES; // How many register sized stack slots we're going to use.
// When uCntSlots is 9 or less, we will emit a sequence of stm/stp instructions inline.
// When it is 10 or greater, we will emit a loop containing a stm/stp instruction.
// In both of these cases the stm/stp instruction will write two zeros to memory
// and we will use a single str instruction at the end whenever we have an odd count.
if (uCntSlots >= 10)
useLoop = true;
if (useLoop)
{
// We pick the next lowest register number for rCnt
noway_assert(availMask != RBM_NONE);
regMask = genFindLowestBit(availMask);
rCnt = genRegNumFromMask(regMask);
availMask &= ~regMask;
}
// rAddr is not a live incoming argument reg
assert((genRegMask(rAddr) & intRegState.rsCalleeRegArgMaskLiveIn) == 0);
if (arm_Valid_Imm_For_Add(untrLclLo, INS_FLAGS_DONT_CARE))
{
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, rAddr, genFramePointerReg(), untrLclLo);
}
else
{
// Load immediate into the InitReg register
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, (ssize_t)untrLclLo);
GetEmitter()->emitIns_R_R_R(INS_add, EA_PTRSIZE, rAddr, genFramePointerReg(), initReg);
*pInitRegZeroed = false;
}
if (useLoop)
{
noway_assert(uCntSlots >= 2);
assert((genRegMask(rCnt) & intRegState.rsCalleeRegArgMaskLiveIn) == 0); // rCnt is not a live incoming
// argument reg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, rCnt, (ssize_t)uCntSlots / 2);
}
rZero1 = genGetZeroReg(initReg, pInitRegZeroed);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, rZero2);
target_ssize_t stmImm = (target_ssize_t)(genRegMask(rZero1) | genRegMask(rZero2));
if (!useLoop)
{
while (uCntBytes >= REGSIZE_BYTES * 2)
{
GetEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm);
uCntBytes -= REGSIZE_BYTES * 2;
}
}
else
{
GetEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm); // zero stack slots
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, rCnt, 1, INS_FLAGS_SET);
GetEmitter()->emitIns_J(INS_bhi, NULL, -3);
uCntBytes %= REGSIZE_BYTES * 2;
}
if (uCntBytes >= REGSIZE_BYTES) // check and zero the last register-sized stack slot (odd number)
{
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, rZero1, rAddr, 0);
uCntBytes -= REGSIZE_BYTES;
}
noway_assert(uCntBytes == 0);
}
#endif // TARGET_ARM
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX ARM Code Generator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARM
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "emit.h"
//------------------------------------------------------------------------
// genInstrWithConstant: We will typically generate one instruction
//
// ins reg1, reg2, imm
//
// However the imm might not fit as a directly encodable immediate.
// When it doesn't fit we generate extra instruction(s) that sets up
// the 'regTmp' with the proper immediate value.
//
// mov regTmp, imm
// ins reg1, reg2, regTmp
//
// Generally, codegen constants are marked non-containable if they don't fit. This function
// is used for cases that aren't mirrored in the IR, such as in the prolog.
//
// Arguments:
// ins - instruction
// attr - operation size and GC attribute
// reg1, reg2 - first and second register operands
// imm - immediate value (third operand when it fits)
// flags - whether flags are set
// tmpReg - temp register to use when the 'imm' doesn't fit. Can be REG_NA
// if caller knows for certain the constant will fit.
//
// Return Value:
// returns true if the immediate was small enough to be encoded inside instruction. If not,
// returns false meaning the immediate was too large and tmpReg was used and modified.
//
bool CodeGen::genInstrWithConstant(
instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm, insFlags flags, regNumber tmpReg)
{
bool immFitsInIns = false;
// reg1 is usually a dest register
// reg2 is always source register
assert(tmpReg != reg2); // regTmp cannot match any source register
switch (ins)
{
case INS_add:
case INS_sub:
immFitsInIns = validImmForInstr(ins, (target_ssize_t)imm, flags);
break;
default:
assert(!"Unexpected instruction in genInstrWithConstant");
break;
}
if (immFitsInIns)
{
// generate a single instruction that encodes the immediate directly
GetEmitter()->emitIns_R_R_I(ins, attr, reg1, reg2, (target_ssize_t)imm);
}
else
{
// caller can specify REG_NA for tmpReg, when it "knows" that the immediate will always fit
assert(tmpReg != REG_NA);
// generate two or more instructions
// first we load the immediate into tmpReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, imm);
// generate the instruction using a three register encoding with the immediate in tmpReg
GetEmitter()->emitIns_R_R_R(ins, attr, reg1, reg2, tmpReg);
}
return immFitsInIns;
}
//------------------------------------------------------------------------
// genStackPointerAdjustment: add a specified constant value to the stack pointer.
// An available temporary register is required to be specified, in case the constant
// is too large to encode in an "add" instruction (or "sub" instruction if we choose
// to use one), such that we need to load the constant into a register first, before using it.
//
// Arguments:
// spDelta - the value to add to SP (can be negative)
// tmpReg - an available temporary register
//
// Return Value:
// returns true if the immediate was small enough to be encoded inside instruction. If not,
// returns false meaning the immediate was too large and tmpReg was used and modified.
//
bool CodeGen::genStackPointerAdjustment(ssize_t spDelta, regNumber tmpReg)
{
// Even though INS_add is specified here, the encoder will choose either
// an INS_add or an INS_sub and encode the immediate as a positive value
//
return genInstrWithConstant(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, spDelta, INS_FLAGS_DONT_CARE, tmpReg);
}
//------------------------------------------------------------------------
// genCallFinally: Generate a call to the finally block.
//
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
BasicBlock* bbFinallyRet = nullptr;
// We don't have retless calls, since we use the BBJ_ALWAYS to point at a NOP pad where
// we would have otherwise created retless calls.
assert(block->isBBCallAlwaysPair());
assert(block->bbNext != NULL);
assert(block->bbNext->bbJumpKind == BBJ_ALWAYS);
assert(block->bbNext->bbJumpDest != NULL);
assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
bbFinallyRet = block->bbNext->bbJumpDest;
// Load the address where the finally funclet should return into LR.
// The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return.
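// The emitted sequence is roughly (addresses illustrative):
//   movw lr, #<retAddr_lo16>
//   movt lr, #<retAddr_hi16>
//   b    <finally funclet>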
genMov32RelocatableDisplacement(bbFinallyRet, REG_LR);
// Jump to the finally BB
inst_JMP(EJ_jmp, block->bbJumpDest);
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
// jump target using bbJumpDest - that is already used to point
// to the finally block. So just skip past the BBJ_ALWAYS unless the
// block is RETLESS.
assert(!(block->bbFlags & BBF_RETLESS_CALL));
assert(block->isBBCallAlwaysPair());
return block->bbNext;
}
//------------------------------------------------------------------------
// genEHCatchRet:
void CodeGen::genEHCatchRet(BasicBlock* block)
{
genMov32RelocatableDisplacement(block->bbJumpDest, REG_INTRET);
}
//------------------------------------------------------------------------
// instGen_Set_Reg_To_Imm: Move an immediate value into an integer register.
//
void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags))
{
// reg cannot be a FP register
assert(!genIsValidFloatReg(reg));
if (!compiler->opts.compReloc)
{
size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
}
if (EA_IS_RELOC(size))
{
// TODO-CrossBitness: we wouldn't need the cast below if we had CodeGen::instGen_Set_Reg_To_Reloc_Imm.
genMov32RelocatableImmediate(size, (BYTE*)imm, reg);
}
else if (imm == 0)
{
instGen_Set_Reg_To_Zero(size, reg, flags);
}
else
{
// TODO-CrossBitness: we wouldn't need the cast below if we had CodeGen::instGen_Set_Reg_To_Reloc_Imm.
const int val32 = (int)imm;
if (validImmForMov(val32))
{
GetEmitter()->emitIns_R_I(INS_mov, size, reg, val32, flags);
}
else // We have to use a movw/movt pair of instructions
{
const int imm_lo16 = val32 & 0xffff;
const int imm_hi16 = (val32 >> 16) & 0xffff;
assert(validImmForMov(imm_lo16));
assert(imm_hi16 != 0);
GetEmitter()->emitIns_R_I(INS_movw, size, reg, imm_lo16);
// If we've got a low register, the high word is all bits set,
// and the high bit of the low word is set, we can sign extend
// halfword and save two bytes of encoding. This can happen for
// small magnitude negative numbers 'n' for -32768 <= n <= -1.
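// For example (illustrative), for imm == -2 on a low register this emits
//   movw rN, #0xFFFE
//   sxth rN, rN
// instead of the movw/movt pair.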
if (GetEmitter()->isLowRegister(reg) && (imm_hi16 == 0xffff) && ((imm_lo16 & 0x8000) == 0x8000))
{
GetEmitter()->emitIns_Mov(INS_sxth, EA_4BYTE, reg, reg, /* canSkip */ false);
}
else
{
GetEmitter()->emitIns_R_I(INS_movt, size, reg, imm_hi16);
}
if (flags == INS_FLAGS_SET)
GetEmitter()->emitIns_Mov(INS_mov, size, reg, reg, /* canSkip */ false, INS_FLAGS_SET);
}
}
regSet.verifyRegUsed(reg);
}
//------------------------------------------------------------------------
// genSetRegToConst: Generate code to set a register 'targetReg' of type 'targetType'
// to the constant specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'.
//
// Notes:
// This does not call genProduceReg() on the target register.
//
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree)
{
switch (tree->gtOper)
{
case GT_CNS_INT:
{
// relocatable values tend to come down as a CNS_INT of native int type
// so the line between these two opcodes is kind of blurry
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t cnsVal = con->IconValue();
emitAttr attr = emitActualTypeSize(targetType);
// TODO-CQ: Currently we cannot do this for all handles because of
// https://github.com/dotnet/runtime/issues/60712
if (con->ImmedValNeedsReloc(compiler))
{
attr = EA_SET_FLG(attr, EA_CNS_RELOC_FLG);
}
if (targetType == TYP_BYREF)
{
attr = EA_SET_FLG(attr, EA_BYREF_FLG);
}
instGen_Set_Reg_To_Imm(attr, targetReg, cnsVal);
regSet.verifyRegUsed(targetReg);
}
break;
case GT_CNS_DBL:
{
GenTreeDblCon* dblConst = tree->AsDblCon();
double constValue = dblConst->AsDblCon()->gtDconVal;
// TODO-ARM-CQ: Do we have a faster/smaller way to generate 0.0 in thumb2 ISA ?
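// A rough sketch (exact code depends on register allocation and on whether the bit
// pattern happens to be encodable as a mov immediate): loading float(pi), bits 0x40490FDB,
// looks like:
//   movw rTmp, #0x0FDB
//   movt rTmp, #0x4049
//   vmov sTarget, rTmp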
if (targetType == TYP_FLOAT)
{
// Get a temp integer register
regNumber tmpReg = tree->GetSingleTempReg();
float f = forceCastToFloat(constValue);
instGen_Set_Reg_To_Imm(EA_4BYTE, tmpReg, *((int*)(&f)));
GetEmitter()->emitIns_Mov(INS_vmov_i2f, EA_4BYTE, targetReg, tmpReg, /* canSkip */ false);
}
else
{
assert(targetType == TYP_DOUBLE);
unsigned* cv = (unsigned*)&constValue;
// Get two temp integer registers
regNumber tmpReg1 = tree->ExtractTempReg();
regNumber tmpReg2 = tree->GetSingleTempReg();
instGen_Set_Reg_To_Imm(EA_4BYTE, tmpReg1, cv[0]);
instGen_Set_Reg_To_Imm(EA_4BYTE, tmpReg2, cv[1]);
GetEmitter()->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, targetReg, tmpReg1, tmpReg2);
}
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genCodeForBinary: Generate code for many binary arithmetic operators
// This method is expected to have called genConsumeOperands() before calling it.
//
// Arguments:
// treeNode - The binary operation for which we are generating code.
//
// Return Value:
// None.
//
// Notes:
// Mul and div are not handled here.
// See the assert below for the operators that are handled.
void CodeGen::genCodeForBinary(GenTreeOp* treeNode)
{
const genTreeOps oper = treeNode->OperGet();
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
assert(treeNode->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_ADD_LO, GT_ADD_HI, GT_SUB_LO, GT_SUB_HI, GT_OR, GT_XOR, GT_AND,
GT_AND_NOT));
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
instruction ins = genGetInsForOper(oper, targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
noway_assert(targetReg != REG_NA);
if ((oper == GT_ADD_LO || oper == GT_SUB_LO))
{
// During decomposition, all operands become reg
assert(!op1->isContained() && !op2->isContained());
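// The _LO half is emitted with the flags set (adds/subs) so that the companion
// GT_ADD_HI/GT_SUB_HI node can consume the carry/borrow via adc/sbc.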
emit->emitIns_R_R_R(ins, emitTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(), op2->GetRegNum(),
INS_FLAGS_SET);
}
else
{
regNumber r = emit->emitInsTernary(ins, emitTypeSize(treeNode), treeNode, op1, op2);
assert(r == targetReg);
}
genProduceReg(treeNode);
}
//--------------------------------------------------------------------------------------
// genLclHeap: Generate code for localloc
//
// Description:
// There are two ways to generate code for localloc, depending on the build configuration:
// 1) For a debug build, where the memory should be initialized, we generate a loop
// that invokes push {tmpReg} N times.
// 2) For a non-debug build, we tickle the pages to ensure that SP is always
// valid and in sync with the "stack guard page". The number of iterations
// is N/eeGetPageSize().
//
// Comments:
// Some optimizations are possible:
// 1) No loop is needed for a zero-size allocation.
// 2) For small allocations (at most 4 pushes) we unroll the loop.
// 3) For allocations smaller than eeGetPageSize(), when the memory does not need to be
// zero-initialized, we can simply decrement SP.
//
// Notes: Size N should be aligned to STACK_ALIGN before any allocation
//
void CodeGen::genLclHeap(GenTree* tree)
{
assert(tree->OperGet() == GT_LCLHEAP);
assert(compiler->compLocallocUsed);
GenTree* size = tree->AsOp()->gtOp1;
noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
// The result of localloc will be returned in regCnt.
// It is also used as a temporary register during code generation
// to hold the allocation size.
regNumber regCnt = tree->GetRegNum();
var_types type = genActualType(size->gtType);
emitAttr easz = emitTypeSize(type);
BasicBlock* endLabel = nullptr;
unsigned stackAdjustment = 0;
regNumber regTmp = REG_NA;
const target_ssize_t ILLEGAL_LAST_TOUCH_DELTA = (target_ssize_t)-1;
target_ssize_t lastTouchDelta =
ILLEGAL_LAST_TOUCH_DELTA; // The number of bytes from SP to the last stack address probed.
noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
noway_assert(genStackLevel == 0); // Can't have anything on the stack
// Check for zero-size allocations.
if (size->IsCnsIntOrI())
{
// If size is a constant, then it must be contained.
assert(size->isContained());
// If amount is zero then return null in regCnt
size_t amount = size->AsIntCon()->gtIconVal;
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
goto BAILOUT;
}
}
else
{
// If 0 bail out by returning null in regCnt
genConsumeRegAndCopy(size, regCnt);
endLabel = genCreateTempLabel();
GetEmitter()->emitIns_R_R(INS_TEST, easz, regCnt, regCnt);
inst_JMP(EJ_eq, endLabel);
}
// Setup the regTmp, if there is one.
if (tree->AvailableTempRegCount() > 0)
{
regTmp = tree->ExtractTempReg();
}
// If we have an outgoing arg area then we must adjust the SP by popping off the
// outgoing arg area. We will restore it right before we return from this method.
if (compiler->lvaOutgoingArgSpaceSize > 0)
{
// This must be true for the stack to remain aligned
assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0);
// We're guaranteed (by LinearScan::BuildLclHeap()) to have a legal regTmp if we need one.
genStackPointerAdjustment(compiler->lvaOutgoingArgSpaceSize, regTmp);
stackAdjustment += compiler->lvaOutgoingArgSpaceSize;
}
// Put aligned allocation size to regCnt
if (size->IsCnsIntOrI())
{
// 'amount' is the total number of bytes to localloc to properly STACK_ALIGN
target_size_t amount = (target_size_t)size->AsIntCon()->gtIconVal;
amount = AlignUp(amount, STACK_ALIGN);
// For small allocations we will generate up to four push instructions (either 2 or 4, exactly,
// since STACK_ALIGN is 8, and REGSIZE_BYTES is 4).
static_assert_no_msg(STACK_ALIGN == (REGSIZE_BYTES * 2));
assert(amount % REGSIZE_BYTES == 0);
target_size_t pushCount = amount / REGSIZE_BYTES;
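// For example (illustrative), a 16-byte localloc (pushCount == 4) emits roughly:
//   mov  rCnt, #0
//   push {rCnt}
//   push {rCnt}
//   push {rCnt}
//   push {rCnt}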
if (pushCount <= 4)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
while (pushCount != 0)
{
inst_IV(INS_push, (unsigned)genRegMask(regCnt));
pushCount -= 1;
}
lastTouchDelta = 0;
goto ALLOC_DONE;
}
else if (!compiler->info.compInitMem && (amount < compiler->eeGetPageSize())) // must be < not <=
{
// Since the size is less than a page, simply adjust the SP value.
// The SP might already be in the guard page, so we must touch it BEFORE
// the alloc, not after.
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regCnt, REG_SP, 0);
inst_RV_IV(INS_sub, REG_SP, amount, EA_PTRSIZE);
lastTouchDelta = amount;
goto ALLOC_DONE;
}
// regCnt will be the total number of bytes to locAlloc
instGen_Set_Reg_To_Imm(EA_4BYTE, regCnt, amount);
}
else
{
// Round up the number of bytes to allocate to a STACK_ALIGN boundary.
inst_RV_IV(INS_add, regCnt, (STACK_ALIGN - 1), emitActualTypeSize(type));
inst_RV_IV(INS_AND, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
}
// Allocation
if (compiler->info.compInitMem)
{
// At this point 'regCnt' is set to the total number of bytes to localloc.
// Since we have to zero out the allocated memory AND ensure that the stack pointer is always valid
// by tickling the pages, we will just push 0's on the stack.
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regTmp);
// Loop:
BasicBlock* loop = genCreateTempLabel();
genDefineTempLabel(loop);
noway_assert(STACK_ALIGN == 8);
inst_IV(INS_push, (unsigned)genRegMask(regTmp));
inst_IV(INS_push, (unsigned)genRegMask(regTmp));
// If not done, loop
// Note that regCnt is the number of bytes to stack allocate.
assert(genIsValidIntReg(regCnt));
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, regCnt, STACK_ALIGN, INS_FLAGS_SET);
inst_JMP(EJ_ne, loop);
lastTouchDelta = 0;
}
else
{
// At this point 'regCnt' is set to the total number of bytes to locAlloc.
//
// We don't need to zero out the allocated memory. However, we do have
// to tickle the pages to ensure that SP is always valid and is
// in sync with the "stack guard page". Note that in the worst
// case SP is on the last byte of the guard page. Thus you must
// touch SP-0 first not SP-0x1000.
//
// Another subtlety is that you don't want SP to be exactly on the
// boundary of the guard page because PUSH is predecrement, thus
// call setup would not touch the guard page but just beyond it
//
// Note that we go through a few hoops so that SP never points to
// illegal pages at any time during the tickling process
//
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
// bvc Loop // result is smaller than original SP (no wrap around)
// mov regCnt, #0 // Overflow, pick lowest possible value
//
// Loop:
// ldr regTmp, [SP + 0] // tickle the page - read from the page
// sub regTmp, SP, PAGE_SIZE // decrement SP by eeGetPageSize()
// cmp regTmp, regCnt
// jb Done
// mov SP, regTmp
// j Loop
//
// Done:
// mov SP, regCnt
//
BasicBlock* loop = genCreateTempLabel();
BasicBlock* done = genCreateTempLabel();
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
GetEmitter()->emitIns_R_R_R(INS_sub, EA_PTRSIZE, regCnt, REG_SPBASE, regCnt, INS_FLAGS_SET);
inst_JMP(EJ_vc, loop); // branch if the V flag is not set
// Overflow, set regCnt to lowest possible value
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
genDefineTempLabel(loop);
// tickle the page - Read from the updated SP - this triggers a page fault when on the guard page
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SPBASE, 0);
// decrement SP by eeGetPageSize()
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, compiler->eeGetPageSize());
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt);
inst_JMP(EJ_lo, done);
// Update SP to be at the next page of stack that we will tickle
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, regTmp, /* canSkip */ false);
// Jump to loop and tickle new stack address
inst_JMP(EJ_jmp, loop);
// Done with stack tickle loop
genDefineTempLabel(done);
// Now just move the final value to SP
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt, /* canSkip */ false);
// lastTouchDelta is dynamic, and can be up to a page. So if we have outgoing arg space,
// we're going to assume the worst and probe.
}
ALLOC_DONE:
// Re-adjust SP to allocate outgoing arg area. We must probe this adjustment.
if (stackAdjustment != 0)
{
assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
assert((lastTouchDelta == ILLEGAL_LAST_TOUCH_DELTA) || (lastTouchDelta >= 0));
if ((lastTouchDelta == ILLEGAL_LAST_TOUCH_DELTA) ||
(stackAdjustment + (unsigned)lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES >
compiler->eeGetPageSize()))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)stackAdjustment, regTmp);
}
else
{
genStackPointerConstantAdjustment(-(ssize_t)stackAdjustment, regTmp);
}
// Return the stackalloc'ed address in result register.
// regCnt = SP + stackAdjustment.
genInstrWithConstant(INS_add, EA_PTRSIZE, regCnt, REG_SPBASE, (ssize_t)stackAdjustment, INS_FLAGS_DONT_CARE,
regTmp);
}
else // stackAdjustment == 0
{
// Move the final value of SP to regCnt
inst_Mov(TYP_I_IMPL, regCnt, REG_SPBASE, /* canSkip */ false);
}
BAILOUT:
if (endLabel != nullptr)
genDefineTempLabel(endLabel);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genTableBasedSwitch: generate code for a switch statement based on a table of ip-relative offsets
//
void CodeGen::genTableBasedSwitch(GenTree* treeNode)
{
genConsumeOperands(treeNode->AsOp());
regNumber idxReg = treeNode->AsOp()->gtOp1->GetRegNum();
regNumber baseReg = treeNode->AsOp()->gtOp2->GetRegNum();
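// This emits roughly: ldr pc, [baseReg, idxReg, lsl #2], i.e. the jump target is
// loaded directly into PC (the index is scaled by TARGET_POINTER_SIZE).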
GetEmitter()->emitIns_R_ARX(INS_ldr, EA_4BYTE, REG_PC, baseReg, idxReg, TARGET_POINTER_SIZE, 0);
}
//------------------------------------------------------------------------
// genJumpTable: emits the table and an instruction to get the address of the first element
//
void CodeGen::genJumpTable(GenTree* treeNode)
{
noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
assert(treeNode->OperGet() == GT_JMPTABLE);
unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
unsigned jmpTabBase;
jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, false);
JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
noway_assert(target->bbFlags & BBF_HAS_LABEL);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
GetEmitter()->emitDataGenData(i, target);
}
GetEmitter()->emitDataGenEnd();
genMov32RelocatableDataLabel(jmpTabBase, treeNode->GetRegNum());
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genGetInsForOper: Return instruction encoding of the operation tree.
//
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins;
if (varTypeIsFloating(type))
return CodeGen::ins_MathOp(oper, type);
switch (oper)
{
case GT_ADD:
ins = INS_add;
break;
case GT_AND:
ins = INS_AND;
break;
case GT_AND_NOT:
ins = INS_bic;
break;
case GT_MUL:
ins = INS_MUL;
break;
#if !defined(USE_HELPERS_FOR_INT_DIV)
case GT_DIV:
ins = INS_sdiv;
break;
#endif // !USE_HELPERS_FOR_INT_DIV
case GT_LSH:
ins = INS_SHIFT_LEFT_LOGICAL;
break;
case GT_NEG:
ins = INS_rsb;
break;
case GT_NOT:
ins = INS_NOT;
break;
case GT_OR:
ins = INS_OR;
break;
case GT_RSH:
ins = INS_SHIFT_RIGHT_ARITHM;
break;
case GT_RSZ:
ins = INS_SHIFT_RIGHT_LOGICAL;
break;
case GT_SUB:
ins = INS_sub;
break;
case GT_XOR:
ins = INS_XOR;
break;
case GT_ROR:
ins = INS_ror;
break;
case GT_ADD_LO:
ins = INS_add;
break;
case GT_ADD_HI:
ins = INS_adc;
break;
case GT_SUB_LO:
ins = INS_sub;
break;
case GT_SUB_HI:
ins = INS_sbc;
break;
case GT_LSH_HI:
ins = INS_SHIFT_LEFT_LOGICAL;
break;
case GT_RSH_LO:
ins = INS_SHIFT_RIGHT_LOGICAL;
break;
default:
unreached();
break;
}
return ins;
}
//------------------------------------------------------------------------
// genCodeForNegNot: Produce code for a GT_NEG/GT_NOT node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForNegNot(GenTree* tree)
{
assert(tree->OperIs(GT_NEG, GT_NOT));
var_types targetType = tree->TypeGet();
assert(!tree->OperIs(GT_NOT) || !varTypeIsFloating(targetType));
regNumber targetReg = tree->GetRegNum();
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
assert(!tree->isContained());
// The dst can only be a register.
assert(targetReg != REG_NA);
GenTree* operand = tree->gtGetOp1();
assert(!operand->isContained());
// The src must be a register.
regNumber operandReg = genConsumeReg(operand);
if (ins == INS_vneg)
{
GetEmitter()->emitIns_R_R(ins, emitTypeSize(tree), targetReg, operandReg);
}
else
{
GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(tree), targetReg, operandReg, 0, INS_FLAGS_SET);
}
genProduceReg(tree);
}
// Generate code for CpObj nodes which copy structs that have interleaved
// GC pointers.
// For this case we'll generate a sequence of loads/stores in the case of struct
// slots that don't contain GC pointers. The generated code will look like:
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
//
// In the case of a GC-Pointer we'll call the ByRef write barrier helper
// which happens to use the same registers as the previous call to maintain
// the same register requirements and register killsets:
// bl CORINFO_HELP_ASSIGN_BYREF
//
// So finally an example would look like this:
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
// bl CORINFO_HELP_ASSIGN_BYREF
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
// bl CORINFO_HELP_ASSIGN_BYREF
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
{
GenTree* dstAddr = cpObjNode->Addr();
GenTree* source = cpObjNode->Data();
var_types srcAddrType = TYP_BYREF;
bool sourceIsLocal = false;
regNumber dstReg = REG_NA;
regNumber srcReg = REG_NA;
assert(source->isContained());
if (source->gtOper == GT_IND)
{
GenTree* srcAddr = source->gtGetOp1();
assert(!srcAddr->isContained());
srcAddrType = srcAddr->TypeGet();
}
else
{
noway_assert(source->IsLocal());
sourceIsLocal = true;
}
bool dstOnStack = dstAddr->gtSkipReloadOrCopy()->OperIsLocalAddr();
#ifdef DEBUG
assert(!dstAddr->isContained());
// This GenTree node has data about GC pointers, this means we're dealing
// with CpObj.
assert(cpObjNode->GetLayout()->HasGCPtr());
#endif // DEBUG
// Consume the operands and get them into the right registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumeBlockOp(cpObjNode, REG_WRITE_BARRIER_DST_BYREF, REG_WRITE_BARRIER_SRC_BYREF, REG_NA);
gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_SRC_BYREF, srcAddrType);
gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_DST_BYREF, dstAddr->TypeGet());
// Temp register used to perform the sequence of loads and stores.
regNumber tmpReg = cpObjNode->ExtractTempReg();
assert(genIsValidIntReg(tmpReg));
if (cpObjNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before & after a volatile CpObj operation
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
ClassLayout* layout = cpObjNode->GetLayout();
unsigned slots = layout->GetSlotCount();
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
{
for (unsigned i = 0; i < slots; ++i)
{
emitAttr attr = emitTypeSize(layout->GetGCPtrType(i));
emit->emitIns_R_R_I(INS_ldr, attr, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
emit->emitIns_R_R_I(INS_str, attr, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
}
}
else
{
unsigned gcPtrCount = layout->GetGCPtrCount();
unsigned i = 0;
while (i < slots)
{
if (!layout->IsGCPtr(i))
{
emit->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
emit->emitIns_R_R_I(INS_str, EA_PTRSIZE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
}
else
{
genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
gcPtrCount--;
}
++i;
}
assert(gcPtrCount == 0);
}
if (cpObjNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before & after a volatile CpObj operation
instGen_MemoryBarrier();
}
// Clear the gcInfo for registers of source and dest.
// While we normally update GC info prior to the last instruction that uses them,
// these actually live into the helper call.
gcInfo.gcMarkRegSetNpt(RBM_WRITE_BARRIER_SRC_BYREF | RBM_WRITE_BARRIER_DST_BYREF);
}
//------------------------------------------------------------------------
// genCodeForShiftLong: Generates the code sequence for a GenTree node that
// represents a three operand bit shift or rotate operation (<<Hi, >>Lo).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
// b) The shift-by-amount in tree->AsOp()->gtOp2 is a contained constant
//
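// For example (illustrative), GT_LSH_HI with a shift count of 5 produces roughly:
//   mov rDst, rHi              (omitted when rDst == rHi)
//   lsl rDst, rDst, #5
//   orr rDst, rDst, rLo, lsr #27
//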
void CodeGen::genCodeForShiftLong(GenTree* tree)
{
// Only the non-RMW case here.
genTreeOps oper = tree->OperGet();
assert(oper == GT_LSH_HI || oper == GT_RSH_LO);
GenTree* operand = tree->AsOp()->gtOp1;
assert(operand->OperGet() == GT_LONG);
assert(operand->AsOp()->gtOp1->isUsedFromReg());
assert(operand->AsOp()->gtOp2->isUsedFromReg());
GenTree* operandLo = operand->gtGetOp1();
GenTree* operandHi = operand->gtGetOp2();
regNumber regLo = operandLo->GetRegNum();
regNumber regHi = operandHi->GetRegNum();
genConsumeOperands(tree->AsOp());
var_types targetType = tree->TypeGet();
instruction ins = genGetInsForOper(oper, targetType);
GenTree* shiftBy = tree->gtGetOp2();
assert(shiftBy->isContainedIntOrIImmed());
unsigned count = (unsigned)shiftBy->AsIntConCommon()->IconValue();
regNumber regResult = (oper == GT_LSH_HI) ? regHi : regLo;
inst_Mov(targetType, tree->GetRegNum(), regResult, /* canSkip */ true);
if (oper == GT_LSH_HI)
{
inst_RV_SH(ins, EA_4BYTE, tree->GetRegNum(), count);
GetEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, tree->GetRegNum(), tree->GetRegNum(), regLo, 32 - count,
INS_FLAGS_DONT_CARE, INS_OPTS_LSR);
}
else
{
assert(oper == GT_RSH_LO);
inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, tree->GetRegNum(), count);
GetEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, tree->GetRegNum(), tree->GetRegNum(), regHi, 32 - count,
INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclVar: Produce code for a GT_LCL_VAR node.
//
// Arguments:
// tree - the GT_LCL_VAR node
//
void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
{
// lcl_vars are not defs
assert((tree->gtFlags & GTF_VAR_DEF) == 0);
bool isRegCandidate = compiler->lvaGetDesc(tree)->lvIsRegCandidate();
// If this is a register candidate that has been spilled, genConsumeReg() will
// reload it at the point of use. Otherwise, if it's not in a register, we load it here.
if (!isRegCandidate && !tree->IsMultiReg() && !(tree->gtFlags & GTF_SPILLED))
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(tree);
var_types type = varDsc->GetRegisterType(tree);
GetEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), tree->GetRegNum(), tree->GetLclNum(), 0);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForStoreLclFld: Produce code for a GT_STORE_LCL_FLD node.
//
// Arguments:
// tree - the GT_STORE_LCL_FLD node
//
void CodeGen::genCodeForStoreLclFld(GenTreeLclFld* tree)
{
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
noway_assert(targetType != TYP_STRUCT);
// record the offset
unsigned offset = tree->GetLclOffs();
// We must have a stack store with GT_STORE_LCL_FLD
noway_assert(targetReg == REG_NA);
unsigned varNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
// Ensure that lclVar nodes are typed correctly.
assert(!varDsc->lvNormalizeOnStore() || targetType == genActualType(varDsc->TypeGet()));
GenTree* data = tree->gtOp1;
regNumber dataReg = REG_NA;
genConsumeReg(data);
if (data->isContained())
{
assert(data->OperIs(GT_BITCAST));
const GenTree* bitcastSrc = data->AsUnOp()->gtGetOp1();
assert(!bitcastSrc->isContained());
dataReg = bitcastSrc->GetRegNum();
}
else
{
dataReg = data->GetRegNum();
}
assert(dataReg != REG_NA);
if (tree->IsOffsetMisaligned())
{
// ARM supports unaligned access only for integer types, so move the floating-point
// data being stored into 1 or 2 integer registers and write it out as integer stores.
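// For example (illustrative), a misaligned TYP_DOUBLE store expands to roughly:
//   add  rAddr, <frame base>, #offset   (via INS_lea)
//   vmov rLo, rHi, dData
//   str  rLo, [rAddr]
//   str  rHi, [rAddr, #4]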
regNumber addr = tree->ExtractTempReg();
emit->emitIns_R_S(INS_lea, EA_PTRSIZE, addr, varNum, offset);
if (targetType == TYP_FLOAT)
{
regNumber floatAsInt = tree->GetSingleTempReg();
emit->emitIns_Mov(INS_vmov_f2i, EA_4BYTE, floatAsInt, dataReg, /* canSkip */ false);
emit->emitIns_R_R(INS_str, EA_4BYTE, floatAsInt, addr);
}
else
{
regNumber halfdoubleAsInt1 = tree->ExtractTempReg();
regNumber halfdoubleAsInt2 = tree->GetSingleTempReg();
emit->emitIns_R_R_R(INS_vmov_d2i, EA_8BYTE, halfdoubleAsInt1, halfdoubleAsInt2, dataReg);
emit->emitIns_R_R_I(INS_str, EA_4BYTE, halfdoubleAsInt1, addr, 0);
emit->emitIns_R_R_I(INS_str, EA_4BYTE, halfdoubleAsInt2, addr, 4);
}
}
else
{
emitAttr attr = emitTypeSize(targetType);
instruction ins = ins_StoreFromSrc(dataReg, targetType);
emit->emitIns_S_R(ins, attr, dataReg, varNum, offset);
}
// Updating variable liveness after instruction was emitted
genUpdateLife(tree);
varDsc->SetRegNum(REG_STK);
}
//------------------------------------------------------------------------
// genCodeForStoreLclVar: Produce code for a GT_STORE_LCL_VAR node.
//
// Arguments:
// tree - the GT_STORE_LCL_VAR node
//
void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* tree)
{
GenTree* data = tree->gtOp1;
GenTree* actualData = data->gtSkipReloadOrCopy();
unsigned regCount = 1;
// The 'var = call' case, where the call returns a multi-reg value,
// is handled separately.
if (actualData->IsMultiRegNode())
{
regCount = actualData->GetMultiRegCount(compiler);
if (regCount > 1)
{
genMultiRegStoreToLocal(tree);
}
}
if (regCount == 1)
{
unsigned varNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
var_types targetType = varDsc->GetRegisterType(tree);
if (targetType == TYP_LONG)
{
genStoreLongLclVar(tree);
}
else
{
genConsumeRegs(data);
regNumber dataReg = REG_NA;
if (data->isContained())
{
assert(data->OperIs(GT_BITCAST));
const GenTree* bitcastSrc = data->AsUnOp()->gtGetOp1();
assert(!bitcastSrc->isContained());
dataReg = bitcastSrc->GetRegNum();
}
else
{
dataReg = data->GetRegNum();
}
assert(dataReg != REG_NA);
regNumber targetReg = tree->GetRegNum();
if (targetReg == REG_NA) // store into stack based LclVar
{
inst_set_SV_var(tree);
instruction ins = ins_StoreFromSrc(dataReg, targetType);
emitAttr attr = emitTypeSize(targetType);
emitter* emit = GetEmitter();
emit->emitIns_S_R(ins, attr, dataReg, varNum, /* offset */ 0);
// Updating variable liveness after instruction was emitted
genUpdateLife(tree);
varDsc->SetRegNum(REG_STK);
}
else // store into register (i.e move into register)
{
// Assign into targetReg when dataReg (from op1) is not the same register
inst_Mov(targetType, targetReg, dataReg, /* canSkip */ true);
genProduceReg(tree);
}
}
}
}
//------------------------------------------------------------------------
// genCodeForDivMod: Produce code for a GT_DIV/GT_UDIV/GT_MOD/GT_UMOD node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForDivMod(GenTreeOp* tree)
{
assert(tree->OperIs(GT_DIV, GT_UDIV, GT_MOD, GT_UMOD));
// We shouldn't be seeing GT_MOD on float/double args as it should get morphed into a
// helper call by front-end. Similarly we shouldn't be seeing GT_UDIV and GT_UMOD
// on float/double args.
noway_assert(tree->OperIs(GT_DIV) || !varTypeIsFloating(tree));
#if defined(USE_HELPERS_FOR_INT_DIV)
noway_assert(!varTypeIsIntOrI(tree));
#endif // USE_HELPERS_FOR_INT_DIV
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
genConsumeOperands(tree);
noway_assert(targetReg != REG_NA);
GenTree* dst = tree;
GenTree* src1 = tree->gtGetOp1();
GenTree* src2 = tree->gtGetOp2();
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
emitAttr attr = emitTypeSize(tree);
regNumber result = REG_NA;
// dst can only be a reg
assert(!dst->isContained());
// at least one of the sources must be in a register
assert(!src1->isContained() || !src2->isContained());
if (varTypeIsFloating(targetType))
{
// Floating point divide never raises an exception
emit->emitIns_R_R_R(ins, attr, dst->GetRegNum(), src1->GetRegNum(), src2->GetRegNum());
}
else // an integer divide operation
{
// TODO-ARM-Bug: handle zero division exception.
emit->emitIns_R_R_R(ins, attr, dst->GetRegNum(), src1->GetRegNum(), src2->GetRegNum());
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCkfinite: Generate code for ckfinite opcode.
//
// Arguments:
// treeNode - The GT_CKFINITE node
//
// Return Value:
// None.
//
// Assumptions:
// GT_CKFINITE node has reserved an internal register.
//
void CodeGen::genCkfinite(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_CKFINITE);
emitter* emit = GetEmitter();
var_types targetType = treeNode->TypeGet();
regNumber intReg = treeNode->GetSingleTempReg();
regNumber fpReg = genConsumeReg(treeNode->AsOp()->gtOp1);
regNumber targetReg = treeNode->GetRegNum();
// Extract and sign-extend the exponent into an integer register
if (targetType == TYP_FLOAT)
{
emit->emitIns_Mov(INS_vmov_f2i, EA_4BYTE, intReg, fpReg, /* canSkip */ false);
emit->emitIns_R_R_I_I(INS_sbfx, EA_4BYTE, intReg, intReg, 23, 8);
}
else
{
assert(targetType == TYP_DOUBLE);
emit->emitIns_Mov(INS_vmov_f2i, EA_4BYTE, intReg, REG_NEXT(fpReg), /* canSkip */ false);
emit->emitIns_R_R_I_I(INS_sbfx, EA_4BYTE, intReg, intReg, 20, 11);
}
// If exponent is all 1's, throw ArithmeticException
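// (sbfx sign-extends the exponent field, so an all-ones exponent becomes -1;
// adding 1 then produces 0 and sets the Z flag exactly for NaN/infinity inputs.)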
emit->emitIns_R_I(INS_add, EA_4BYTE, intReg, 1, INS_FLAGS_SET);
genJumpToThrowHlpBlk(EJ_eq, SCK_ARITH_EXCPN);
// If it's a finite value, copy it to targetReg
inst_Mov(targetType, targetReg, fpReg, /* canSkip */ true, emitTypeSize(treeNode));
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT/GT_CMP node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForCompare(GenTreeOp* tree)
{
// TODO-ARM-CQ: Check if we can use the currently set flags.
// TODO-ARM-CQ: Check for the case where we can simply transfer the carry bit to a register
// (signed < or >= where targetReg != REG_NA)
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = op1->TypeGet();
var_types op2Type = op2->TypeGet();
assert(!varTypeIsLong(op1Type));
assert(!varTypeIsLong(op2Type));
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
genConsumeIfReg(op1);
genConsumeIfReg(op2);
if (varTypeIsFloating(op1Type))
{
assert(op1Type == op2Type);
assert(!tree->OperIs(GT_CMP));
emit->emitInsBinary(INS_vcmp, emitTypeSize(op1Type), op1, op2);
// vmrs with register 0xf (APSR_nzcv) transfers the FP status flags to the ARM condition flags
emit->emitIns_R(INS_vmrs, EA_4BYTE, REG_R15);
}
else
{
assert(!varTypeIsFloating(op2Type));
var_types cmpType = (op1Type == op2Type) ? op1Type : TYP_INT;
emit->emitInsBinary(INS_cmp, emitTypeSize(cmpType), op1, op2);
}
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
inst_SETCC(GenCondition::FromRelop(tree), tree->TypeGet(), targetReg);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node.
//
// Arguments:
// tree - the GT_RETURNTRAP node
//
void CodeGen::genCodeForReturnTrap(GenTreeOp* tree)
{
assert(tree->OperGet() == GT_RETURNTRAP);
// this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
// based on the contents of 'data'
GenTree* data = tree->gtOp1;
genConsumeIfReg(data);
GenTreeIntCon cns = intForm(TYP_INT, 0);
cns.SetContained();
GetEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
BasicBlock* skipLabel = genCreateTempLabel();
inst_JMP(EJ_eq, skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN);
genDefineTempLabel(skipLabel);
}
//------------------------------------------------------------------------
// genCodeForStoreInd: Produce code for a GT_STOREIND node.
//
// Arguments:
// tree - the GT_STOREIND node
//
void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
{
GenTree* data = tree->Data();
GenTree* addr = tree->Addr();
var_types type = tree->TypeGet();
assert(!varTypeIsFloating(type) || (type == data->TypeGet()));
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(tree, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
{
// data and addr must be in registers.
// Consume both registers so that any copies of interfering
// registers are taken care of.
genConsumeOperands(tree);
// At this point, we should not have any interference.
// That is, 'data' must not be in REG_ARG_0,
// as that is where 'addr' must go.
noway_assert(data->GetRegNum() != REG_ARG_0);
// addr goes in REG_ARG_0
inst_Mov(addr->TypeGet(), REG_ARG_0, addr->GetRegNum(), /* canSkip */ true);
// data goes in REG_ARG_1
inst_Mov(data->TypeGet(), REG_ARG_1, data->GetRegNum(), /* canSkip */ true);
genGCWriteBarrier(tree, writeBarrierForm);
}
else // A normal store, not a WriteBarrier store
{
// We must consume the operands in the proper execution order,
// so that liveness is updated appropriately.
genConsumeAddress(addr);
if (!data->isContained())
{
genConsumeRegs(data);
}
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
{
// issue a full memory barrier before a volatile StInd
instGen_MemoryBarrier();
}
regNumber dataReg = data->GetRegNum();
GetEmitter()->emitInsLoadStoreOp(ins_StoreFromSrc(dataReg, type), emitActualTypeSize(type), dataReg, tree);
// If store was to a variable, update variable liveness after instruction was emitted.
genUpdateLife(tree);
}
}
// genLongToIntCast: Generate code for long to int casts.
//
// Arguments:
// cast - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// The cast node and its sources (via GT_LONG) must have been assigned registers.
// The destination cannot be a floating point type or a small integer type.
//
void CodeGen::genLongToIntCast(GenTree* cast)
{
assert(cast->OperGet() == GT_CAST);
GenTree* src = cast->gtGetOp1();
noway_assert(src->OperGet() == GT_LONG);
genConsumeRegs(src);
var_types srcType = ((cast->gtFlags & GTF_UNSIGNED) != 0) ? TYP_ULONG : TYP_LONG;
var_types dstType = cast->CastToType();
regNumber loSrcReg = src->gtGetOp1()->GetRegNum();
regNumber hiSrcReg = src->gtGetOp2()->GetRegNum();
regNumber dstReg = cast->GetRegNum();
assert((dstType == TYP_INT) || (dstType == TYP_UINT));
assert(genIsValidIntReg(loSrcReg));
assert(genIsValidIntReg(hiSrcReg));
assert(genIsValidIntReg(dstReg));
if (cast->gtOverflow())
{
//
// Generate an overflow check for [u]long to [u]int casts:
//
// long -> int - check if the upper 33 bits are all 0 or all 1
//
// ulong -> int - check if the upper 33 bits are all 0
//
// long -> uint - check if the upper 32 bits are all 0
// ulong -> uint - check if the upper 32 bits are all 0
//
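// A representative sequence for the signed long -> int case (labels and registers illustrative):
//   tst loSrc, loSrc
//   bmi AllOne
//   tst hiSrc, hiSrc        ; upper bits must be all zero
//   bne Throw
//   b   Done
// AllOne:
//   cmp hiSrc, #-1          ; upper bits must be all ones
//   bne Throw
// Done:
//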
if ((srcType == TYP_LONG) && (dstType == TYP_INT))
{
BasicBlock* allOne = genCreateTempLabel();
BasicBlock* success = genCreateTempLabel();
inst_RV_RV(INS_tst, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
inst_JMP(EJ_mi, allOne);
inst_RV_RV(INS_tst, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
inst_JMP(EJ_jmp, success);
genDefineTempLabel(allOne);
inst_RV_IV(INS_cmp, hiSrcReg, -1, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
genDefineTempLabel(success);
}
else
{
if ((srcType == TYP_ULONG) && (dstType == TYP_INT))
{
inst_RV_RV(INS_tst, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_mi, SCK_OVERFLOW);
}
inst_RV_RV(INS_tst, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
}
}
inst_Mov(TYP_INT, dstReg, loSrcReg, /* canSkip */ true);
genProduceReg(cast);
}
//------------------------------------------------------------------------
// genIntToFloatCast: Generate code to cast an int to float/double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType= int32/uint32/int64/uint64 and DstType=float/double.
//
void CodeGen::genIntToFloatCast(GenTree* treeNode)
{
// int --> float/double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidIntReg(op1->GetRegNum())); // Must be a valid int reg.
var_types dstType = treeNode->CastToType();
var_types srcType = genActualType(op1->TypeGet());
assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
// force the srcType to unsigned if GT_UNSIGNED flag is set
if (treeNode->gtFlags & GTF_UNSIGNED)
{
srcType = varTypeToUnsigned(srcType);
}
// We only expect a srcType whose size is EA_4BYTE.
emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
noway_assert(srcSize == EA_4BYTE);
instruction insVcvt = INS_invalid;
if (dstType == TYP_DOUBLE)
{
insVcvt = (varTypeIsUnsigned(srcType)) ? INS_vcvt_u2d : INS_vcvt_i2d;
}
else
{
assert(dstType == TYP_FLOAT);
insVcvt = (varTypeIsUnsigned(srcType)) ? INS_vcvt_u2f : INS_vcvt_i2f;
}
// All other casts are implemented via the various CORINFO_HELP_XX2XX helpers;
// see Compiler::fgMorphCast().
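// For example (illustrative), an int -> float conversion produces roughly:
//   vmov         sDst, rSrc
//   vcvt.f32.s32 sDst, sDst
// (vcvt.f32.u32 when the source is unsigned; the double forms are used for TYP_DOUBLE targets.)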
genConsumeOperands(treeNode->AsOp());
assert(insVcvt != INS_invalid);
GetEmitter()->emitIns_Mov(INS_vmov_i2f, srcSize, treeNode->GetRegNum(), op1->GetRegNum(), /* canSkip */ false);
GetEmitter()->emitIns_R_R(insVcvt, srcSize, treeNode->GetRegNum(), treeNode->GetRegNum());
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genFloatToIntCast: Generate code to cast float/double to int
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType=float/double and DstType= int32/uint32/int64/uint64
//
void CodeGen::genFloatToIntCast(GenTree* treeNode)
{
// we don't expect to see overflow detecting float/double --> int type conversions here
// as they should have been converted into helper calls by front-end.
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidIntReg(targetReg)); // Must be a valid int reg.
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidFloatReg(op1->GetRegNum())); // Must be a valid float reg.
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
// We only expect a dstType whose size is EA_4BYTE.
// For conversions to small types (byte/sbyte/int16/uint16) from float/double,
// we expect the front-end or lowering phase to have generated two levels of cast.
//
emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
noway_assert(dstSize == EA_4BYTE);
instruction insVcvt = INS_invalid;
if (srcType == TYP_DOUBLE)
{
insVcvt = (varTypeIsUnsigned(dstType)) ? INS_vcvt_d2u : INS_vcvt_d2i;
}
else
{
assert(srcType == TYP_FLOAT);
insVcvt = (varTypeIsUnsigned(dstType)) ? INS_vcvt_f2u : INS_vcvt_f2i;
}
// All other casts are implemented by calls to various CORINFO_HELP_XX2XX helpers;
// see Compiler::fgMorphCast()
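// Illustrative output (editor's sketch, assuming the source in s0, the temp in s1 and the
// result in r0): a float -> int cast emits "vcvt.s32.f32 s1, s0" followed by "vmov r0, s1";
// unsigned destinations use vcvt.u32.f32 and double sources use the _d2i/_d2u forms.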
genConsumeOperands(treeNode->AsOp());
regNumber tmpReg = treeNode->GetSingleTempReg();
assert(insVcvt != INS_invalid);
GetEmitter()->emitIns_R_R(insVcvt, dstSize, tmpReg, op1->GetRegNum());
GetEmitter()->emitIns_Mov(INS_vmov_f2i, dstSize, treeNode->GetRegNum(), tmpReg, /* canSkip */ false);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genEmitHelperCall: Emit a call to a helper function.
//
void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg /*= REG_NA */)
{
// Can we call the helper function directly?
void *addr = NULL, **pAddr = NULL;
#if defined(DEBUG) && defined(PROFILING_SUPPORTED)
// Don't ask VM if it hasn't requested ELT hooks
if (!compiler->compProfilerHookNeeded && compiler->opts.compJitELTHookEnabled &&
(helper == CORINFO_HELP_PROF_FCN_ENTER || helper == CORINFO_HELP_PROF_FCN_LEAVE ||
helper == CORINFO_HELP_PROF_FCN_TAILCALL))
{
addr = compiler->compProfilerMethHnd;
}
else
#endif
{
addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, (void**)&pAddr);
}
if (!addr || !validImmForBL((ssize_t)addr))
{
if (callTargetReg == REG_NA)
{
// If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
// this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
}
// Load the address into a register and call through a register
if (addr)
{
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, callTargetReg, (ssize_t)addr);
}
else
{
GetEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, callTargetReg, (ssize_t)pAddr);
regSet.verifyRegUsed(callTargetReg);
}
GetEmitter()->emitIns_Call(emitter::EC_INDIR_R, compiler->eeFindHelper(helper),
INDEBUG_LDISASM_COMMA(nullptr) NULL, // addr
argSize, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, DebugInfo(),
callTargetReg, // ireg
REG_NA, 0, 0, // xreg, xmul, disp
false // isJump
);
}
else
{
GetEmitter()->emitIns_Call(emitter::EC_FUNC_TOKEN, compiler->eeFindHelper(helper),
INDEBUG_LDISASM_COMMA(nullptr) addr, argSize, retSize, gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, DebugInfo(), REG_NA, REG_NA, 0,
0, /* ilOffset, ireg, xreg, xmul, disp */
false /* isJump */
);
}
regSet.verifyRegistersUsed(RBM_CALLEE_TRASH);
}
#ifdef PROFILING_SUPPORTED
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed set to 'false' if 'initReg' is
// not zero after this call.
//
// Return Value:
// None
//
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
// Give profiler a chance to back out of hooking this method
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
// On Arm, arguments are pre-spilled to the stack, which frees r0-r3.
// For generating the Enter callout we need two registers, and one of them has to be r0 to pass the profiler handle.
// The call target register can be any free register.
regNumber argReg = REG_PROFILER_ENTER_ARG;
regMaskTP argRegMask = genRegMask(argReg);
assert((regSet.rsMaskPreSpillRegArg & argRegMask) != 0);
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, argReg, (ssize_t)compiler->compProfilerMethHnd);
regSet.verifyRegUsed(argReg);
}
else
{
instGen_Set_Reg_To_Imm(EA_4BYTE, argReg, (ssize_t)compiler->compProfilerMethHnd);
}
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER,
0, // argSize. Again, we have to lie about it
EA_UNKNOWN); // retSize
if (initReg == argReg)
{
*pInitRegZeroed = false;
}
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
// Only hook if profiler says it's okay.
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
//
// Push the profilerHandle
//
// Contract between JIT and Profiler Leave callout on arm:
// Return size <= 4 bytes: REG_PROFILER_RET_SCRATCH will contain return value
// Return size > 4 and <= 8: <REG_PROFILER_RET_SCRATCH,r1> will contain return value.
// Floating point or double or HFA return values will be in s0-s15 in case of non-vararg methods.
// It is assumed that profiler Leave callback doesn't trash registers r1,REG_PROFILER_RET_SCRATCH and s0-s15.
//
// In the following cases r0 doesn't contain a return value and hence need not be preserved before emitting Leave
// callback.
bool r0InUse;
emitAttr attr = EA_UNKNOWN;
if (helper == CORINFO_HELP_PROF_FCN_TAILCALL)
{
// For the tail call case, the helper call is introduced during lower,
// so the allocator will arrange things so R0 is not in use here.
//
// For the tail jump case, all reg args have been spilled via genJmpMethod,
// so R0 is likewise not in use.
r0InUse = false;
}
else if (compiler->info.compRetType == TYP_VOID)
{
r0InUse = false;
}
else if (varTypeIsFloating(compiler->info.compRetType) ||
compiler->IsHfa(compiler->info.compMethodInfo->args.retTypeClass))
{
r0InUse = compiler->info.compIsVarArgs || compiler->opts.compUseSoftFP;
}
else
{
r0InUse = true;
}
if (r0InUse)
{
if (varTypeIsGC(compiler->info.compRetNativeType))
{
attr = emitActualTypeSize(compiler->info.compRetNativeType);
}
else if (compiler->compMethodReturnsRetBufAddr())
{
attr = EA_BYREF;
}
else
{
attr = EA_PTRSIZE;
}
}
if (r0InUse)
{
// Has a return value and r0 is in use. For emitting the Leave profiler callout we need r0 for passing the
// profiler handle. Therefore, r0 is moved to REG_PROFILER_RET_SCRATCH as per the contract.
GetEmitter()->emitIns_Mov(INS_mov, attr, REG_PROFILER_RET_SCRATCH, REG_R0, /* canSkip */ false);
genTransferRegGCState(REG_PROFILER_RET_SCRATCH, REG_R0);
regSet.verifyRegUsed(REG_PROFILER_RET_SCRATCH);
}
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, REG_R0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_R0, (ssize_t)compiler->compProfilerMethHnd);
}
gcInfo.gcMarkRegSetNpt(RBM_R0);
regSet.verifyRegUsed(REG_R0);
genEmitHelperCall(helper,
0, // argSize
EA_UNKNOWN); // retSize
// Restore state that existed before profiler callback
if (r0InUse)
{
GetEmitter()->emitIns_Mov(INS_mov, attr, REG_R0, REG_PROFILER_RET_SCRATCH, /* canSkip */ false);
genTransferRegGCState(REG_R0, REG_PROFILER_RET_SCRATCH);
gcInfo.gcMarkRegSetNpt(RBM_PROFILER_RET_SCRATCH);
}
}
#endif // PROFILING_SUPPORTED
//------------------------------------------------------------------------
// genEstablishFramePointer: Set up the frame pointer by adding an offset to the stack pointer.
//
// Arguments:
// delta - the offset to add to the current stack pointer to establish the frame pointer
// reportUnwindData - true if establishing the frame pointer should be reported in the OS unwind data.
//
void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData)
{
assert(compiler->compGeneratingProlog);
assert(arm_Valid_Imm_For_Add_SP(delta));
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
if (reportUnwindData)
{
compiler->unwindPadding();
}
}
//------------------------------------------------------------------------
// genAllocLclFrame: Probe the stack and allocate the local stack frame - subtract from SP.
//
// Notes:
// The first instruction of the prolog is always a push (which touches the lowest address
// of the stack), either of the LR register or of some argument registers, e.g., in the case of
// pre-spilling. The LR register is always pushed because we require it to allow for GC return
// address hijacking (see the comment in CodeGen::genPushCalleeSavedRegisters()). These pushes
// happen immediately before calling this function, so the SP at the current location has already
// been touched.
//
// Arguments:
// frameSize - the size of the stack frame being allocated.
// initReg - register to use as a scratch register.
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
// maskArgRegsLiveIn - incoming argument registers that are currently live.
//
// Return value:
// None
//
void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn)
{
assert(compiler->compGeneratingProlog);
if (frameSize == 0)
{
return;
}
const target_size_t pageSize = compiler->eeGetPageSize();
assert(!compiler->info.compPublishStubParam || (REG_SECRET_STUB_PARAM != initReg));
if (frameSize < pageSize)
{
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, frameSize);
}
else
{
// Generate the following code:
//
// movw r4, #frameSize
// sub r4, sp, r4
// bl CORINFO_HELP_STACK_PROBE
// mov sp, r4
//
// If frameSize cannot be encoded as a movw immediate, this becomes:
//
// movw r4, #frameSizeLo16
// movt r4, #frameSizeHi16
// sub r4, sp, r4
// bl CORINFO_HELP_STACK_PROBE
// mov sp, r4
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_STACK_PROBE_HELPER_ARG, REG_SPBASE, frameSize,
INS_FLAGS_DONT_CARE, REG_STACK_PROBE_HELPER_ARG);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_ARG);
genEmitHelperCall(CORINFO_HELP_STACK_PROBE, 0, EA_UNKNOWN, REG_STACK_PROBE_HELPER_CALL_TARGET);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_CALL_TARGET);
compiler->unwindPadding();
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG, /* canSkip */ false);
if ((genRegMask(initReg) & (RBM_STACK_PROBE_HELPER_ARG | RBM_STACK_PROBE_HELPER_CALL_TARGET |
RBM_STACK_PROBE_HELPER_TRASH)) != RBM_NONE)
{
*pInitRegZeroed = false;
}
}
compiler->unwindAllocStack(frameSize);
#ifdef USING_SCOPE_INFO
if (!doubleAlignOrFramePointerUsed())
{
psiAdjustStackLevel(frameSize);
}
#endif // USING_SCOPE_INFO
}
void CodeGen::genPushFltRegs(regMaskTP regMask)
{
assert(regMask != 0); // Don't call unless we have some registers to push
assert((regMask & RBM_ALLFLOAT) == regMask); // Only floating-point registers should be in regMask
regNumber lowReg = genRegNumFromMask(genFindLowestBit(regMask));
int slots = genCountBits(regMask);
// regMask should be contiguously set
regMaskTP tmpMask = ((regMask >> lowReg) + 1); // tmpMask should have a single bit set
assert((tmpMask & (tmpMask - 1)) == 0);
assert(lowReg == REG_F16); // Currently we expect to start at F16 in the unwind codes
// Our calling convention requires that we only use vpush for TYP_DOUBLE registers
noway_assert(floatRegCanHoldType(lowReg, TYP_DOUBLE));
noway_assert((slots % 2) == 0);
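// For example (illustrative only): if regMask covers f16..f23 (8 single-precision slots),
// this emits "vpush {d8-d11}", since s16..s31 alias d8..d15.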
GetEmitter()->emitIns_R_I(INS_vpush, EA_8BYTE, lowReg, slots / 2);
}
void CodeGen::genPopFltRegs(regMaskTP regMask)
{
assert(regMask != 0); // Don't call unless we have some registers to pop
assert((regMask & RBM_ALLFLOAT) == regMask); // Only floating-point registers should be in regMask
regNumber lowReg = genRegNumFromMask(genFindLowestBit(regMask));
int slots = genCountBits(regMask);
// regMask should be contiguously set
regMaskTP tmpMask = ((regMask >> lowReg) + 1); // tmpMask should have a single bit set
assert((tmpMask & (tmpMask - 1)) == 0);
// Our calling convention requires that we only use vpop for TYP_DOUBLE registers
noway_assert(floatRegCanHoldType(lowReg, TYP_DOUBLE));
noway_assert((slots % 2) == 0);
GetEmitter()->emitIns_R_I(INS_vpop, EA_8BYTE, lowReg, slots / 2);
}
//------------------------------------------------------------------------
// genFreeLclFrame: free the local stack frame by adding `frameSize` to SP.
//
// Arguments:
// frameSize - the frame size to free;
// pUnwindStarted - was epilog unwind started or not.
//
// Notes:
// If epilog unwind hasn't been started and we generate code, we start the unwind
// and set *pUnwindStarted = true.
//
void CodeGen::genFreeLclFrame(unsigned frameSize, /* IN OUT */ bool* pUnwindStarted)
{
assert(compiler->compGeneratingEpilog);
if (frameSize == 0)
return;
// Add 'frameSize' to SP.
//
// Unfortunately, we can't just use:
//
// inst_RV_IV(INS_add, REG_SPBASE, frameSize, EA_PTRSIZE);
//
// because we need to generate proper unwind codes for each instruction generated,
// and large frame sizes might generate a temp register load which might
// need an unwind code. We don't want to generate a "NOP" code for this
// temp register load; we want the unwind codes to start after that.
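// For a large frame the sequence ends up as something like (illustrative only):
//
// movw r12, #frameSizeLo16
// movt r12, #frameSizeHi16 ; omitted when the low 16 bits suffice
// add sp, r12
//
// where only the final "add sp, r12" is covered by unwind codes.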
if (arm_Valid_Imm_For_Instr(INS_add, frameSize, INS_FLAGS_DONT_CARE))
{
if (!*pUnwindStarted)
{
compiler->unwindBegEpilog();
*pUnwindStarted = true;
}
GetEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, frameSize, INS_FLAGS_DONT_CARE);
}
else
{
// R12 doesn't hold arguments or return values, so can be used as temp.
regNumber tmpReg = REG_R12;
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, frameSize);
if (*pUnwindStarted)
{
compiler->unwindPadding();
}
// We're going to generate an unwindable instruction, so check again if
// we need to start the unwind codes.
if (!*pUnwindStarted)
{
compiler->unwindBegEpilog();
*pUnwindStarted = true;
}
GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, REG_SPBASE, tmpReg, INS_FLAGS_DONT_CARE);
}
compiler->unwindAllocStack(frameSize);
}
/*-----------------------------------------------------------------------------
*
* Move of relocatable displacement value to register
*/
void CodeGen::genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg)
{
GetEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block, reg);
GetEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block, reg);
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
{
GetEmitter()->emitIns_R_R_R(INS_add, EA_4BYTE_DSP_RELOC, reg, reg, REG_PC);
}
}
/*-----------------------------------------------------------------------------
*
* Move of relocatable data-label to register
*/
void CodeGen::genMov32RelocatableDataLabel(unsigned value, regNumber reg)
{
GetEmitter()->emitIns_R_D(INS_movw, EA_HANDLE_CNS_RELOC, value, reg);
GetEmitter()->emitIns_R_D(INS_movt, EA_HANDLE_CNS_RELOC, value, reg);
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
{
GetEmitter()->emitIns_R_R_R(INS_add, EA_HANDLE_CNS_RELOC, reg, reg, REG_PC);
}
}
/*-----------------------------------------------------------------------------
*
* Move of relocatable immediate to register
*/
void CodeGen::genMov32RelocatableImmediate(emitAttr size, BYTE* addr, regNumber reg)
{
_ASSERTE(EA_IS_RELOC(size));
GetEmitter()->emitIns_MovRelocatableImmediate(INS_movw, size, reg, addr);
GetEmitter()->emitIns_MovRelocatableImmediate(INS_movt, size, reg, addr);
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
{
GetEmitter()->emitIns_R_R_R(INS_add, size, reg, reg, REG_PC);
}
}
/*-----------------------------------------------------------------------------
*
* Returns register mask to push/pop to allocate a small stack frame,
* instead of using "sub sp" / "add sp". Returns RBM_NONE if either frame size
* is zero, or if we should use "sub sp" / "add sp" instead of push/pop.
*/
regMaskTP CodeGen::genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskCalleeSavedFloat)
{
assert(compiler->compGeneratingProlog || compiler->compGeneratingEpilog);
// We can't do this optimization with callee saved floating point registers because
// the stack would be allocated in a wrong spot.
if (maskCalleeSavedFloat != RBM_NONE)
return RBM_NONE;
// Allocate space for small frames by pushing extra registers. This generates smaller and faster code
// than the extra sub sp,XXX / add sp,XXX would.
// R0 and R1 may be used for the return value. Keep things simple and just skip the optimization
// for the 3*REGSIZE_BYTES and 4*REGSIZE_BYTES cases. They are less common and they have more
// significant negative side-effects (more memory bus traffic).
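// For example (illustrative): a REGSIZE_BYTES frame is allocated by adding r3 to the prolog
// push {...} and the epilog pop {...}, and a 2*REGSIZE_BYTES frame uses r2 and r3;
// anything larger falls back to sub sp,XXX / add sp,XXX.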
switch (frameSize)
{
case REGSIZE_BYTES:
return RBM_R3;
case 2 * REGSIZE_BYTES:
return RBM_R2 | RBM_R3;
default:
return RBM_NONE;
}
}
//-----------------------------------------------------------------------------------
// instGen_MemoryBarrier: Emit a MemoryBarrier instruction
//
// Arguments:
// barrierKind - kind of barrier to emit (ignored on arm32)
//
// Notes:
// All MemoryBarrier instructions can be removed with DOTNET_JitNoMemoryBarriers=1.
// The barrierKind argument is ignored on arm32 and a full memory barrier is emitted.
//
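// On arm32 this boils down to a single "dmb sy" (illustrative), unless a previous barrier
// in the same IG can be reused as described below.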
void CodeGen::instGen_MemoryBarrier(BarrierKind barrierKind)
{
#ifdef DEBUG
if (JitConfig.JitNoMemoryBarriers() == 1)
{
return;
}
#endif // DEBUG
// Avoid emitting redundant memory barriers on arm32 if they belong to the same IG
// and there were no memory accesses in-between them
if ((GetEmitter()->emitLastMemBarrier != nullptr) && compiler->opts.OptimizationEnabled())
{
assert(GetEmitter()->emitLastMemBarrier->idSmallCns() == INS_BARRIER_SY);
}
else
{
// ARM has only full barriers, so all barriers need to be emitted as full.
GetEmitter()->emitIns_I(INS_dmb, EA_4BYTE, INS_BARRIER_SY);
}
}
bool CodeGen::genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
if (!jmpEpilog && regSet.rsMaskPreSpillRegs(true) == RBM_NONE)
return true;
else
return false;
}
void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
regMaskTP maskPopRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED;
regMaskTP maskPopRegsFloat = maskPopRegs & RBM_ALLFLOAT;
regMaskTP maskPopRegsInt = maskPopRegs & ~maskPopRegsFloat;
// First, pop float registers
if (maskPopRegsFloat != RBM_NONE)
{
genPopFltRegs(maskPopRegsFloat);
compiler->unwindPopMaskFloat(maskPopRegsFloat);
}
// Next, pop integer registers
if (!jmpEpilog)
{
regMaskTP maskStackAlloc = genStackAllocRegisterMask(compiler->compLclFrameSize, maskPopRegsFloat);
maskPopRegsInt |= maskStackAlloc;
}
if (isFramePointerUsed())
{
assert(!regSet.rsRegsModified(RBM_FPBASE));
maskPopRegsInt |= RBM_FPBASE;
}
if (genCanUsePopToReturn(maskPopRegsInt, jmpEpilog))
{
maskPopRegsInt |= RBM_PC;
// Record the fact that we use a pop to the PC to perform the return
genUsedPopToReturn = true;
}
else
{
maskPopRegsInt |= RBM_LR;
// Record the fact that we did not use a pop to the PC to perform the return
genUsedPopToReturn = false;
}
assert(FitsIn<int>(maskPopRegsInt));
inst_IV(INS_pop, (int)maskPopRegsInt);
compiler->unwindPopMaskInt(maskPopRegsInt);
}
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
* Funclets have the following incoming arguments:
*
* catch: r0 = the exception object that was caught (see GT_CATCH_ARG)
* filter: r0 = the exception object to filter (see GT_CATCH_ARG), r1 = CallerSP of the containing function
* finally/fault: none
*
* Funclets set the following registers on exit:
*
* catch: r0 = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: r0 = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* The ARM funclet prolog sequence is:
*
* push {regs,lr} ; We push the callee-saved regs and 'lr'.
* ; TODO-ARM-CQ: We probably only need to save lr, plus any callee-save registers that we
* ; actually use in the funclet. Currently, we save the same set of callee-saved regs
* ; calculated for the entire function.
* sub sp, XXX ; Establish the rest of the frame.
* ; XXX is determined by lvaOutgoingArgSpaceSize plus space for the PSP slot, aligned
* ; up to preserve stack alignment. If we push an odd number of registers, we also
* ; generate this, to keep the stack aligned.
*
* ; Fill the PSP slot, for use by the VM (it gets reported with the GC info), or by code generation of nested
* ; filters.
* ; This is not part of the "OS prolog"; it has no associated unwind data, and is not reversed in the funclet
* ; epilog.
*
* if (this is a filter funclet)
* {
* // r1 on entry to a filter funclet is CallerSP of the containing function:
* // either the main function, or the funclet for a handler that this filter is dynamically nested within.
* // Note that a filter can be dynamically nested within a funclet even if it is not statically within
* // a funclet. Consider:
* //
* // try {
* // try {
* // throw new Exception();
* // } catch(Exception) {
* // throw new Exception(); // The exception thrown here ...
* // }
* // } filter { // ... will be processed here, while the "catch" funclet frame is
* // // still on the stack
* // } filter-handler {
* // }
* //
* // Because of this, we need a PSP in the main function anytime a filter funclet doesn't know whether the
* // enclosing frame will be a funclet or main function. We won't know any time there is a filter protecting
* // nested EH. To simplify, we just always create a main function PSP for any function with a filter.
*
* ldr r1, [r1 - PSP_slot_CallerSP_offset] ; Load the CallerSP of the main function (stored in the PSP of
* ; the dynamically containing funclet or function)
* str r1, [sp + PSP_slot_SP_offset] ; store the PSP
* sub r11, r1, Function_CallerSP_to_FP_delta ; re-establish the frame pointer
* }
* else
* {
* // This is NOT a filter funclet. The VM re-establishes the frame pointer on entry.
* // TODO-ARM-CQ: if VM set r1 to CallerSP on entry, like for filters, we could save an instruction.
*
* add r3, r11, Function_CallerSP_to_FP_delta ; compute the CallerSP, given the frame pointer. r3 is scratch.
* str r3, [sp + PSP_slot_SP_offset] ; store the PSP
* }
*
* The epilog sequence is then:
*
* add sp, XXX ; if necessary
* pop {regs,pc}
*
* If it is worth it, we could push r0, r1, r2, r3 instead of using an additional add/sub instruction.
* Code size would be smaller, but we would be writing to / reading from the stack, which might be slow.
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming |
* | arguments |
* +=======================+ <---- Caller's SP
* |Callee saved registers |
* |-----------------------|
* |Pre-spill regs space | // This is only necessary to keep the PSP slot at the same offset
* | | // in function and funclet
* |-----------------------|
* | PSP slot | // Omitted in CoreRT ABI
* |-----------------------|
* ~ possible 4 byte pad ~
* ~ for alignment ~
* |-----------------------|
* | Outgoing arg space |
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*/
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFuncletProlog()\n");
#endif
assert(block != NULL);
assert(block->bbFlags & BBF_FUNCLET_BEG);
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
regMaskTP maskPushRegsFloat = genFuncletInfo.fiSaveRegs & RBM_ALLFLOAT;
regMaskTP maskPushRegsInt = genFuncletInfo.fiSaveRegs & ~maskPushRegsFloat;
regMaskTP maskStackAlloc = genStackAllocRegisterMask(genFuncletInfo.fiSpDelta, maskPushRegsFloat);
maskPushRegsInt |= maskStackAlloc;
assert(FitsIn<int>(maskPushRegsInt));
inst_IV(INS_push, (int)maskPushRegsInt);
compiler->unwindPushMaskInt(maskPushRegsInt);
if (maskPushRegsFloat != RBM_NONE)
{
genPushFltRegs(maskPushRegsFloat);
compiler->unwindPushMaskFloat(maskPushRegsFloat);
}
bool isFilter = (block->bbCatchTyp == BBCT_FILTER);
regMaskTP maskArgRegsLiveIn;
if (isFilter)
{
maskArgRegsLiveIn = RBM_R0 | RBM_R1;
}
else if ((block->bbCatchTyp == BBCT_FINALLY) || (block->bbCatchTyp == BBCT_FAULT))
{
maskArgRegsLiveIn = RBM_NONE;
}
else
{
maskArgRegsLiveIn = RBM_R0;
}
regNumber initReg = REG_R3; // R3 is never live on entry to a funclet, so it can be trashed
bool initRegZeroed = false;
if (maskStackAlloc == RBM_NONE)
{
genAllocLclFrame(genFuncletInfo.fiSpDelta, initReg, &initRegZeroed, maskArgRegsLiveIn);
}
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// If there is no PSPSym (CoreRT ABI), we are done.
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
if (isFilter)
{
// This is the first block of a filter
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, REG_R1, REG_R1, genFuncletInfo.fiPSP_slot_CallerSP_offset);
regSet.verifyRegUsed(REG_R1);
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_R1, REG_SPBASE, genFuncletInfo.fiPSP_slot_SP_offset);
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_FPBASE, REG_R1,
genFuncletInfo.fiFunctionCallerSPtoFPdelta);
}
else
{
// This is a non-filter funclet
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_R3, REG_FPBASE,
genFuncletInfo.fiFunctionCallerSPtoFPdelta);
regSet.verifyRegUsed(REG_R3);
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_R3, REG_SPBASE, genFuncletInfo.fiPSP_slot_SP_offset);
}
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFuncletEpilog()\n");
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
// Just as for the main function, we delay starting the unwind codes until we have
// an instruction which we know needs an unwind code. This is to support code like
// this:
// movw r3, 0x38e0
// add sp, r3
// pop {r4,r5,r6,r10,r11,pc}
// where the "movw" shouldn't be part of the unwind codes. See genFnEpilog() for more details.
bool unwindStarted = false;
/* The saved regs info saves the LR register. We need to pop the PC register to return */
assert(genFuncletInfo.fiSaveRegs & RBM_LR);
regMaskTP maskPopRegsFloat = genFuncletInfo.fiSaveRegs & RBM_ALLFLOAT;
regMaskTP maskPopRegsInt = genFuncletInfo.fiSaveRegs & ~maskPopRegsFloat;
regMaskTP maskStackAlloc = genStackAllocRegisterMask(genFuncletInfo.fiSpDelta, maskPopRegsFloat);
maskPopRegsInt |= maskStackAlloc;
if (maskStackAlloc == RBM_NONE)
{
genFreeLclFrame(genFuncletInfo.fiSpDelta, &unwindStarted);
}
if (!unwindStarted)
{
// We'll definitely generate an unwindable instruction next
compiler->unwindBegEpilog();
unwindStarted = true;
}
maskPopRegsInt &= ~RBM_LR;
maskPopRegsInt |= RBM_PC;
if (maskPopRegsFloat != RBM_NONE)
{
genPopFltRegs(maskPopRegsFloat);
compiler->unwindPopMaskFloat(maskPopRegsFloat);
}
assert(FitsIn<int>(maskPopRegsInt));
inst_IV(INS_pop, (int)maskPopRegsInt);
compiler->unwindPopMaskInt(maskPopRegsInt);
compiler->unwindEndEpilog();
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
* Note that all funclet prologs are identical, and all funclet epilogs are
* identical (per type: filters are identical, and non-filters are identical).
* Thus, we compute the data used for these just once.
*
* See genFuncletProlog() for more information about the prolog/epilog sequences.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (compiler->ehAnyFunclets())
{
assert(isFramePointerUsed());
assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT); // The frame size and offsets must be
// finalized
// Frame pointer doesn't point at the end, it points at the pushed r11. So, instead
// of adding the number of callee-saved regs to CallerSP, we add 1 for lr and 1 for r11
// (plus the "pre spill regs"). Note that we assume r12 and r13 aren't saved
// (also assumed in genFnProlog()).
assert((regSet.rsMaskCalleeSaved & (RBM_R12 | RBM_R13)) == 0);
unsigned preSpillRegArgSize = genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
genFuncletInfo.fiFunctionCallerSPtoFPdelta = preSpillRegArgSize + 2 * REGSIZE_BYTES;
regMaskTP rsMaskSaveRegs = regSet.rsMaskCalleeSaved;
unsigned saveRegsCount = genCountBits(rsMaskSaveRegs);
unsigned saveRegsSize = saveRegsCount * REGSIZE_BYTES; // bytes of regs we're saving
assert(compiler->lvaOutgoingArgSpaceSize % REGSIZE_BYTES == 0);
unsigned funcletFrameSize =
preSpillRegArgSize + saveRegsSize + REGSIZE_BYTES /* PSP slot */ + compiler->lvaOutgoingArgSpaceSize;
unsigned funcletFrameSizeAligned = roundUp(funcletFrameSize, STACK_ALIGN);
unsigned funcletFrameAlignmentPad = funcletFrameSizeAligned - funcletFrameSize;
unsigned spDelta = funcletFrameSizeAligned - saveRegsSize;
unsigned PSP_slot_SP_offset = compiler->lvaOutgoingArgSpaceSize + funcletFrameAlignmentPad;
int PSP_slot_CallerSP_offset =
-(int)(funcletFrameSize - compiler->lvaOutgoingArgSpaceSize); // NOTE: it's negative!
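// Worked example (editor's sketch, not taken from any particular method): with no pre-spill,
// four saved registers (16 bytes) and lvaOutgoingArgSpaceSize == 8, we get
// funcletFrameSize = 0 + 16 + 4 + 8 = 28, funcletFrameSizeAligned = 32, a 4 byte alignment pad,
// spDelta = 32 - 16 = 16, PSP_slot_SP_offset = 8 + 4 = 12 and
// PSP_slot_CallerSP_offset = -(28 - 8) = -20.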
/* Now save it for future use */
genFuncletInfo.fiSaveRegs = rsMaskSaveRegs;
genFuncletInfo.fiSpDelta = spDelta;
genFuncletInfo.fiPSP_slot_SP_offset = PSP_slot_SP_offset;
genFuncletInfo.fiPSP_slot_CallerSP_offset = PSP_slot_CallerSP_offset;
#ifdef DEBUG
if (verbose)
{
printf("\n");
printf("Funclet prolog / epilog info\n");
printf(" Function CallerSP-to-FP delta: %d\n", genFuncletInfo.fiFunctionCallerSPtoFPdelta);
printf(" Save regs: ");
dspRegMask(rsMaskSaveRegs);
printf("\n");
printf(" SP delta: %d\n", genFuncletInfo.fiSpDelta);
printf(" PSP slot SP offset: %d\n", genFuncletInfo.fiPSP_slot_SP_offset);
printf(" PSP slot Caller SP offset: %d\n", genFuncletInfo.fiPSP_slot_CallerSP_offset);
if (PSP_slot_CallerSP_offset != compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym))
{
printf("lvaGetCallerSPRelativeOffset(lvaPSPSym): %d\n",
compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym));
}
}
#endif // DEBUG
assert(PSP_slot_CallerSP_offset < 0);
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
assert(PSP_slot_CallerSP_offset ==
compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main
// function and funclet!
}
}
}
void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
noway_assert(isFramePointerUsed()); // We need an explicit frame pointer
// We either generate:
// add r1, r11, 8
// str r1, [reg + PSPSymOffset]
// or:
// add r1, sp, 76
// str r1, [reg + PSPSymOffset]
// depending on the smallest encoding
int SPtoCallerSPdelta = -genCallerSPtoInitialSPdelta();
int callerSPOffs;
regNumber regBase;
if (arm_Valid_Imm_For_Add_SP(SPtoCallerSPdelta))
{
// use the "add <reg>, sp, imm" form
callerSPOffs = SPtoCallerSPdelta;
regBase = REG_SPBASE;
}
else
{
// use the "add <reg>, r11, imm" form
int FPtoCallerSPdelta = -genCallerSPtoFPdelta();
noway_assert(arm_Valid_Imm_For_Add(FPtoCallerSPdelta, INS_FLAGS_DONT_CARE));
callerSPOffs = FPtoCallerSPdelta;
regBase = REG_FPBASE;
}
// We will just use the initReg since it is an available register
// and we are probably done using it anyway...
regNumber regTmp = initReg;
*pInitRegZeroed = false;
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, regTmp, regBase, callerSPOffs);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0);
}
void CodeGen::genInsertNopForUnwinder(BasicBlock* block)
{
// If this block is the target of a finally return, we need to add a preceding NOP, in the same EH region,
// so the unwinder doesn't get confused by our "movw lr, xxx; movt lr, xxx; b Lyyy" calling convention that
// calls the funclet during non-exceptional control flow.
if (block->bbFlags & BBF_FINALLY_TARGET)
{
assert(block->bbFlags & BBF_HAS_LABEL);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nEmitting finally target NOP predecessor for " FMT_BB "\n", block->bbNum);
}
#endif
// Create a label that we'll use for computing the start of an EH region, if this block is
// at the beginning of such a region. If we used the existing bbEmitCookie as is for
// determining the EH regions, then this NOP would end up outside of the region, if this
// block starts an EH region. If we pointed the existing bbEmitCookie here, then the NOP
// would be executed, which we would prefer not to do.
block->bbUnwindNopEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, false DEBUG_ARG(block));
instGen(INS_nop);
}
}
//-----------------------------------------------------------------------------
// genZeroInitFrameUsingBlockInit: architecture-specific helper for genZeroInitFrame in the case
// `genUseBlockInit` is set.
//
// Arguments:
// untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init
// code will end initializing memory (not inclusive).
// untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will
// start zero initializing memory.
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
assert(genUseBlockInit);
assert(untrLclHi > untrLclLo);
// Generate the following code:
//
// For cnt less than 10
//
// mov rZero1, 0
// mov rZero2, 0
// mov rCnt, <cnt>
// stm <rZero1,rZero2>,[rAddr!]
// <optional> stm <rZero1,rZero2>,[rAddr!]
// <optional> stm <rZero1,rZero2>,[rAddr!]
// <optional> stm <rZero1,rZero2>,[rAddr!]
// <optional> str rZero1,[rAddr]
//
// For rCnt greater than or equal to 10
//
// mov rZero1, 0
// mov rZero2, 0
// mov rCnt, <cnt/2>
// sub rAddr, sp, OFFS
//
// loop:
// stm <rZero1,rZero2>,[rAddr!]
// sub rCnt,rCnt,1
// jnz loop
//
// <optional> str rZero1,[rAddr] // When cnt is odd
regNumber rAddr;
regNumber rCnt = REG_NA; // Invalid
regMaskTP regMask;
regMaskTP availMask = regSet.rsGetModifiedRegsMask() | RBM_INT_CALLEE_TRASH; // Set of available registers
availMask &= ~intRegState.rsCalleeRegArgMaskLiveIn; // Remove all of the incoming argument registers as they are
// currently live
availMask &= ~genRegMask(initReg); // Remove the pre-calculated initReg as we will zero it and maybe use it for
// a large constant.
if (compiler->compLocallocUsed)
{
availMask &= ~RBM_SAVED_LOCALLOC_SP; // Remove the register reserved when we have a localloc frame
}
regNumber rZero1; // We're going to use initReg for rZero1
regNumber rZero2;
// We pick the next lowest register number for rZero2
noway_assert(availMask != RBM_NONE);
regMask = genFindLowestBit(availMask);
rZero2 = genRegNumFromMask(regMask);
availMask &= ~regMask;
assert((genRegMask(rZero2) & intRegState.rsCalleeRegArgMaskLiveIn) == 0); // rZero2 is not a live incoming
// argument reg
// We pick the next lowest register number for rAddr
noway_assert(availMask != RBM_NONE);
regMask = genFindLowestBit(availMask);
rAddr = genRegNumFromMask(regMask);
availMask &= ~regMask;
bool useLoop = false;
unsigned uCntBytes = untrLclHi - untrLclLo;
assert((uCntBytes % sizeof(int)) == 0); // The smallest stack slot is always 4 bytes.
unsigned uCntSlots = uCntBytes / REGSIZE_BYTES; // How many register sized stack slots we're going to use.
// When uCntSlots is 9 or less, we will emit a sequence of stm/stp instructions inline.
// When it is 10 or greater, we will emit a loop containing a stm/stp instruction.
// In both of these cases the stm/stp instruction will write two zeros to memory
// and we will use a single str instruction at the end whenever we have an odd count.
if (uCntSlots >= 10)
useLoop = true;
if (useLoop)
{
// We pick the next lowest register number for rCnt
noway_assert(availMask != RBM_NONE);
regMask = genFindLowestBit(availMask);
rCnt = genRegNumFromMask(regMask);
availMask &= ~regMask;
}
// rAddr is not a live incoming argument reg
assert((genRegMask(rAddr) & intRegState.rsCalleeRegArgMaskLiveIn) == 0);
if (arm_Valid_Imm_For_Add(untrLclLo, INS_FLAGS_DONT_CARE))
{
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, rAddr, genFramePointerReg(), untrLclLo);
}
else
{
// Load immediate into the InitReg register
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, (ssize_t)untrLclLo);
GetEmitter()->emitIns_R_R_R(INS_add, EA_PTRSIZE, rAddr, genFramePointerReg(), initReg);
*pInitRegZeroed = false;
}
if (useLoop)
{
noway_assert(uCntSlots >= 2);
assert((genRegMask(rCnt) & intRegState.rsCalleeRegArgMaskLiveIn) == 0); // rCnt is not a live incoming
// argument reg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, rCnt, (ssize_t)uCntSlots / 2);
}
rZero1 = genGetZeroReg(initReg, pInitRegZeroed);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, rZero2);
target_ssize_t stmImm = (target_ssize_t)(genRegMask(rZero1) | genRegMask(rZero2));
if (!useLoop)
{
while (uCntBytes >= REGSIZE_BYTES * 2)
{
GetEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm);
uCntBytes -= REGSIZE_BYTES * 2;
}
}
else
{
GetEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm); // zero stack slots
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, rCnt, 1, INS_FLAGS_SET);
GetEmitter()->emitIns_J(INS_bhi, NULL, -3);
uCntBytes %= REGSIZE_BYTES * 2;
}
if (uCntBytes >= REGSIZE_BYTES) // check and zero the last register-sized stack slot (odd number)
{
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, rZero1, rAddr, 0);
uCntBytes -= REGSIZE_BYTES;
}
noway_assert(uCntBytes == 0);
}
#endif // TARGET_ARM
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/codegenarmarch.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX ARM/ARM64 Code Generator Common Code XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARMARCH // This file is ONLY used for ARM and ARM64 architectures
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "emit.h"
#include "patchpointinfo.h"
//------------------------------------------------------------------------
// genStackPointerConstantAdjustment: add a specified constant value to the stack pointer.
// No probe is done.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero.
// regTmp - an available temporary register that is used if 'spDelta' cannot be encoded by
// 'sub sp, sp, #spDelta' instruction.
// Can be REG_NA if the caller knows for certain that 'spDelta' fits into the immediate
// value range.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
// We assert that the SP change is less than one page. If it's greater, you should have called a
// function that does a probe, which will in turn call this function.
assert((target_size_t)(-spDelta) <= compiler->eeGetPageSize());
#ifdef TARGET_ARM64
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, -spDelta, regTmp);
#else
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, -spDelta, INS_FLAGS_DONT_CARE, regTmp);
#endif
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentWithProbe: add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Should only be called as a helper for
// genStackPointerConstantAdjustmentLoopWithProbe.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero. If zero, the probe happens,
// but the stack pointer doesn't move.
// regTmp - temporary register to use as target for probe load instruction
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
genStackPointerConstantAdjustment(spDelta, regTmp);
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentLoopWithProbe: Add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Generates one probe per page, up to the total amount required.
// This will generate a sequence of probes in-line.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative.
// regTmp - temporary register to use as target for probe load instruction
//
// Return Value:
// Offset in bytes from SP to last probed address.
//
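// For example (illustrative): with a 4K page size and spDelta == -10K, the loop emits three
// probe+sub pairs of 4K, 4K and 2K, and lastTouchDelta is 10K % 4K = 2K; if that leaves the
// last probe too far from the final SP, one more probe is emitted and 0 is returned.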
target_ssize_t CodeGen::genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
const target_size_t pageSize = compiler->eeGetPageSize();
ssize_t spRemainingDelta = spDelta;
do
{
ssize_t spOneDelta = -(ssize_t)min((target_size_t)-spRemainingDelta, pageSize);
genStackPointerConstantAdjustmentWithProbe(spOneDelta, regTmp);
spRemainingDelta -= spOneDelta;
} while (spRemainingDelta < 0);
// What offset from the final SP was the last probe? This depends on the fact that
// genStackPointerConstantAdjustmentWithProbe() probes first, then does "SUB SP".
target_size_t lastTouchDelta = (target_size_t)(-spDelta) % pageSize;
if ((lastTouchDelta == 0) || (lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize))
{
// We haven't probed almost a complete page. If lastTouchDelta==0, then spDelta was an exact
// multiple of pageSize, which means we last probed exactly one page back. Otherwise, we probed
// the page, but very far from the end. If the next action on the stack might subtract from SP
// first, before touching the current SP, then we do one more probe at the very bottom. This can
// happen on x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV"
// strategy.
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
lastTouchDelta = 0;
}
return lastTouchDelta;
}
//------------------------------------------------------------------------
// genCodeForTreeNode: Generate code for a single node in the tree.
//
// Preconditions:
// All operands have been evaluated.
//
void CodeGen::genCodeForTreeNode(GenTree* treeNode)
{
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
#ifdef DEBUG
// Validate that all the operands for the current node are consumed in order.
// This is important because LSRA ensures that any necessary copies will be
// handled correctly.
lastConsumedNode = nullptr;
if (compiler->verbose)
{
unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
compiler->gtDispLIRNode(treeNode, "Generating: ");
}
#endif // DEBUG
// Is this a node whose value is already in a register? LSRA denotes this by
// setting the GTF_REUSE_REG_VAL flag.
if (treeNode->IsReuseRegVal())
{
// For now, this is only used for constant nodes.
assert((treeNode->OperGet() == GT_CNS_INT) || (treeNode->OperGet() == GT_CNS_DBL));
JITDUMP(" TreeNode is marked ReuseReg\n");
return;
}
// contained nodes are part of their parents for codegen purposes
// ex : immediates, most LEAs
if (treeNode->isContained())
{
return;
}
switch (treeNode->gtOper)
{
case GT_START_NONGC:
GetEmitter()->emitDisableGC();
break;
case GT_START_PREEMPTGC:
// Kill callee saves GC registers, and create a label
// so that information gets propagated to the emitter.
gcInfo.gcMarkRegSetNpt(RBM_INT_CALLEE_SAVED);
genDefineTempLabel(genCreateTempLabel());
break;
case GT_PROF_HOOK:
// We should be seeing this only if profiler hook is needed
noway_assert(compiler->compIsProfilerHookNeeded());
#ifdef PROFILING_SUPPORTED
// Right now this node is used only for tail calls. In future if
// we intend to use it for Enter or Leave hooks, add a data member
// to this node indicating the kind of profiler hook. For example,
// helper number can be used.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
break;
case GT_LCLHEAP:
genLclHeap(treeNode);
break;
case GT_CNS_INT:
case GT_CNS_DBL:
genSetRegToConst(targetReg, targetType, treeNode);
genProduceReg(treeNode);
break;
case GT_NOT:
case GT_NEG:
genCodeForNegNot(treeNode);
break;
#if defined(TARGET_ARM64)
case GT_BSWAP:
case GT_BSWAP16:
genCodeForBswap(treeNode);
break;
#endif // defined(TARGET_ARM64)
case GT_MOD:
case GT_UMOD:
case GT_DIV:
case GT_UDIV:
genCodeForDivMod(treeNode->AsOp());
break;
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_AND_NOT:
assert(varTypeIsIntegralOrI(treeNode));
FALLTHROUGH;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif // !defined(TARGET_64BIT)
case GT_ADD:
case GT_SUB:
case GT_MUL:
genConsumeOperands(treeNode->AsOp());
genCodeForBinary(treeNode->AsOp());
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
// case GT_ROL: // No ROL instruction on ARM; it has been lowered to ROR.
case GT_ROR:
genCodeForShift(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LSH_HI:
case GT_RSH_LO:
genCodeForShiftLong(treeNode);
break;
#endif // !defined(TARGET_64BIT)
case GT_CAST:
genCodeForCast(treeNode->AsOp());
break;
case GT_BITCAST:
genCodeForBitCast(treeNode->AsOp());
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
genCodeForLclAddr(treeNode);
break;
case GT_LCL_FLD:
genCodeForLclFld(treeNode->AsLclFld());
break;
case GT_LCL_VAR:
genCodeForLclVar(treeNode->AsLclVar());
break;
case GT_STORE_LCL_FLD:
genCodeForStoreLclFld(treeNode->AsLclFld());
break;
case GT_STORE_LCL_VAR:
genCodeForStoreLclVar(treeNode->AsLclVar());
break;
case GT_RETFILT:
case GT_RETURN:
genReturn(treeNode);
break;
case GT_LEA:
// If we are here, it is the case where there is an LEA that cannot be folded into a parent instruction.
genLeaInstruction(treeNode->AsAddrMode());
break;
case GT_INDEX_ADDR:
genCodeForIndexAddr(treeNode->AsIndexAddr());
break;
case GT_IND:
genCodeForIndir(treeNode->AsIndir());
break;
case GT_MUL_LONG:
genCodeForMulLong(treeNode->AsOp());
break;
#ifdef TARGET_ARM64
case GT_MADD:
genCodeForMadd(treeNode->AsOp());
break;
case GT_INC_SATURATE:
genCodeForIncSaturate(treeNode);
break;
case GT_MULHI:
genCodeForMulHi(treeNode->AsOp());
break;
case GT_SWAP:
genCodeForSwap(treeNode->AsOp());
break;
case GT_ADDEX:
genCodeForAddEx(treeNode->AsOp());
break;
case GT_BFIZ:
genCodeForBfiz(treeNode->AsOp());
break;
#endif // TARGET_ARM64
case GT_JMP:
genJmpMethod(treeNode);
break;
case GT_CKFINITE:
genCkfinite(treeNode);
break;
case GT_INTRINSIC:
genIntrinsic(treeNode);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
genSIMDIntrinsic(treeNode->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
genHWIntrinsic(treeNode->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_CMP:
#ifdef TARGET_ARM64
case GT_TEST_EQ:
case GT_TEST_NE:
#endif // TARGET_ARM64
genCodeForCompare(treeNode->AsOp());
break;
case GT_JTRUE:
genCodeForJumpTrue(treeNode->AsOp());
break;
#ifdef TARGET_ARM64
case GT_JCMP:
genCodeForJumpCompare(treeNode->AsOp());
break;
#endif // TARGET_ARM64
case GT_JCC:
genCodeForJcc(treeNode->AsCC());
break;
case GT_SETCC:
genCodeForSetcc(treeNode->AsCC());
break;
case GT_RETURNTRAP:
genCodeForReturnTrap(treeNode->AsOp());
break;
case GT_STOREIND:
genCodeForStoreInd(treeNode->AsStoreInd());
break;
case GT_COPY:
// This is handled at the time we call genConsumeReg() on the GT_COPY
break;
case GT_FIELD_LIST:
// Should always be marked contained.
assert(!"LIST, FIELD_LIST nodes should always be marked contained.");
break;
case GT_PUTARG_STK:
genPutArgStk(treeNode->AsPutArgStk());
break;
case GT_PUTARG_REG:
genPutArgReg(treeNode->AsOp());
break;
case GT_PUTARG_SPLIT:
genPutArgSplit(treeNode->AsPutArgSplit());
break;
case GT_CALL:
genCall(treeNode->AsCall());
break;
case GT_MEMORYBARRIER:
{
CodeGen::BarrierKind barrierKind =
treeNode->gtFlags & GTF_MEMORYBARRIER_LOAD ? BARRIER_LOAD_ONLY : BARRIER_FULL;
instGen_MemoryBarrier(barrierKind);
break;
}
#ifdef TARGET_ARM64
case GT_XCHG:
case GT_XORR:
case GT_XAND:
case GT_XADD:
genLockedInstructions(treeNode->AsOp());
break;
case GT_CMPXCHG:
genCodeForCmpXchg(treeNode->AsCmpXchg());
break;
#endif // TARGET_ARM64
case GT_RELOAD:
// do nothing - reload is just a marker.
// The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
// into the register specified in this node.
break;
case GT_NOP:
break;
case GT_KEEPALIVE:
if (treeNode->AsOp()->gtOp1->isContained())
{
// For this case we simply need to update the lifetime of the local.
genUpdateLife(treeNode->AsOp()->gtOp1);
}
else
{
genConsumeReg(treeNode->AsOp()->gtOp1);
}
break;
case GT_NO_OP:
instGen(INS_nop);
break;
case GT_BOUNDS_CHECK:
genRangeCheck(treeNode);
break;
case GT_PHYSREG:
genCodeForPhysReg(treeNode->AsPhysReg());
break;
case GT_NULLCHECK:
genCodeForNullCheck(treeNode->AsIndir());
break;
case GT_CATCH_ARG:
noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
/* Catch arguments get passed in a register. genCodeForBBlist()
would have marked it as holding a GC object, but not used. */
noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
genConsumeReg(treeNode);
break;
case GT_PINVOKE_PROLOG:
noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
#ifdef PSEUDORANDOM_NOP_INSERTION
// the runtime side requires the codegen here to be consistent
emit->emitDisableRandomNops();
#endif // PSEUDORANDOM_NOP_INSERTION
break;
case GT_LABEL:
genPendingCallLabel = genCreateTempLabel();
#if defined(TARGET_ARM)
genMov32RelocatableDisplacement(genPendingCallLabel, targetReg);
#else
emit->emitIns_R_L(INS_adr, EA_PTRSIZE, genPendingCallLabel, targetReg);
#endif
break;
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
case GT_JMPTABLE:
genJumpTable(treeNode);
break;
case GT_SWITCH_TABLE:
genTableBasedSwitch(treeNode);
break;
case GT_ARR_INDEX:
genCodeForArrIndex(treeNode->AsArrIndex());
break;
case GT_ARR_OFFSET:
genCodeForArrOffset(treeNode->AsArrOffs());
break;
#ifdef TARGET_ARM
case GT_CLS_VAR_ADDR:
emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->AsClsVar()->gtClsVarHnd, 0);
genProduceReg(treeNode);
break;
case GT_LONG:
assert(treeNode->isUsedFromReg());
genConsumeRegs(treeNode);
break;
#endif // TARGET_ARM
case GT_IL_OFFSET:
// Do nothing; these nodes are simply markers for debug info.
break;
default:
{
#ifdef DEBUG
char message[256];
_snprintf_s(message, ArrLen(message), _TRUNCATE, "NYI: Unimplemented node type %s",
GenTree::OpName(treeNode->OperGet()));
NYIRAW(message);
#else
NYI("unimplemented node");
#endif
}
break;
}
}
//---------------------------------------------------------------------
// genSetGSSecurityCookie: Set the "GS" security cookie in the prolog.
//
// Arguments:
// initReg - register to use as a scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->getNeedsGSSecurityCookie())
{
return;
}
if (compiler->opts.IsOSR() && compiler->info.compPatchpointInfo->HasSecurityCookie())
{
// Security cookie is on original frame and was initialized there.
return;
}
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
noway_assert(compiler->gsGlobalSecurityCookieVal != 0);
// initReg = #GlobalSecurityCookieVal; [frame.GSSecurityCookie] = initReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTR_DSP_RELOC, initReg, (ssize_t)compiler->gsGlobalSecurityCookieAddr,
INS_FLAGS_DONT_CARE DEBUGARG((size_t)THT_SetGSCookie) DEBUGARG(GTF_EMPTY));
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, initReg, initReg, 0);
regSet.verifyRegUsed(initReg);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
}
*pInitRegZeroed = false;
}
//------------------------------------------------------------------------
// genEmitGSCookieCheck: Generate code to check that the GS cookie
// wasn't trashed by a buffer overrun.
//
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
// Make sure that the return register is reported as live GC-ref so that any GC that kicks in while
// executing GS cookie check will not collect the object pointed to by REG_INTRET (R0).
if (!pushReg && (compiler->info.compRetNativeType == TYP_REF))
gcInfo.gcRegGCrefSetCur |= RBM_INTRET;
// We need two temporary registers, to load the GS cookie values and compare them. We can't use
// any argument registers if 'pushReg' is true (meaning we have a JMP call). They should be
// callee-trash registers, which should not contain anything interesting at this point.
// We don't have any IR node representing this check, so LSRA can't communicate registers
// for us to use.
regNumber regGSConst = REG_GSCOOKIE_TMP_0;
regNumber regGSValue = REG_GSCOOKIE_TMP_1;
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
// load the GS cookie constant into a reg
//
instGen_Set_Reg_To_Imm(EA_PTRSIZE, regGSConst, compiler->gsGlobalSecurityCookieVal);
}
else
{
// Ngen case - GS cookie constant needs to be accessed through an indirection.
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSConst, (ssize_t)compiler->gsGlobalSecurityCookieAddr,
INS_FLAGS_DONT_CARE DEBUGARG((size_t)THT_GSCookieCheck) DEBUGARG(GTF_EMPTY));
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, regGSConst, regGSConst, 0);
}
// Load this method's GS value from the stack frame
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, regGSValue, compiler->lvaGSSecurityCookie, 0);
    // Compare with the GS cookie constant
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regGSConst, regGSValue);
BasicBlock* gsCheckBlk = genCreateTempLabel();
inst_JMP(EJ_eq, gsCheckBlk);
// regGSConst and regGSValue aren't needed anymore, we can use them for helper call
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN, regGSConst);
genDefineTempLabel(gsCheckBlk);
}
//---------------------------------------------------------------------
// genIntrinsic - generate code for a given intrinsic
//
// Arguments
// treeNode - the GT_INTRINSIC node
//
// Return value:
// None
//
void CodeGen::genIntrinsic(GenTree* treeNode)
{
assert(treeNode->OperIs(GT_INTRINSIC));
// Both operand and its result must be of the same floating point type.
GenTree* srcNode = treeNode->AsOp()->gtOp1;
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
// Only a subset of functions are treated as math intrinsics.
//
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Abs:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_ABS, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
#ifdef TARGET_ARM64
case NI_System_Math_Ceiling:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintp, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Floor:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintm, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Truncate:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintz, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Round:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintn, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Max:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R_R(INS_fmax, emitActualTypeSize(treeNode), treeNode->GetRegNum(),
treeNode->gtGetOp1()->GetRegNum(), treeNode->gtGetOp2()->GetRegNum());
break;
case NI_System_Math_Min:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R_R(INS_fmin, emitActualTypeSize(treeNode), treeNode->GetRegNum(),
treeNode->gtGetOp1()->GetRegNum(), treeNode->gtGetOp2()->GetRegNum());
break;
#endif // TARGET_ARM64
case NI_System_Math_Sqrt:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_SQRT, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
default:
assert(!"genIntrinsic: Unsupported intrinsic");
unreached();
}
genProduceReg(treeNode);
}
//---------------------------------------------------------------------
// genPutArgStk - generate code for a GT_PUTARG_STK node
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
// Return value:
// None
//
void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
{
assert(treeNode->OperIs(GT_PUTARG_STK));
GenTree* source = treeNode->gtOp1;
var_types targetType;
if (!compMacOsArm64Abi())
{
targetType = genActualType(source->TypeGet());
}
else
{
targetType = source->TypeGet();
}
emitter* emit = GetEmitter();
    // This is the varNum for our store operations;
    // typically it is the varNum for the outgoing arg space.
    // When we are generating a tail call it will be the varNum for arg0.
unsigned varNumOut = (unsigned)-1;
unsigned argOffsetMax = (unsigned)-1; // Records the maximum size of this area for assert checks
// Get argument offset to use with 'varNumOut'
// Here we cross check that argument offset hasn't changed from lowering to codegen since
// we are storing arg slot number in GT_PUTARG_STK node in lowering phase.
unsigned argOffsetOut = treeNode->getArgOffset();
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(treeNode->gtCall, treeNode);
assert(curArgTabEntry != nullptr);
DEBUG_ARG_SLOTS_ASSERT(argOffsetOut == (curArgTabEntry->slotNum * TARGET_POINTER_SIZE));
#endif // DEBUG
    // Where do we set up the stack arg: in the incoming or the outgoing arg area?
    // Fast tail calls implemented as epilog+jmp - the stack arg is set up in the incoming arg area.
    // All other calls - the stack arg is set up in the outgoing arg area.
if (treeNode->putInIncomingArgArea())
{
varNumOut = getFirstArgWithStackSlot();
argOffsetMax = compiler->compArgSize;
#if FEATURE_FASTTAILCALL
// This must be a fast tail call.
assert(treeNode->gtCall->IsFastTailCall());
// Since it is a fast tail call, the existence of first incoming arg is guaranteed
// because fast tail call requires that in-coming arg area of caller is >= out-going
// arg area required for tail call.
LclVarDsc* varDsc = compiler->lvaGetDesc(varNumOut);
assert(varDsc != nullptr);
#endif // FEATURE_FASTTAILCALL
}
else
{
varNumOut = compiler->lvaOutgoingArgSpaceVar;
argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
}
bool isStruct = (targetType == TYP_STRUCT) || (source->OperGet() == GT_FIELD_LIST);
if (!isStruct) // a normal non-Struct argument
{
if (varTypeIsSIMD(targetType))
{
assert(!source->isContained());
regNumber srcReg = genConsumeReg(source);
assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
assert(compMacOsArm64Abi() || treeNode->GetStackByteSize() % TARGET_POINTER_SIZE == 0);
#ifdef TARGET_ARM64
if (compMacOsArm64Abi() && (treeNode->GetStackByteSize() == 12))
{
regNumber tmpReg = treeNode->GetSingleTempReg();
GetEmitter()->emitStoreSIMD12ToLclOffset(varNumOut, argOffsetOut, srcReg, tmpReg);
argOffsetOut += 12;
}
else
#endif // TARGET_ARM64
{
emitAttr storeAttr = emitTypeSize(targetType);
emit->emitIns_S_R(INS_str, storeAttr, srcReg, varNumOut, argOffsetOut);
argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
}
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
return;
}
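        // On the Apple arm64 ABI small stack arguments are not widened to a full slot, so narrow the
        // store type to match the argument's stack size.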
if (compMacOsArm64Abi())
{
switch (treeNode->GetStackByteSize())
{
case 1:
targetType = TYP_BYTE;
break;
case 2:
targetType = TYP_SHORT;
break;
default:
assert(treeNode->GetStackByteSize() >= 4);
break;
}
}
instruction storeIns = ins_Store(targetType);
emitAttr storeAttr = emitTypeSize(targetType);
// If it is contained then source must be the integer constant zero
if (source->isContained())
{
#ifdef TARGET_ARM64
assert(source->OperGet() == GT_CNS_INT);
assert(source->AsIntConCommon()->IconValue() == 0);
emit->emitIns_S_R(storeIns, storeAttr, REG_ZR, varNumOut, argOffsetOut);
#else // !TARGET_ARM64
// There is no zero register on ARM32
unreached();
#endif // !TARGET_ARM64
}
else
{
genConsumeReg(source);
emit->emitIns_S_R(storeIns, storeAttr, source->GetRegNum(), varNumOut, argOffsetOut);
#ifdef TARGET_ARM
if (targetType == TYP_LONG)
{
// This case currently only occurs for double types that are passed as TYP_LONG;
// actual long types would have been decomposed by now.
assert(source->IsCopyOrReload());
regNumber otherReg = (regNumber)source->AsCopyOrReload()->GetRegNumByIdx(1);
assert(otherReg != REG_NA);
argOffsetOut += EA_4BYTE;
emit->emitIns_S_R(storeIns, storeAttr, otherReg, varNumOut, argOffsetOut);
}
#endif // TARGET_ARM
}
argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
}
else // We have some kind of a struct argument
{
assert(source->isContained()); // We expect that this node was marked as contained in Lower
if (source->OperGet() == GT_FIELD_LIST)
{
genPutArgStkFieldList(treeNode, varNumOut);
}
else // We must have a GT_OBJ or a GT_LCL_VAR
{
noway_assert(source->OperIs(GT_LCL_VAR, GT_OBJ));
var_types targetType = source->TypeGet();
noway_assert(varTypeIsStruct(targetType));
            // We will copy this struct to the stack, possibly using ldp/ldr instructions
            // on ARM64/ARM.
            // Set up loReg (and hiReg) from the internal registers that we reserved in Lowering.
//
regNumber loReg = treeNode->ExtractTempReg();
#ifdef TARGET_ARM64
regNumber hiReg = treeNode->GetSingleTempReg();
#endif // TARGET_ARM64
regNumber addrReg = REG_NA;
GenTreeLclVarCommon* varNode = nullptr;
GenTree* addrNode = nullptr;
if (source->OperGet() == GT_LCL_VAR)
{
varNode = source->AsLclVarCommon();
}
else // we must have a GT_OBJ
{
assert(source->OperGet() == GT_OBJ);
addrNode = source->AsOp()->gtOp1;
// addrNode can either be a GT_LCL_VAR_ADDR or an address expression
//
if (addrNode->OperGet() == GT_LCL_VAR_ADDR)
{
// We have a GT_OBJ(GT_LCL_VAR_ADDR)
//
                // We will treat this case the same as above
                // (i.e. if we just had this GT_LCL_VAR directly as the source),
                // so update 'source' to point to this GT_LCL_VAR_ADDR node
                // and continue to the codegen for the LCL_VAR node below.
//
assert(addrNode->isContained());
varNode = addrNode->AsLclVarCommon();
addrNode = nullptr;
}
else // addrNode is used
{
// TODO-Cleanup: `Lowering::NewPutArg` marks only `LCL_VAR_ADDR` as contained nowadays,
// but we use `genConsumeAddress` as a precaution, use `genConsumeReg()` instead.
assert(!addrNode->isContained());
// Generate code to load the address that we need into a register
genConsumeAddress(addrNode);
addrReg = addrNode->GetRegNum();
#ifdef TARGET_ARM64
                // If addrReg is equal to loReg, swap(loReg, hiReg).
                // This reduces code complexity by supporting only one addrReg-overwrite case.
if (loReg == addrReg)
{
loReg = hiReg;
hiReg = addrReg;
}
#endif // TARGET_ARM64
}
}
            // Either varNode or addrNode must have been set up above;
            // the xor ensures that only one of the two is set up, not both.
assert((varNode != nullptr) ^ (addrNode != nullptr));
ClassLayout* layout;
unsigned srcSize;
bool isHfa;
            // Set up the srcSize, isHfa and layout.
if (source->OperGet() == GT_LCL_VAR)
{
assert(varNode != nullptr);
LclVarDsc* varDsc = compiler->lvaGetDesc(varNode);
// This struct also must live in the stack frame
// And it can't live in a register (SIMD)
assert(varDsc->lvType == TYP_STRUCT);
assert(varDsc->lvOnFrame && !varDsc->lvRegister);
srcSize = varDsc->lvSize();
isHfa = varDsc->lvIsHfa();
layout = varDsc->GetLayout();
}
else // we must have a GT_OBJ
{
assert(source->OperGet() == GT_OBJ);
// If the source is an OBJ node then we need to use the type information
// it provides (size and GC layout) even if the node wraps a lclvar. Due
// to struct reinterpretation (e.g. Unsafe.As<X, Y>) it is possible that
// the OBJ node has a different type than the lclvar.
layout = source->AsObj()->GetLayout();
srcSize = layout->GetSize();
isHfa = compiler->IsHfa(layout->GetClassHandle());
}
            // If we have an HFA we can't have any GC pointers;
            // if not, then the max size for the struct is 16 bytes.
if (isHfa)
{
noway_assert(!layout->HasGCPtr());
}
#ifdef TARGET_ARM64
else
{
noway_assert(srcSize <= 2 * TARGET_POINTER_SIZE);
}
noway_assert(srcSize <= MAX_PASS_MULTIREG_BYTES);
#endif // TARGET_ARM64
unsigned structSize;
unsigned dstSize = treeNode->GetStackByteSize();
if (dstSize != srcSize)
{
                // We can generate smaller code if the store size is a multiple of TARGET_POINTER_SIZE.
// The dst size can be rounded up to PUTARG_STK size.
// The src size can be rounded up if it reads a local variable slot because the local
// variable stack allocation size is rounded up to be a multiple of the TARGET_POINTER_SIZE.
// The exception is arm64 apple arguments because they can be passed without padding.
if (varNode != nullptr)
{
                    // If we have a varNode, even if it was cast using `OBJ`, we can read its original memory size.
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNode);
const unsigned varStackSize = varDsc->lvSize();
if (varStackSize >= srcSize)
{
srcSize = varStackSize;
}
}
}
if (dstSize == srcSize)
{
structSize = dstSize;
}
else
{
// With Unsafe object cast we can have different strange combinations:
// PutArgStk<8>(Obj<16>(LclVar<8>)) -> copy 8 bytes;
// PutArgStk<16>(Obj<16>(LclVar<8>)) -> copy 16 bytes, reading undefined memory after the local.
structSize = min(dstSize, srcSize);
}
int remainingSize = structSize;
unsigned structOffset = 0;
unsigned nextIndex = 0;
#ifdef TARGET_ARM64
// For a >= 16-byte structSize we will generate a ldp and stp instruction each loop
// ldp x2, x3, [x0]
// stp x2, x3, [sp, #16]
while (remainingSize >= 2 * TARGET_POINTER_SIZE)
{
var_types type0 = layout->GetGCPtrType(nextIndex + 0);
var_types type1 = layout->GetGCPtrType(nextIndex + 1);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_R_S_S(INS_ldp, emitTypeSize(type0), emitTypeSize(type1), loReg, hiReg,
varNode->GetLclNum(), structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(loReg != addrReg);
noway_assert((remainingSize == 2 * TARGET_POINTER_SIZE) || (hiReg != addrReg));
// Load from our address expression source
emit->emitIns_R_R_R_I(INS_ldp, emitTypeSize(type0), loReg, hiReg, addrReg, structOffset,
INS_OPTS_NONE, emitTypeSize(type0));
}
// Emit stp instruction to store the two registers into the outgoing argument area
emit->emitIns_S_S_R_R(INS_stp, emitTypeSize(type0), emitTypeSize(type1), loReg, hiReg, varNumOut,
argOffsetOut);
argOffsetOut += (2 * TARGET_POINTER_SIZE); // We stored 16-bytes of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= (2 * TARGET_POINTER_SIZE); // We loaded 16-bytes of the struct
structOffset += (2 * TARGET_POINTER_SIZE);
nextIndex += 2;
}
#else // TARGET_ARM
// For a >= 4 byte structSize we will generate a ldr and str instruction each loop
// ldr r2, [r0]
// str r2, [sp, #16]
while (remainingSize >= TARGET_POINTER_SIZE)
{
var_types type = layout->GetGCPtrType(nextIndex);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), loReg, varNode->GetLclNum(), structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(loReg != addrReg || remainingSize == TARGET_POINTER_SIZE);
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), loReg, addrReg, structOffset);
}
// Emit str instruction to store the register into the outgoing argument area
emit->emitIns_S_R(INS_str, emitTypeSize(type), loReg, varNumOut, argOffsetOut);
argOffsetOut += TARGET_POINTER_SIZE; // We stored 4-bytes of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= TARGET_POINTER_SIZE; // We loaded 4-bytes of the struct
structOffset += TARGET_POINTER_SIZE;
nextIndex += 1;
}
#endif // TARGET_ARM
// For a 12-byte structSize we will generate two load instructions
// ldr x2, [x0]
// ldr w3, [x0, #8]
// str x2, [sp, #16]
// str w3, [sp, #24]
while (remainingSize > 0)
{
var_types type;
if (remainingSize >= TARGET_POINTER_SIZE)
{
type = layout->GetGCPtrType(nextIndex);
}
else // (remainingSize < TARGET_POINTER_SIZE)
{
// the left over size is smaller than a pointer and thus can never be a GC type
assert(!layout->IsGCPtr(nextIndex));
if (remainingSize == 1)
{
type = TYP_UBYTE;
}
else if (remainingSize == 2)
{
type = TYP_USHORT;
}
else
{
assert(remainingSize == 4);
type = TYP_UINT;
}
}
const emitAttr attr = emitTypeSize(type);
const unsigned moveSize = genTypeSize(type);
assert(EA_SIZE_IN_BYTES(attr) == moveSize);
remainingSize -= moveSize;
instruction loadIns = ins_Load(type);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(loadIns, attr, loReg, varNode->GetLclNum(), structOffset);
}
else
{
assert(loReg != addrReg);
// Load from our address expression source
emit->emitIns_R_R_I(loadIns, attr, loReg, addrReg, structOffset);
}
// Emit a store instruction to store the register into the outgoing argument area
instruction storeIns = ins_Store(type);
emit->emitIns_S_R(storeIns, attr, loReg, varNumOut, argOffsetOut);
argOffsetOut += moveSize;
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
structOffset += moveSize;
nextIndex++;
}
}
}
}
//---------------------------------------------------------------------
// genPutArgReg - generate code for a GT_PUTARG_REG node
//
// Arguments
// tree - the GT_PUTARG_REG node
//
// Return value:
// None
//
void CodeGen::genPutArgReg(GenTreeOp* tree)
{
assert(tree->OperIs(GT_PUTARG_REG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
assert(targetType != TYP_STRUCT);
GenTree* op1 = tree->gtOp1;
genConsumeReg(op1);
// If child node is not already in the register we need, move it
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genPutArgSplit - generate code for a GT_PUTARG_SPLIT node
//
// Arguments
// tree - the GT_PUTARG_SPLIT node
//
// Return value:
// None
//
void CodeGen::genPutArgSplit(GenTreePutArgSplit* treeNode)
{
assert(treeNode->OperIs(GT_PUTARG_SPLIT));
GenTree* source = treeNode->gtOp1;
emitter* emit = GetEmitter();
unsigned varNumOut = compiler->lvaOutgoingArgSpaceVar;
unsigned argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
if (source->OperGet() == GT_FIELD_LIST)
{
// Evaluate each of the GT_FIELD_LIST items into their register
// and store their register into the outgoing argument area
unsigned regIndex = 0;
unsigned firstOnStackOffs = UINT_MAX;
for (GenTreeFieldList::Use& use : source->AsFieldList()->Uses())
{
GenTree* nextArgNode = use.GetNode();
regNumber fieldReg = nextArgNode->GetRegNum();
genConsumeReg(nextArgNode);
if (regIndex >= treeNode->gtNumRegs)
{
if (firstOnStackOffs == UINT_MAX)
{
firstOnStackOffs = use.GetOffset();
}
var_types type = nextArgNode->TypeGet();
emitAttr attr = emitTypeSize(type);
unsigned offset = treeNode->getArgOffset() + use.GetOffset() - firstOnStackOffs;
// We can't write beyond the outgoing arg area
assert(offset + EA_SIZE_IN_BYTES(attr) <= argOffsetMax);
// Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
// argument area
emit->emitIns_S_R(ins_Store(type), attr, fieldReg, varNumOut, offset);
}
else
{
var_types type = treeNode->GetRegType(regIndex);
regNumber argReg = treeNode->GetRegNumByIdx(regIndex);
#ifdef TARGET_ARM
if (type == TYP_LONG)
{
// We should only see long fields for DOUBLEs passed in 2 integer registers, via bitcast.
// All other LONGs should have been decomposed.
// Handle the first INT, and then handle the 2nd below.
assert(nextArgNode->OperIs(GT_BITCAST));
type = TYP_INT;
inst_Mov(type, argReg, fieldReg, /* canSkip */ true);
// Now set up the next register for the 2nd INT
argReg = REG_NEXT(argReg);
regIndex++;
assert(argReg == treeNode->GetRegNumByIdx(regIndex));
fieldReg = nextArgNode->AsMultiRegOp()->GetRegNumByIdx(1);
}
#endif // TARGET_ARM
// If child node is not already in the register we need, move it
inst_Mov(type, argReg, fieldReg, /* canSkip */ true);
regIndex++;
}
}
}
else
{
var_types targetType = source->TypeGet();
assert(source->OperGet() == GT_OBJ);
assert(varTypeIsStruct(targetType));
regNumber baseReg = treeNode->ExtractTempReg();
regNumber addrReg = REG_NA;
GenTreeLclVarCommon* varNode = nullptr;
GenTree* addrNode = nullptr;
addrNode = source->AsOp()->gtOp1;
// addrNode can either be a GT_LCL_VAR_ADDR or an address expression
//
if (addrNode->OperGet() == GT_LCL_VAR_ADDR)
{
// We have a GT_OBJ(GT_LCL_VAR_ADDR)
//
            // We will treat this case the same as above
            // (i.e. if we just had this GT_LCL_VAR directly as the source),
            // so update 'source' to point to this GT_LCL_VAR_ADDR node
            // and continue to the codegen for the LCL_VAR node below.
//
varNode = addrNode->AsLclVarCommon();
addrNode = nullptr;
}
        // Either varNode or addrNode must have been set up above;
        // the xor ensures that only one of the two is set up, not both.
assert((varNode != nullptr) ^ (addrNode != nullptr));
// This is the varNum for our load operations,
// only used when we have a struct with a LclVar source
unsigned srcVarNum = BAD_VAR_NUM;
if (varNode != nullptr)
{
assert(varNode->isContained());
srcVarNum = varNode->GetLclNum();
// handle promote situation
LclVarDsc* varDsc = compiler->lvaGetDesc(srcVarNum);
// This struct also must live in the stack frame
// And it can't live in a register (SIMD)
assert(varDsc->lvType == TYP_STRUCT);
assert(varDsc->lvOnFrame && !varDsc->lvRegister);
// We don't split HFA struct
assert(!varDsc->lvIsHfa());
}
else // addrNode is used
{
assert(addrNode != nullptr);
// TODO-Cleanup: `Lowering::NewPutArg` marks only `LCL_VAR_ADDR` as contained nowadays,
// but we use `genConsumeAddress` as a precaution, use `genConsumeReg()` instead.
assert(!addrNode->isContained());
// Generate code to load the address that we need into a register
genConsumeAddress(addrNode);
addrReg = addrNode->GetRegNum();
// If addrReg equal to baseReg, we use the last target register as alternative baseReg.
// Because the candidate mask for the internal baseReg does not include any of the target register,
// we can ensure that baseReg, addrReg, and the last target register are not all same.
assert(baseReg != addrReg);
// We don't split HFA struct
assert(!compiler->IsHfa(source->AsObj()->GetLayout()->GetClassHandle()));
}
ClassLayout* layout = source->AsObj()->GetLayout();
// Put on stack first
unsigned nextIndex = treeNode->gtNumRegs;
unsigned structOffset = nextIndex * TARGET_POINTER_SIZE;
int remainingSize = treeNode->GetStackByteSize();
unsigned argOffsetOut = treeNode->getArgOffset();
// remainingSize is always multiple of TARGET_POINTER_SIZE
assert(remainingSize % TARGET_POINTER_SIZE == 0);
while (remainingSize > 0)
{
var_types type = layout->GetGCPtrType(nextIndex);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), baseReg, srcVarNum, structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(baseReg != addrReg);
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), baseReg, addrReg, structOffset);
}
// Emit str instruction to store the register into the outgoing argument area
emit->emitIns_S_R(INS_str, emitTypeSize(type), baseReg, varNumOut, argOffsetOut);
argOffsetOut += TARGET_POINTER_SIZE; // We stored 4-bytes of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= TARGET_POINTER_SIZE; // We loaded 4-bytes of the struct
structOffset += TARGET_POINTER_SIZE;
nextIndex += 1;
}
        // We set up the registers in order, so that by the time we assign the last target register, `baseReg` is
        // no longer in use, in case we had to reuse the last target register for it.
structOffset = 0;
for (unsigned idx = 0; idx < treeNode->gtNumRegs; idx++)
{
regNumber targetReg = treeNode->GetRegNumByIdx(idx);
var_types type = treeNode->GetRegType(idx);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), targetReg, srcVarNum, structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
if (targetReg == addrReg && idx != treeNode->gtNumRegs - 1)
{
assert(targetReg != baseReg);
var_types addrType = addrNode->TypeGet();
emit->emitIns_Mov(INS_mov, emitActualTypeSize(addrType), baseReg, addrReg, /* canSkip */ false);
addrReg = baseReg;
}
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), targetReg, addrReg, structOffset);
}
structOffset += TARGET_POINTER_SIZE;
}
}
genProduceReg(treeNode);
}
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------
// genMultiRegStoreToSIMDLocal: store multi-reg value to a single-reg SIMD local
//
// Arguments:
// lclNode - GentreeLclVar of GT_STORE_LCL_VAR
//
// Return Value:
// None
//
void CodeGen::genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode)
{
regNumber dst = lclNode->GetRegNum();
GenTree* op1 = lclNode->gtGetOp1();
GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
unsigned regCount =
actualOp1->IsMultiRegLclVar() ? actualOp1->AsLclVar()->GetFieldCount(compiler) : actualOp1->GetMultiRegCount();
assert(op1->IsMultiRegNode());
genConsumeRegs(op1);
    // Treat the dst register as a homogeneous vector with element size equal to the src size.
    // Insert pieces in reverse order.
for (int i = regCount - 1; i >= 0; --i)
{
var_types type = op1->gtSkipReloadOrCopy()->GetRegTypeByIndex(i);
regNumber reg = actualOp1->GetRegByIndex(i);
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
// that need to be copied or reloaded.
regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
if (reloadReg != REG_NA)
{
reg = reloadReg;
}
}
assert(reg != REG_NA);
if (varTypeIsFloating(type))
{
// If the register piece was passed in a floating point register
// Use a vector mov element instruction
// src is not a vector, so it is in the first element reg[0]
// mov dst[i], reg[0]
// This effectively moves from `reg[0]` to `dst[i]`, leaving other dst bits unchanged till further
// iterations
// For the case where reg == dst, if we iterate so that we write dst[0] last, we eliminate the need for
// a temporary
GetEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), dst, reg, i, 0);
}
else
{
// If the register piece was passed in an integer register
// Use a vector mov from general purpose register instruction
// mov dst[i], reg
// This effectively moves from `reg` to `dst[i]`
GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), dst, reg, i);
}
}
genProduceReg(lclNode);
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genRangeCheck: generate code for GT_BOUNDS_CHECK node.
//
void CodeGen::genRangeCheck(GenTree* oper)
{
noway_assert(oper->OperIs(GT_BOUNDS_CHECK));
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
GenTree* arrLen = bndsChk->GetArrayLength();
GenTree* arrIndex = bndsChk->GetIndex();
GenTree* arrRef = nullptr;
int lenOffset = 0;
GenTree* src1;
GenTree* src2;
emitJumpKind jmpKind;
genConsumeRegs(arrIndex);
genConsumeRegs(arrLen);
if (arrIndex->isContainedIntOrIImmed())
{
// To encode using a cmp immediate, we place the
// constant operand in the second position
src1 = arrLen;
src2 = arrIndex;
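        // Jump to the throw helper if arrLen <= index (unsigned "lower or same").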
jmpKind = EJ_ls;
}
else
{
src1 = arrIndex;
src2 = arrLen;
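        // Jump to the throw helper if index >= arrLen (unsigned "higher or same").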
jmpKind = EJ_hs;
}
var_types bndsChkType = genActualType(src2->TypeGet());
#if DEBUG
// Bounds checks can only be 32 or 64 bit sized comparisons.
assert(bndsChkType == TYP_INT || bndsChkType == TYP_LONG);
    // The type of the bounds check should always be wide enough to compare against the index.
assert(emitTypeSize(bndsChkType) >= emitActualTypeSize(src1->TypeGet()));
#endif // DEBUG
GetEmitter()->emitInsBinary(INS_cmp, emitActualTypeSize(bndsChkType), src1, src2);
genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
}
//---------------------------------------------------------------------
// genCodeForPhysReg - generate code for a GT_PHYSREG node
//
// Arguments
// tree - the GT_PHYSREG node
//
// Return value:
// None
//
void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree)
{
assert(tree->OperIs(GT_PHYSREG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
inst_Mov(targetType, targetReg, tree->gtSrcReg, /* canSkip */ true);
genTransferRegGCState(targetReg, tree->gtSrcReg);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genCodeForNullCheck - generate code for a GT_NULLCHECK node
//
// Arguments
// tree - the GT_NULLCHECK node
//
// Return value:
// None
//
void CodeGen::genCodeForNullCheck(GenTreeIndir* tree)
{
#ifdef TARGET_ARM
assert(!"GT_NULLCHECK isn't supported for Arm32; use GT_IND.");
#else
assert(tree->OperIs(GT_NULLCHECK));
GenTree* op1 = tree->gtOp1;
genConsumeRegs(op1);
regNumber targetReg = REG_ZR;
GetEmitter()->emitInsLoadStoreOp(ins_Load(tree->TypeGet()), emitActualTypeSize(tree), targetReg, tree);
#endif
}
//------------------------------------------------------------------------
// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
// producing the effective index by subtracting the lower bound.
//
// Arguments:
// arrIndex - the node for which we're generating code
//
// Return Value:
// None.
//
void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
emitter* emit = GetEmitter();
GenTree* arrObj = arrIndex->ArrObj();
GenTree* indexNode = arrIndex->IndexExpr();
regNumber arrReg = genConsumeReg(arrObj);
regNumber indexReg = genConsumeReg(indexNode);
regNumber tgtReg = arrIndex->GetRegNum();
noway_assert(tgtReg != REG_NA);
// We will use a temp register to load the lower bound and dimension size values.
regNumber tmpReg = arrIndex->GetSingleTempReg();
assert(tgtReg != tmpReg);
unsigned dim = arrIndex->gtCurrDim;
unsigned rank = arrIndex->gtArrRank;
var_types elemType = arrIndex->gtArrElemType;
unsigned offset;
offset = compiler->eeGetMDArrayLowerBoundOffset(rank, dim);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R_R(INS_sub, EA_4BYTE, tgtReg, indexReg, tmpReg);
offset = compiler->eeGetMDArrayLengthOffset(rank, dim);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R(INS_cmp, EA_4BYTE, tgtReg, tmpReg);
genJumpToThrowHlpBlk(EJ_hs, SCK_RNGCHK_FAIL);
genProduceReg(arrIndex);
}
//------------------------------------------------------------------------
// genCodeForArrOffset: Generates code to compute the flattened array offset for
// one dimension of an array reference:
// result = (prevDimOffset * dimSize) + effectiveIndex
// where dimSize is obtained from the arrObj operand
//
// Arguments:
// arrOffset - the node for which we're generating code
//
// Return Value:
// None.
//
// Notes:
// dimSize and effectiveIndex are always non-negative, the former by design,
// and the latter because it has been normalized to be zero-based.
void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
{
GenTree* offsetNode = arrOffset->gtOffset;
GenTree* indexNode = arrOffset->gtIndex;
regNumber tgtReg = arrOffset->GetRegNum();
noway_assert(tgtReg != REG_NA);
if (!offsetNode->IsIntegralConst(0))
{
emitter* emit = GetEmitter();
regNumber offsetReg = genConsumeReg(offsetNode);
regNumber indexReg = genConsumeReg(indexNode);
regNumber arrReg = genConsumeReg(arrOffset->gtArrObj);
noway_assert(offsetReg != REG_NA);
noway_assert(indexReg != REG_NA);
noway_assert(arrReg != REG_NA);
regNumber tmpReg = arrOffset->GetSingleTempReg();
unsigned dim = arrOffset->gtCurrDim;
unsigned rank = arrOffset->gtArrRank;
var_types elemType = arrOffset->gtArrElemType;
unsigned offset = compiler->eeGetMDArrayLengthOffset(rank, dim);
// Load tmpReg with the dimension size and evaluate
// tgtReg = offsetReg*tmpReg + indexReg.
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R_R_R(INS_MULADD, EA_PTRSIZE, tgtReg, tmpReg, offsetReg, indexReg);
}
else
{
regNumber indexReg = genConsumeReg(indexNode);
inst_Mov(TYP_INT, tgtReg, indexReg, /* canSkip */ true);
}
genProduceReg(arrOffset);
}
//------------------------------------------------------------------------
// genCodeForShift: Generates the code sequence for a GenTree node that
// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
//
void CodeGen::genCodeForShift(GenTree* tree)
{
var_types targetType = tree->TypeGet();
genTreeOps oper = tree->OperGet();
instruction ins = genGetInsForOper(oper, targetType);
emitAttr size = emitActualTypeSize(tree);
regNumber dstReg = tree->GetRegNum();
assert(dstReg != REG_NA);
genConsumeOperands(tree->AsOp());
GenTree* operand = tree->gtGetOp1();
GenTree* shiftBy = tree->gtGetOp2();
if (!shiftBy->IsCnsIntOrI())
{
GetEmitter()->emitIns_R_R_R(ins, size, dstReg, operand->GetRegNum(), shiftBy->GetRegNum());
}
else
{
unsigned immWidth = emitter::getBitWidth(size); // For ARM64, immWidth will be set to 32 or 64
unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->gtIconVal & (immWidth - 1);
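        // For example, for a 32-bit shift a constant shift amount of 33 is masked down to 1 (33 & 31).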
GetEmitter()->emitIns_R_R_I(ins, size, dstReg, operand->GetRegNum(), shiftByImm);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclAddr: Generates the code for GT_LCL_FLD_ADDR/GT_LCL_VAR_ADDR.
//
// Arguments:
// tree - the node.
//
void CodeGen::genCodeForLclAddr(GenTree* tree)
{
assert(tree->OperIs(GT_LCL_FLD_ADDR, GT_LCL_VAR_ADDR));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
// Address of a local var.
noway_assert((targetType == TYP_BYREF) || (targetType == TYP_I_IMPL));
emitAttr size = emitTypeSize(targetType);
inst_RV_TT(INS_lea, targetReg, tree, 0, size);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclFld: Produce code for a GT_LCL_FLD node.
//
// Arguments:
// tree - the GT_LCL_FLD node
//
void CodeGen::genCodeForLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_LCL_FLD));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
NYI_IF(targetType == TYP_STRUCT, "GT_LCL_FLD: struct load local field not supported");
assert(targetReg != REG_NA);
unsigned offs = tree->GetLclOffs();
unsigned varNum = tree->GetLclNum();
assert(varNum < compiler->lvaCount);
#ifdef TARGET_ARM
if (tree->IsOffsetMisaligned())
{
        // Arm supports unaligned access only for integer types, so load the floating-point
        // data into 1 or 2 integer registers and convert them to float.
regNumber addr = tree->ExtractTempReg();
emit->emitIns_R_S(INS_lea, EA_PTRSIZE, addr, varNum, offs);
if (targetType == TYP_FLOAT)
{
regNumber floatAsInt = tree->GetSingleTempReg();
emit->emitIns_R_R(INS_ldr, EA_4BYTE, floatAsInt, addr);
emit->emitIns_Mov(INS_vmov_i2f, EA_4BYTE, targetReg, floatAsInt, /* canSkip */ false);
}
else
{
regNumber halfdoubleAsInt1 = tree->ExtractTempReg();
regNumber halfdoubleAsInt2 = tree->GetSingleTempReg();
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, halfdoubleAsInt1, addr, 0);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, halfdoubleAsInt2, addr, 4);
emit->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, targetReg, halfdoubleAsInt1, halfdoubleAsInt2);
}
}
else
#endif // TARGET_ARM
{
emitAttr attr = emitActualTypeSize(targetType);
instruction ins = ins_Load(targetType);
emit->emitIns_R_S(ins, attr, targetReg, varNum, offs);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForIndexAddr: Produce code for a GT_INDEX_ADDR node.
//
// Arguments:
// tree - the GT_INDEX_ADDR node
//
void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node)
{
GenTree* const base = node->Arr();
GenTree* const index = node->Index();
genConsumeReg(base);
genConsumeReg(index);
// NOTE: `genConsumeReg` marks the consumed register as not a GC pointer, as it assumes that the input registers
// die at the first instruction generated by the node. This is not the case for `INDEX_ADDR`, however, as the
// base register is multiply-used. As such, we need to mark the base register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(base->GetRegNum(), base->TypeGet());
assert(!varTypeIsGC(index->TypeGet()));
// The index is never contained, even if it is a constant.
assert(index->isUsedFromReg());
const regNumber tmpReg = node->GetSingleTempReg();
// Generate the bounds check if necessary.
if ((node->gtFlags & GTF_INX_RNGCHK) != 0)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, base->GetRegNum(), node->gtLenOffset);
GetEmitter()->emitIns_R_R(INS_cmp, emitActualTypeSize(index->TypeGet()), index->GetRegNum(), tmpReg);
genJumpToThrowHlpBlk(EJ_hs, SCK_RNGCHK_FAIL, node->gtIndRngFailBB);
}
// Can we use a ScaledAdd instruction?
//
if (isPow2(node->gtElemSize) && (node->gtElemSize <= 32768))
{
DWORD scale;
BitScanForward(&scale, node->gtElemSize);
// dest = base + index * scale
genScaledAdd(emitActualTypeSize(node), node->GetRegNum(), base->GetRegNum(), index->GetRegNum(), scale);
}
else // we have to load the element size and use a MADD (multiply-add) instruction
{
// tmpReg = element size
instGen_Set_Reg_To_Imm(EA_4BYTE, tmpReg, (ssize_t)node->gtElemSize);
// dest = index * tmpReg + base
GetEmitter()->emitIns_R_R_R_R(INS_MULADD, emitActualTypeSize(node), node->GetRegNum(), index->GetRegNum(),
tmpReg, base->GetRegNum());
}
// dest = dest + elemOffs
GetEmitter()->emitIns_R_R_I(INS_add, emitActualTypeSize(node), node->GetRegNum(), node->GetRegNum(),
node->gtElemOffset);
gcInfo.gcMarkRegSetNpt(base->gtGetRegMask());
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForIndir: Produce code for a GT_IND node.
//
// Arguments:
// tree - the GT_IND node
//
void CodeGen::genCodeForIndir(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_IND));
#ifdef FEATURE_SIMD
// Handling of Vector3 type values loaded through indirection.
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
var_types type = tree->TypeGet();
instruction ins = ins_Load(type);
regNumber targetReg = tree->GetRegNum();
genConsumeAddress(tree->Addr());
bool emitBarrier = false;
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
{
#ifdef TARGET_ARM64
bool addrIsInReg = tree->Addr()->isUsedFromReg();
bool addrIsAligned = ((tree->gtFlags & GTF_IND_UNALIGNED) == 0);
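        // Note: a byte-sized acquire load (ldarb) has no alignment requirement, so only the address-in-register
        // check applies below; ldarh/ldar additionally require an aligned address, and ldar is only used when
        // the destination is an integer register.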
if ((ins == INS_ldrb) && addrIsInReg)
{
ins = INS_ldarb;
}
else if ((ins == INS_ldrh) && addrIsInReg && addrIsAligned)
{
ins = INS_ldarh;
}
else if ((ins == INS_ldr) && addrIsInReg && addrIsAligned && genIsValidIntReg(targetReg))
{
ins = INS_ldar;
}
else
#endif // TARGET_ARM64
{
emitBarrier = true;
}
}
GetEmitter()->emitInsLoadStoreOp(ins, emitActualTypeSize(type), targetReg, tree);
if (emitBarrier)
{
// when INS_ldar* could not be used for a volatile load,
// we use an ordinary load followed by a load barrier.
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
genProduceReg(tree);
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkHelper - Generate code for a CpBlk node by the means of the VM memcpy helper call
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode)
{
// Destination address goes in arg0, source address goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
if (cpBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before a volatile CpBlk operation
instGen_MemoryBarrier();
}
genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
if (cpBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a load barrier after a volatile CpBlk operation
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
}
#ifdef TARGET_ARM64
// The following classes
// - InitBlockUnrollHelper
// - CopyBlockUnrollHelper
// encapsulate algorithms that produce instruction sequences for inlined equivalents of memset() and memcpy() functions.
//
// Each class has a private template function that accepts an "InstructionStream" as a template class argument:
// - InitBlockUnrollHelper::UnrollInitBlock<InstructionStream>(startDstOffset, byteCount, initValue)
// - CopyBlockUnrollHelper::UnrollCopyBlock<InstructionStream>(startSrcOffset, startDstOffset, byteCount)
//
// The design goal is to separate optimization approaches implemented by the algorithms
// from the target platform specific details.
//
// InstructionStream is a "stream" of load/store instructions (i.e. ldr/ldp/str/stp) that represents an instruction
// sequence that will initialize a memory region with some value or copy values from one memory region to another.
//
// As far as UnrollInitBlock and UnrollCopyBlock are concerned, InstructionStream implements the following class member
// functions:
// - LoadPairRegs(offset, regSizeBytes)
// - StorePairRegs(offset, regSizeBytes)
// - LoadReg(offset, regSizeBytes)
// - StoreReg(offset, regSizeBytes)
//
// There are three implementations of InstructionStream:
// - CountingStream that counts how many instructions were pushed out of the stream
// - VerifyingStream that validates that all the instructions in the stream are encodable on Arm64
// - ProducingStream that maps the function to corresponding emitter functions
//
// The idea behind the design is that the decision regarding which instruction sequence to emit
// (scalar instructions vs. SIMD instructions) is made by executing an algorithm that produces an instruction sequence
// while counting the number of produced instructions and verifying that all the instructions are encodable.
//
// For example, using SIMD instructions might produce a shorter sequence but require "spilling" the value of a starting
// address to an integer register (due to stricter offset alignment rules for 16-byte wide SIMD instructions).
// This way the CodeGen can take that fact into account before emitting an instruction sequence.
//
// An alternative design might have fused VerifyingStream and ProducingStream into one class
// that would allow undoing an instruction if the sequence is not fully encodable.
class CountingStream
{
public:
CountingStream()
{
instrCount = 0;
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instrCount++;
}
unsigned InstructionCount() const
{
return instrCount;
}
private:
unsigned instrCount;
};
class VerifyingStream
{
public:
VerifyingStream()
{
canEncodeAllLoads = true;
canEncodeAllStores = true;
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
canEncodeAllLoads = canEncodeAllLoads && emitter::canEncodeLoadOrStorePairOffset(offset, EA_SIZE(regSizeBytes));
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
canEncodeAllStores =
canEncodeAllStores && emitter::canEncodeLoadOrStorePairOffset(offset, EA_SIZE(regSizeBytes));
}
void LoadReg(int offset, unsigned regSizeBytes)
{
canEncodeAllLoads =
canEncodeAllLoads && emitter::emitIns_valid_imm_for_ldst_offset(offset, EA_SIZE(regSizeBytes));
}
void StoreReg(int offset, unsigned regSizeBytes)
{
canEncodeAllStores =
canEncodeAllStores && emitter::emitIns_valid_imm_for_ldst_offset(offset, EA_SIZE(regSizeBytes));
}
bool CanEncodeAllLoads() const
{
return canEncodeAllLoads;
}
bool CanEncodeAllStores() const
{
return canEncodeAllStores;
}
private:
bool canEncodeAllLoads;
bool canEncodeAllStores;
};
class ProducingStreamBaseInstrs
{
public:
ProducingStreamBaseInstrs(regNumber intReg1, regNumber intReg2, regNumber addrReg, emitter* emitter)
: intReg1(intReg1), intReg2(intReg2), addrReg(addrReg), emitter(emitter)
{
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
assert(regSizeBytes == 8);
emitter->emitIns_R_R_R_I(INS_ldp, EA_SIZE(regSizeBytes), intReg1, intReg2, addrReg, offset);
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
assert(regSizeBytes == 8);
emitter->emitIns_R_R_R_I(INS_stp, EA_SIZE(regSizeBytes), intReg1, intReg2, addrReg, offset);
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_ldr;
if (regSizeBytes == 1)
{
ins = INS_ldrb;
}
else if (regSizeBytes == 2)
{
ins = INS_ldrh;
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), intReg1, addrReg, offset);
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_str;
if (regSizeBytes == 1)
{
ins = INS_strb;
}
else if (regSizeBytes == 2)
{
ins = INS_strh;
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), intReg1, addrReg, offset);
}
private:
const regNumber intReg1;
const regNumber intReg2;
const regNumber addrReg;
emitter* const emitter;
};
class ProducingStream
{
public:
ProducingStream(regNumber intReg1, regNumber simdReg1, regNumber simdReg2, regNumber addrReg, emitter* emitter)
: intReg1(intReg1), simdReg1(simdReg1), simdReg2(simdReg2), addrReg(addrReg), emitter(emitter)
{
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
assert((regSizeBytes == 8) || (regSizeBytes == 16));
emitter->emitIns_R_R_R_I(INS_ldp, EA_SIZE(regSizeBytes), simdReg1, simdReg2, addrReg, offset);
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
assert((regSizeBytes == 8) || (regSizeBytes == 16));
emitter->emitIns_R_R_R_I(INS_stp, EA_SIZE(regSizeBytes), simdReg1, simdReg2, addrReg, offset);
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_ldr;
// Note that 'intReg1' can be unavailable.
// If that is the case, then use SIMD instruction ldr and
// 'simdReg1' as a temporary register.
regNumber tempReg;
if ((regSizeBytes == 16) || (intReg1 == REG_NA))
{
tempReg = simdReg1;
}
else
{
tempReg = intReg1;
if (regSizeBytes == 1)
{
ins = INS_ldrb;
}
else if (regSizeBytes == 2)
{
ins = INS_ldrh;
}
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), tempReg, addrReg, offset);
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_str;
        // Note that 'intReg1' can be unavailable.
        // If that is the case, then use the SIMD instruction str and
        // 'simdReg1' as a temporary register.
regNumber tempReg;
if ((regSizeBytes == 16) || (intReg1 == REG_NA))
{
tempReg = simdReg1;
}
else
{
tempReg = intReg1;
if (regSizeBytes == 1)
{
ins = INS_strb;
}
else if (regSizeBytes == 2)
{
ins = INS_strh;
}
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), tempReg, addrReg, offset);
}
private:
const regNumber intReg1;
const regNumber simdReg1;
const regNumber simdReg2;
const regNumber addrReg;
emitter* const emitter;
};
class BlockUnrollHelper
{
public:
    // The following function returns a size in bytes that
    // 1) is greater than or equal to 'byteCount' and
    // 2) can be read or written by a single instruction on Arm64.
    // For example, the Arm64 ISA has ldrb/strb and ldrh/strh that
    // load/store 1 or 2 bytes, respectively.
    // However, there is no instruction that can load/store 3 bytes, and
    // the next "smallest" instruction is ldr/str, which operates at 4-byte granularity.
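    // For example, GetRegSizeAtLeastBytes(3) returns 4 and GetRegSizeAtLeastBytes(9) returns 16.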
static unsigned GetRegSizeAtLeastBytes(unsigned byteCount)
{
assert(byteCount != 0);
assert(byteCount < 16);
unsigned regSizeBytes = byteCount;
if (byteCount > 8)
{
regSizeBytes = 16;
}
else if (byteCount > 4)
{
regSizeBytes = 8;
}
else if (byteCount > 2)
{
regSizeBytes = 4;
}
return regSizeBytes;
}
};
class InitBlockUnrollHelper
{
public:
InitBlockUnrollHelper(int dstOffset, unsigned byteCount) : dstStartOffset(dstOffset), byteCount(byteCount)
{
}
int GetDstOffset() const
{
return dstStartOffset;
}
void SetDstOffset(int dstOffset)
{
dstStartOffset = dstOffset;
}
bool CanEncodeAllOffsets(int regSizeBytes) const
{
VerifyingStream instrStream;
UnrollInitBlock(instrStream, regSizeBytes);
return instrStream.CanEncodeAllStores();
}
unsigned InstructionCount(int regSizeBytes) const
{
CountingStream instrStream;
UnrollInitBlock(instrStream, regSizeBytes);
return instrStream.InstructionCount();
}
void Unroll(regNumber intReg, regNumber simdReg, regNumber addrReg, emitter* emitter) const
{
ProducingStream instrStream(intReg, simdReg, simdReg, addrReg, emitter);
UnrollInitBlock(instrStream, FP_REGSIZE_BYTES);
}
void UnrollBaseInstrs(regNumber intReg, regNumber addrReg, emitter* emitter) const
{
ProducingStreamBaseInstrs instrStream(intReg, intReg, addrReg, emitter);
UnrollInitBlock(instrStream, REGSIZE_BYTES);
}
private:
template <class InstructionStream>
void UnrollInitBlock(InstructionStream& instrStream, int initialRegSizeBytes) const
{
assert((initialRegSizeBytes == 8) || (initialRegSizeBytes == 16));
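        // The overall strategy: when at least one register pair fits, first (if needed) emit a store that covers
        // the bytes up to the pair-aligned offset, then store register pairs while two registers fit, then a single
        // register, and finally a store that ends exactly at endOffset for any remaining tail (overlapping stores
        // are harmless since every store writes the same value). Smaller blocks use single stores; the tail is
        // covered either by an overlapping store ending at endOffset (once something has been written) or by
        // halving the store size.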
int offset = dstStartOffset;
const int endOffset = offset + byteCount;
const int storePairRegsAlignment = initialRegSizeBytes;
const int storePairRegsWritesBytes = 2 * initialRegSizeBytes;
const int offsetAligned = AlignUp((UINT)offset, storePairRegsAlignment);
const int storePairRegsInstrCount = (endOffset - offsetAligned) / storePairRegsWritesBytes;
if (storePairRegsInstrCount > 0)
{
if (offset != offsetAligned)
{
const int firstRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(offsetAligned - offset);
instrStream.StoreReg(offset, firstRegSizeBytes);
offset = offsetAligned;
}
while (endOffset - offset >= storePairRegsWritesBytes)
{
instrStream.StorePairRegs(offset, initialRegSizeBytes);
offset += storePairRegsWritesBytes;
}
if (endOffset - offset >= initialRegSizeBytes)
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
}
if (offset != endOffset)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endOffset - offset);
instrStream.StoreReg(endOffset - lastRegSizeBytes, lastRegSizeBytes);
}
}
else
{
bool isSafeToWriteBehind = false;
while (endOffset - offset >= initialRegSizeBytes)
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
assert(endOffset - offset < initialRegSizeBytes);
while (offset != endOffset)
{
if (isSafeToWriteBehind)
{
assert(endOffset - offset < initialRegSizeBytes);
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endOffset - offset);
instrStream.StoreReg(endOffset - lastRegSizeBytes, lastRegSizeBytes);
break;
}
if (offset + initialRegSizeBytes > endOffset)
{
initialRegSizeBytes = initialRegSizeBytes / 2;
}
else
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
}
}
}
int dstStartOffset;
const unsigned byteCount;
};
class CopyBlockUnrollHelper
{
public:
CopyBlockUnrollHelper(int srcOffset, int dstOffset, unsigned byteCount)
: srcStartOffset(srcOffset), dstStartOffset(dstOffset), byteCount(byteCount)
{
}
int GetSrcOffset() const
{
return srcStartOffset;
}
int GetDstOffset() const
{
return dstStartOffset;
}
void SetSrcOffset(int srcOffset)
{
srcStartOffset = srcOffset;
}
void SetDstOffset(int dstOffset)
{
dstStartOffset = dstOffset;
}
unsigned InstructionCount(int regSizeBytes) const
{
CountingStream instrStream;
UnrollCopyBlock(instrStream, instrStream, regSizeBytes);
return instrStream.InstructionCount();
}
bool CanEncodeAllOffsets(int regSizeBytes) const
{
bool canEncodeAllLoads = true;
bool canEncodeAllStores = true;
TryEncodeAllOffsets(regSizeBytes, &canEncodeAllLoads, &canEncodeAllStores);
return canEncodeAllLoads && canEncodeAllStores;
}
void TryEncodeAllOffsets(int regSizeBytes, bool* pCanEncodeAllLoads, bool* pCanEncodeAllStores) const
{
assert(pCanEncodeAllLoads != nullptr);
assert(pCanEncodeAllStores != nullptr);
VerifyingStream instrStream;
UnrollCopyBlock(instrStream, instrStream, regSizeBytes);
*pCanEncodeAllLoads = instrStream.CanEncodeAllLoads();
*pCanEncodeAllStores = instrStream.CanEncodeAllStores();
}
void Unroll(unsigned initialRegSizeBytes,
regNumber intReg,
regNumber simdReg1,
regNumber simdReg2,
regNumber srcAddrReg,
regNumber dstAddrReg,
emitter* emitter) const
{
ProducingStream loadStream(intReg, simdReg1, simdReg2, srcAddrReg, emitter);
ProducingStream storeStream(intReg, simdReg1, simdReg2, dstAddrReg, emitter);
UnrollCopyBlock(loadStream, storeStream, initialRegSizeBytes);
}
void UnrollBaseInstrs(
regNumber intReg1, regNumber intReg2, regNumber srcAddrReg, regNumber dstAddrReg, emitter* emitter) const
{
ProducingStreamBaseInstrs loadStream(intReg1, intReg2, srcAddrReg, emitter);
ProducingStreamBaseInstrs storeStream(intReg1, intReg2, dstAddrReg, emitter);
UnrollCopyBlock(loadStream, storeStream, REGSIZE_BYTES);
}
private:
template <class InstructionStream>
void UnrollCopyBlock(InstructionStream& loadStream, InstructionStream& storeStream, int initialRegSizeBytes) const
{
assert((initialRegSizeBytes == 8) || (initialRegSizeBytes == 16));
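        // Mirrors InitBlockUnrollHelper::UnrollInitBlock: align the destination for paired accesses when the block
        // is large enough, copy with load/store pairs, then a single load/store, and cover any remaining tail with
        // a load/store that ends exactly at the end of the block (which may re-copy a few already-copied bytes).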
int srcOffset = srcStartOffset;
int dstOffset = dstStartOffset;
const int endSrcOffset = srcOffset + byteCount;
const int endDstOffset = dstOffset + byteCount;
const int storePairRegsAlignment = initialRegSizeBytes;
const int storePairRegsWritesBytes = 2 * initialRegSizeBytes;
const int dstOffsetAligned = AlignUp((UINT)dstOffset, storePairRegsAlignment);
if (byteCount >= (unsigned)storePairRegsWritesBytes)
{
const int dstBytesToAlign = dstOffsetAligned - dstOffset;
if (dstBytesToAlign != 0)
{
const int firstRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(dstBytesToAlign);
loadStream.LoadReg(srcOffset, firstRegSizeBytes);
storeStream.StoreReg(dstOffset, firstRegSizeBytes);
srcOffset = srcOffset + dstBytesToAlign;
dstOffset = dstOffsetAligned;
}
while (endDstOffset - dstOffset >= storePairRegsWritesBytes)
{
loadStream.LoadPairRegs(srcOffset, initialRegSizeBytes);
storeStream.StorePairRegs(dstOffset, initialRegSizeBytes);
srcOffset += storePairRegsWritesBytes;
dstOffset += storePairRegsWritesBytes;
}
if (endDstOffset - dstOffset >= initialRegSizeBytes)
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
}
if (dstOffset != endDstOffset)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endDstOffset - dstOffset);
loadStream.LoadReg(endSrcOffset - lastRegSizeBytes, lastRegSizeBytes);
storeStream.StoreReg(endDstOffset - lastRegSizeBytes, lastRegSizeBytes);
}
}
else
{
bool isSafeToWriteBehind = false;
while (endDstOffset - dstOffset >= initialRegSizeBytes)
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
assert(endSrcOffset - srcOffset < initialRegSizeBytes);
while (dstOffset != endDstOffset)
{
if (isSafeToWriteBehind)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endDstOffset - dstOffset);
loadStream.LoadReg(endSrcOffset - lastRegSizeBytes, lastRegSizeBytes);
storeStream.StoreReg(endDstOffset - lastRegSizeBytes, lastRegSizeBytes);
break;
}
if (dstOffset + initialRegSizeBytes > endDstOffset)
{
initialRegSizeBytes = initialRegSizeBytes / 2;
}
else
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
}
}
}
int srcStartOffset;
int dstStartOffset;
const unsigned byteCount;
};
#endif // TARGET_ARM64
//----------------------------------------------------------------------------------
// genCodeForInitBlkUnroll: Generate unrolled block initialization code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
assert(!dstAddr->AsAddrMode()->HasIndex());
dstAddrBaseReg = genConsumeReg(dstAddr->AsAddrMode()->Base());
dstOffset = dstAddr->AsAddrMode()->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
GenTree* src = node->Data();
if (src->OperIs(GT_INIT_VAL))
{
assert(src->isContained());
src = src->gtGetOp1();
}
if (node->IsVolatile())
{
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(dstOffset < INT32_MAX - static_cast<int>(size));
#ifdef TARGET_ARM64
InitBlockUnrollHelper helper(dstOffset, size);
regNumber srcReg;
if (!src->isContained())
{
srcReg = genConsumeReg(src);
}
else
{
assert(src->IsIntegralConst(0));
srcReg = REG_ZR;
}
regNumber dstReg = dstAddrBaseReg;
int dstRegAddrAlignment = 0;
bool isDstRegAddrAlignmentKnown = false;
if (dstLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(dstLclNum, &fpBased);
dstReg = fpBased ? REG_FPBASE : REG_SPBASE;
dstRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isDstRegAddrAlignmentKnown = true;
helper.SetDstOffset(baseAddr + dstOffset);
}
if (!helper.CanEncodeAllOffsets(REGSIZE_BYTES))
{
// If dstRegAddrAlignment is known and non-zero the following ensures that the adjusted value of dstReg is
// at a 16-byte aligned boundary.
// This is done to potentially allow more cases where the JIT can use 16-byte stores.
const int dstOffsetAdjustment = helper.GetDstOffset() - dstRegAddrAlignment;
dstRegAddrAlignment = 0;
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, dstReg, dstOffsetAdjustment, tempReg);
dstReg = tempReg;
helper.SetDstOffset(helper.GetDstOffset() - dstOffsetAdjustment);
}
bool shouldUse16ByteWideInstrs = false;
// Store operations that cross a 16-byte boundary can reduce bandwidth or incur additional latency.
// The following condition prevents using 16-byte stores when dstRegAddrAlignment is:
// 1) unknown (i.e. dstReg is neither FP nor SP) or
// 2) non-zero (i.e. dstRegAddr is not 16-byte aligned).
const bool hasAvailableSimdReg = isDstRegAddrAlignmentKnown && (size > FP_REGSIZE_BYTES);
const bool canUse16ByteWideInstrs =
hasAvailableSimdReg && (dstRegAddrAlignment == 0) && helper.CanEncodeAllOffsets(FP_REGSIZE_BYTES);
if (canUse16ByteWideInstrs)
{
// The JIT would need to initialize a SIMD register with "movi simdReg.16B, #initValue".
const unsigned instrCount16ByteWide = helper.InstructionCount(FP_REGSIZE_BYTES) + 1;
shouldUse16ByteWideInstrs = instrCount16ByteWide < helper.InstructionCount(REGSIZE_BYTES);
}
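// For example (illustrative): zero-initializing a 64-byte block with 16-byte regs costs roughly
// "movi v16.16b, #0" plus two "stp q16, q16, [...]" (3 instructions), whereas the 8-byte path needs
// four "stp xzr, xzr, [...]" stores, so the SIMD sequence would be selected here.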
if (shouldUse16ByteWideInstrs)
{
const regNumber simdReg = node->GetSingleTempReg(RBM_ALLFLOAT);
const int initValue = (src->AsIntCon()->IconValue() & 0xFF);
emit->emitIns_R_I(INS_movi, EA_16BYTE, simdReg, initValue, INS_OPTS_16B);
helper.Unroll(srcReg, simdReg, dstReg, GetEmitter());
}
else
{
helper.UnrollBaseInstrs(srcReg, dstReg, GetEmitter());
}
#endif // TARGET_ARM64
#ifdef TARGET_ARM
const regNumber srcReg = genConsumeReg(src);
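// For example (illustrative): a 7-byte init emits one "str" (4 bytes), one "strh" (2 bytes) and one
// "strb" (1 byte), with regSize halved as the remaining size shrinks.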
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
instruction storeIns;
emitAttr attr;
switch (regSize)
{
case 1:
storeIns = INS_strb;
attr = EA_4BYTE;
break;
case 2:
storeIns = INS_strh;
attr = EA_4BYTE;
break;
case 4:
storeIns = INS_str;
attr = EA_ATTR(regSize);
break;
default:
unreached();
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(storeIns, attr, srcReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_R_R_I(storeIns, attr, srcReg, dstAddrBaseReg, dstOffset);
}
}
#endif // TARGET_ARM
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkUnroll: Generate unrolled block copy code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
assert(!dstAddr->AsAddrMode()->HasIndex());
dstAddrBaseReg = genConsumeReg(dstAddr->AsAddrMode()->Base());
dstOffset = dstAddr->AsAddrMode()->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
unsigned srcLclNum = BAD_VAR_NUM;
regNumber srcAddrBaseReg = REG_NA;
int srcOffset = 0;
GenTree* src = node->Data();
assert(src->isContained());
if (src->OperIs(GT_LCL_VAR, GT_LCL_FLD))
{
srcLclNum = src->AsLclVarCommon()->GetLclNum();
srcOffset = src->AsLclVarCommon()->GetLclOffs();
}
else
{
assert(src->OperIs(GT_IND));
GenTree* srcAddr = src->AsIndir()->Addr();
if (!srcAddr->isContained())
{
srcAddrBaseReg = genConsumeReg(srcAddr);
}
else if (srcAddr->OperIsAddrMode())
{
srcAddrBaseReg = genConsumeReg(srcAddr->AsAddrMode()->Base());
srcOffset = srcAddr->AsAddrMode()->Offset();
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
}
if (node->IsVolatile())
{
// issue a full memory barrier before a volatile CpBlk operation
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(srcOffset < INT32_MAX - static_cast<int>(size));
assert(dstOffset < INT32_MAX - static_cast<int>(size));
#ifdef TARGET_ARM64
CopyBlockUnrollHelper helper(srcOffset, dstOffset, size);
regNumber srcReg = srcAddrBaseReg;
int srcRegAddrAlignment = 0;
bool isSrcRegAddrAlignmentKnown = false;
if (srcLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(srcLclNum, &fpBased);
srcReg = fpBased ? REG_FPBASE : REG_SPBASE;
srcRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isSrcRegAddrAlignmentKnown = true;
helper.SetSrcOffset(baseAddr + srcOffset);
}
regNumber dstReg = dstAddrBaseReg;
int dstRegAddrAlignment = 0;
bool isDstRegAddrAlignmentKnown = false;
if (dstLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(dstLclNum, &fpBased);
dstReg = fpBased ? REG_FPBASE : REG_SPBASE;
dstRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isDstRegAddrAlignmentKnown = true;
helper.SetDstOffset(baseAddr + dstOffset);
}
bool canEncodeAllLoads = true;
bool canEncodeAllStores = true;
helper.TryEncodeAllOffsets(REGSIZE_BYTES, &canEncodeAllLoads, &canEncodeAllStores);
srcOffset = helper.GetSrcOffset();
dstOffset = helper.GetDstOffset();
int srcOffsetAdjustment = 0;
int dstOffsetAdjustment = 0;
if (!canEncodeAllLoads && !canEncodeAllStores)
{
srcOffsetAdjustment = srcOffset;
dstOffsetAdjustment = dstOffset;
}
else if (!canEncodeAllLoads)
{
srcOffsetAdjustment = srcOffset - dstOffset;
}
else if (!canEncodeAllStores)
{
dstOffsetAdjustment = dstOffset - srcOffset;
}
helper.SetSrcOffset(srcOffset - srcOffsetAdjustment);
helper.SetDstOffset(dstOffset - dstOffsetAdjustment);
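// For example (illustrative): if only the load offsets cannot be encoded, srcOffsetAdjustment becomes
// srcOffset - dstOffset, so the helper's srcOffset ends up equal to dstOffset; the adjustment is then
// materialized below with a single "add" into a temporary base register, and both streams use the same
// offsets, which are already known to be encodable for the stores.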
// Quad-word load operations that are not 16-byte aligned, and store operations that cross a 16-byte boundary
// can reduce bandwidth or incur additional latency.
// Therefore, the JIT would attempt to use 16-byte variants of such instructions when both conditions are met:
// 1) the base address stored in dstReg has known alignment (modulo 16 bytes) and
// 2) the base address stored in srcReg has the same alignment as the address in dstReg.
//
// When both addresses are 16-byte aligned the CopyBlock instruction sequence looks like
//
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset]
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset+32]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+32]
// ...
//
// When both addresses are not 16-byte aligned the CopyBlock instruction sequence starts with padding
// str instruction. For example, when both addresses are 8-byte aligned the instruction sequence looks like
//
// ldr X_intReg1, [srcReg, #srcOffset]
// str X_intReg1, [dstReg, #dstOffset]
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset+8]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+8]
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset+40]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+40]
// ...
// LSRA allocates a pair of SIMD registers when alignments of both source and destination base addresses are
// known and the block size is larger than a single SIMD register size (i.e. when using SIMD instructions can
// be profitable).
const bool canUse16ByteWideInstrs = isSrcRegAddrAlignmentKnown && isDstRegAddrAlignmentKnown &&
(size >= 2 * FP_REGSIZE_BYTES) && (srcRegAddrAlignment == dstRegAddrAlignment);
bool shouldUse16ByteWideInstrs = false;
if (canUse16ByteWideInstrs)
{
bool canEncodeAll16ByteWideLoads = false;
bool canEncodeAll16ByteWideStores = false;
helper.TryEncodeAllOffsets(FP_REGSIZE_BYTES, &canEncodeAll16ByteWideLoads, &canEncodeAll16ByteWideStores);
if (canEncodeAll16ByteWideLoads && canEncodeAll16ByteWideStores)
{
// No further adjustments for srcOffset and dstOffset are needed.
// The JIT should use 16-byte loads and stores when the resulting sequence has fewer instructions.
shouldUse16ByteWideInstrs =
(helper.InstructionCount(FP_REGSIZE_BYTES) < helper.InstructionCount(REGSIZE_BYTES));
}
else if (canEncodeAllLoads && canEncodeAllStores &&
(canEncodeAll16ByteWideLoads || canEncodeAll16ByteWideStores))
{
// In order to use 16-byte instructions the JIT needs to adjust either srcOffset or dstOffset.
// The JIT should use 16-byte loads and stores when the resulting sequence (incl. an additional add
// instruction) has fewer instructions.
if (helper.InstructionCount(FP_REGSIZE_BYTES) + 1 < helper.InstructionCount(REGSIZE_BYTES))
{
shouldUse16ByteWideInstrs = true;
if (!canEncodeAll16ByteWideLoads)
{
srcOffsetAdjustment = srcOffset - dstOffset;
}
else
{
dstOffsetAdjustment = dstOffset - srcOffset;
}
helper.SetSrcOffset(srcOffset - srcOffsetAdjustment);
helper.SetDstOffset(dstOffset - dstOffsetAdjustment);
}
}
}
#ifdef DEBUG
if (shouldUse16ByteWideInstrs)
{
assert(helper.CanEncodeAllOffsets(FP_REGSIZE_BYTES));
}
else
{
assert(helper.CanEncodeAllOffsets(REGSIZE_BYTES));
}
#endif
if ((srcOffsetAdjustment != 0) && (dstOffsetAdjustment != 0))
{
const regNumber tempReg1 = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg1, srcReg, srcOffsetAdjustment, tempReg1);
srcReg = tempReg1;
const regNumber tempReg2 = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg2, dstReg, dstOffsetAdjustment, tempReg2);
dstReg = tempReg2;
}
else if (srcOffsetAdjustment != 0)
{
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, srcReg, srcOffsetAdjustment, tempReg);
srcReg = tempReg;
}
else if (dstOffsetAdjustment != 0)
{
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, dstReg, dstOffsetAdjustment, tempReg);
dstReg = tempReg;
}
regNumber intReg1 = REG_NA;
regNumber intReg2 = REG_NA;
const unsigned intRegCount = node->AvailableTempRegCount(RBM_ALLINT);
if (intRegCount >= 2)
{
intReg1 = node->ExtractTempReg(RBM_ALLINT);
intReg2 = node->ExtractTempReg(RBM_ALLINT);
}
else if (intRegCount == 1)
{
intReg1 = node->GetSingleTempReg(RBM_ALLINT);
intReg2 = rsGetRsvdReg();
}
else
{
intReg1 = rsGetRsvdReg();
}
if (shouldUse16ByteWideInstrs)
{
const regNumber simdReg1 = node->ExtractTempReg(RBM_ALLFLOAT);
const regNumber simdReg2 = node->GetSingleTempReg(RBM_ALLFLOAT);
helper.Unroll(FP_REGSIZE_BYTES, intReg1, simdReg1, simdReg2, srcReg, dstReg, GetEmitter());
}
else
{
helper.UnrollBaseInstrs(intReg1, intReg2, srcReg, dstReg, GetEmitter());
}
#endif // TARGET_ARM64
#ifdef TARGET_ARM
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
instruction loadIns;
instruction storeIns;
emitAttr attr;
switch (regSize)
{
case 1:
loadIns = INS_ldrb;
storeIns = INS_strb;
attr = EA_4BYTE;
break;
case 2:
loadIns = INS_ldrh;
storeIns = INS_strh;
attr = EA_4BYTE;
break;
case 4:
loadIns = INS_ldr;
storeIns = INS_str;
attr = EA_ATTR(regSize);
break;
default:
unreached();
}
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(loadIns, attr, tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_R_I(loadIns, attr, tempReg, srcAddrBaseReg, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(storeIns, attr, tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_R_R_I(storeIns, attr, tempReg, dstAddrBaseReg, dstOffset);
}
}
#endif // TARGET_ARM
if (node->IsVolatile())
{
// issue a load barrier after a volatile CpBlk operation
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
}
//------------------------------------------------------------------------
// genCodeForInitBlkHelper - Generate code for an InitBlk node by means of the VM memset helper call
//
// Arguments:
// initBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForInitBlkHelper(GenTreeBlk* initBlkNode)
{
// Dest address goes in arg0, init value goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
if (initBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before a volatile initBlock Operation
instGen_MemoryBarrier();
}
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
}
//------------------------------------------------------------------------
// genCall: Produce code for a GT_CALL node
//
void CodeGen::genCall(GenTreeCall* call)
{
// Consume all the arg regs
for (GenTreeCall::Use& use : call->LateArgs())
{
GenTree* argNode = use.GetNode();
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, argNode);
assert(curArgTabEntry);
// GT_RELOAD/GT_COPY use the child node
argNode = argNode->gtSkipReloadOrCopy();
if (curArgTabEntry->GetRegNum() == REG_STK)
continue;
// Deal with multi register passed struct args.
if (argNode->OperGet() == GT_FIELD_LIST)
{
regNumber argReg = curArgTabEntry->GetRegNum();
for (GenTreeFieldList::Use& use : argNode->AsFieldList()->Uses())
{
GenTree* putArgRegNode = use.GetNode();
assert(putArgRegNode->gtOper == GT_PUTARG_REG);
genConsumeReg(putArgRegNode);
inst_Mov_Extend(putArgRegNode->TypeGet(), /* srcInReg */ true, argReg, putArgRegNode->GetRegNum(),
/* canSkip */ true, emitActualTypeSize(TYP_I_IMPL));
argReg = genRegArgNext(argReg);
#if defined(TARGET_ARM)
// A double register is modelled as an even-numbered single one
if (putArgRegNode->TypeGet() == TYP_DOUBLE)
{
argReg = genRegArgNext(argReg);
}
#endif // TARGET_ARM
}
}
else if (curArgTabEntry->IsSplit())
{
assert(compFeatureArgSplit());
assert(curArgTabEntry->numRegs >= 1);
genConsumeArgSplitStruct(argNode->AsPutArgSplit());
for (unsigned idx = 0; idx < curArgTabEntry->numRegs; idx++)
{
regNumber argReg = (regNumber)((unsigned)curArgTabEntry->GetRegNum() + idx);
regNumber allocReg = argNode->AsPutArgSplit()->GetRegNumByIdx(idx);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ true, argReg, allocReg, /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
}
else
{
regNumber argReg = curArgTabEntry->GetRegNum();
genConsumeReg(argNode);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ true, argReg, argNode->GetRegNum(), /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
}
// Insert a null check on "this" pointer if asked.
if (call->NeedsNullCheck())
{
const regNumber regThis = genGetThisArgReg(call);
#if defined(TARGET_ARM)
const regNumber tmpReg = call->ExtractTempReg();
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, regThis, 0);
#elif defined(TARGET_ARM64)
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, regThis, 0);
#endif // TARGET*
}
// If fast tail call, then we are done here, we just have to load the call
// target into the right registers. We ensure in RA that target is loaded
// into a volatile register that won't be restored by epilog sequence.
if (call->IsFastTailCall())
{
GenTree* target = getCallTarget(call, nullptr);
if (target != nullptr)
{
// Indirect fast tail calls materialize call target either in gtControlExpr or in gtCallAddr.
genConsumeReg(target);
}
#ifdef FEATURE_READYTORUN
else if (call->IsR2ROrVirtualStubRelativeIndir())
{
assert(((call->IsR2RRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
((call->IsVirtualStubRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_VALUE)));
assert(call->gtControlExpr == nullptr);
regNumber tmpReg = call->GetSingleTempReg();
// The register we save the call address in must not be overridden by the epilog.
assert((tmpReg & (RBM_INT_CALLEE_TRASH & ~RBM_LR)) == tmpReg);
regNumber callAddrReg =
call->IsVirtualStubRelativeIndir() ? compiler->virtualStubParamInfo->GetReg() : REG_R2R_INDIRECT_PARAM;
GetEmitter()->emitIns_R_R(ins_Load(TYP_I_IMPL), emitActualTypeSize(TYP_I_IMPL), tmpReg, callAddrReg);
// We will use this again when emitting the jump in genCallInstruction in the epilog
call->gtRsvdRegs |= genRegMask(tmpReg);
}
#endif
return;
}
// For a pinvoke to unmanaged code we emit a label to clear
// the GC pointer state before the callsite.
// We can't utilize the typical lazy killing of GC pointers
// at (or inside) the callsite.
if (compiler->killGCRefs(call))
{
genDefineTempLabel(genCreateTempLabel());
}
genCallInstruction(call);
// For pinvoke/intrinsic/tailcalls we may have needed to get the address of
// a label. In case the call is indirect with CFG enabled, make sure we do not get
// the address after the validation but only after the actual call that
// comes after it.
if (genPendingCallLabel && !call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
genDefineInlineTempLabel(genPendingCallLabel);
genPendingCallLabel = nullptr;
}
#ifdef DEBUG
// We should not have GC pointers in killed registers live around the call.
// GC info for arg registers were cleared when consuming arg nodes above
// and LSRA should ensure it for other trashed registers.
regMaskTP killMask = RBM_CALLEE_TRASH;
if (call->IsHelperCall())
{
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
killMask = compiler->compHelperCallKillSet(helpFunc);
}
assert((gcInfo.gcRegGCrefSetCur & killMask) == 0);
assert((gcInfo.gcRegByrefSetCur & killMask) == 0);
#endif
var_types returnType = call->TypeGet();
if (returnType != TYP_VOID)
{
regNumber returnReg;
if (call->HasMultiRegRetVal())
{
const ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
assert(pRetTypeDesc != nullptr);
unsigned regCount = pRetTypeDesc->GetReturnRegCount();
// If regs allocated to call node are different from ABI return
// regs in which the call has returned its result, move the result
// to regs allocated to call node.
for (unsigned i = 0; i < regCount; ++i)
{
var_types regType = pRetTypeDesc->GetReturnRegType(i);
returnReg = pRetTypeDesc->GetABIReturnReg(i);
regNumber allocatedReg = call->GetRegNumByIdx(i);
inst_Mov(regType, allocatedReg, returnReg, /* canSkip */ true);
}
}
else
{
#ifdef TARGET_ARM
if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
// The CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
// TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers.
returnReg = REG_PINVOKE_TCB;
}
else if (compiler->opts.compUseSoftFP)
{
returnReg = REG_INTRET;
}
else
#endif // TARGET_ARM
if (varTypeUsesFloatArgReg(returnType))
{
returnReg = REG_FLOATRET;
}
else
{
returnReg = REG_INTRET;
}
if (call->GetRegNum() != returnReg)
{
#ifdef TARGET_ARM
if (compiler->opts.compUseSoftFP && returnType == TYP_DOUBLE)
{
inst_RV_RV_RV(INS_vmov_i2d, call->GetRegNum(), returnReg, genRegArgNext(returnReg), EA_8BYTE);
}
else if (compiler->opts.compUseSoftFP && returnType == TYP_FLOAT)
{
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ false);
}
else
#endif
{
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ false);
}
}
}
genProduceReg(call);
}
// If there is nothing next, that means the result is thrown away, so this value is not live.
// However, for minopts or debuggable code, we keep it live to support managed return value debugging.
if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
{
gcInfo.gcMarkRegSetNpt(RBM_INTRET);
}
}
//------------------------------------------------------------------------
// genCallInstruction - Generate instructions necessary to transfer control to the call.
//
// Arguments:
// call - the GT_CALL node
//
// Remarks:
// For tailcalls this function will generate a jump.
//
void CodeGen::genCallInstruction(GenTreeCall* call)
{
// Determine return value size(s).
const ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
emitAttr retSize = EA_PTRSIZE;
emitAttr secondRetSize = EA_UNKNOWN;
if (call->HasMultiRegRetVal())
{
retSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(0));
secondRetSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(1));
}
else
{
assert(call->gtType != TYP_STRUCT);
if (call->gtType == TYP_REF)
{
retSize = EA_GCREF;
}
else if (call->gtType == TYP_BYREF)
{
retSize = EA_BYREF;
}
}
DebugInfo di;
// We need to propagate the debug information to the call instruction, so we can emit
// an IL to native mapping record for the call, to support managed return value debugging.
// We don't want tail call helper calls that were converted from normal calls to get a record,
// so we skip this hash table lookup logic in that case.
if (compiler->opts.compDbgInfo && compiler->genCallSite2DebugInfoMap != nullptr && !call->IsTailCall())
{
(void)compiler->genCallSite2DebugInfoMap->Lookup(call, &di);
}
CORINFO_SIG_INFO* sigInfo = nullptr;
#ifdef DEBUG
// Pass the call signature information down into the emitter so the emitter can associate
// native call sites with the signatures they were generated from.
if (call->gtCallType != CT_HELPER)
{
sigInfo = call->callSig;
}
if (call->IsFastTailCall())
{
regMaskTP trashedByEpilog = RBM_CALLEE_SAVED;
// The epilog may use and trash REG_GSCOOKIE_TMP_0/1. Make sure we have no
// non-standard args that may be trashed if this is a tailcall.
if (compiler->getNeedsGSSecurityCookie())
{
trashedByEpilog |= genRegMask(REG_GSCOOKIE_TMP_0);
trashedByEpilog |= genRegMask(REG_GSCOOKIE_TMP_1);
}
for (unsigned i = 0; i < call->fgArgInfo->ArgCount(); i++)
{
fgArgTabEntry* entry = call->fgArgInfo->GetArgEntry(i);
for (unsigned j = 0; j < entry->numRegs; j++)
{
regNumber reg = entry->GetRegNum(j);
if ((trashedByEpilog & genRegMask(reg)) != 0)
{
JITDUMP("Tail call node:\n");
DISPTREE(call);
JITDUMP("Register used: %s\n", getRegName(reg));
assert(!"Argument to tailcall may be trashed by epilog");
}
}
}
}
#endif // DEBUG
CORINFO_METHOD_HANDLE methHnd;
GenTree* target = getCallTarget(call, &methHnd);
if (target != nullptr)
{
// A call target can not be a contained indirection
assert(!target->isContainedIndir());
// For fast tailcall we have already consumed the target. We ensure in
// RA that the target was allocated into a volatile register that will
// not be messed up by epilog sequence.
if (!call->IsFastTailCall())
{
genConsumeReg(target);
}
// We have already generated code for gtControlExpr evaluating it into a register.
// We just need to emit "call reg" in this case.
//
assert(genIsValidIntReg(target->GetRegNum()));
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr, // addr
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
target->GetRegNum(),
call->IsFastTailCall());
// clang-format on
}
else
{
// If we have no target and this is a call with indirection cell then
// we do an optimization where we load the call address directly from
// the indirection cell instead of duplicating the tree. In BuildCall
// we ensure that get an extra register for the purpose. Note that for
// CFG the call might have changed to
// CORINFO_HELP_DISPATCH_INDIRECT_CALL in which case we still have the
// indirection cell but we should not try to optimize.
regNumber callThroughIndirReg = REG_NA;
if (!call->IsHelperCall(compiler, CORINFO_HELP_DISPATCH_INDIRECT_CALL))
{
callThroughIndirReg = getCallIndirectionCellReg(call);
}
if (callThroughIndirReg != REG_NA)
{
assert(call->IsR2ROrVirtualStubRelativeIndir());
regNumber targetAddrReg = call->GetSingleTempReg();
// For fast tailcalls we have already loaded the call target when processing the call node.
if (!call->IsFastTailCall())
{
GetEmitter()->emitIns_R_R(ins_Load(TYP_I_IMPL), emitActualTypeSize(TYP_I_IMPL), targetAddrReg,
callThroughIndirReg);
}
else
{
// The register we save the call address in must not be overridden by the epilog.
assert((targetAddrReg & (RBM_INT_CALLEE_TRASH & ~RBM_LR)) == targetAddrReg);
}
// We have now generated code loading the target address from the indirection cell into `targetAddrReg`.
// We just need to emit "bl targetAddrReg" in this case.
//
assert(genIsValidIntReg(targetAddrReg));
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr, // addr
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
targetAddrReg,
call->IsFastTailCall());
// clang-format on
}
else
{
// Generate a direct call to a non-virtual user defined or helper method
assert(call->gtCallType == CT_HELPER || call->gtCallType == CT_USER_FUNC);
void* addr = nullptr;
#ifdef FEATURE_READYTORUN
if (call->gtEntryPoint.addr != NULL)
{
assert(call->gtEntryPoint.accessType == IAT_VALUE);
addr = call->gtEntryPoint.addr;
}
else
#endif // FEATURE_READYTORUN
if (call->gtCallType == CT_HELPER)
{
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
void* pAddr = nullptr;
addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
assert(pAddr == nullptr);
}
else
{
// Direct call to a non-virtual user function.
addr = call->gtDirectCallAddress;
}
assert(addr != nullptr);
// Non-virtual direct call to known addresses
#ifdef TARGET_ARM
if (!validImmForBL((ssize_t)addr))
{
regNumber tmpReg = call->GetSingleTempReg();
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, tmpReg, (ssize_t)addr);
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
NULL,
retSize,
di,
tmpReg,
call->IsFastTailCall());
// clang-format on
}
else
#endif // TARGET_ARM
{
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
addr,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
}
}
}
// Produce code for a GT_JMP node.
// The arguments of the caller need to be transferred to the callee before exiting the caller.
// The actual jump to callee is generated as part of caller epilog sequence.
// Therefore the codegen of GT_JMP is to ensure that the callee arguments are correctly setup.
void CodeGen::genJmpMethod(GenTree* jmp)
{
assert(jmp->OperGet() == GT_JMP);
assert(compiler->compJmpOpUsed);
// If no arguments, nothing to do
if (compiler->info.compArgsCount == 0)
{
return;
}
// Make sure register arguments are in their initial registers
// and stack arguments are put back as well.
unsigned varNum;
LclVarDsc* varDsc;
// First move any enregistered stack arguments back to the stack.
// At the same time any reg arg not in the correct register is moved back to its stack location.
//
// We are not strictly required to spill reg args that are not in the desired reg for a jmp call.
// But that would require us to deal with circularity while moving values around. Spilling
// to stack makes the implementation simple, which is not a bad trade off given Jmp calls
// are not frequent.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
if (varDsc->lvIsRegArg && (varDsc->GetRegNum() != REG_STK))
{
// Skip reg args which are already in the right register for the jmp call.
// If not, we will spill such args to their stack locations.
//
// If we need to generate a tail call profiler hook, then spill all
// arg regs to free them up for the callback.
if (!compiler->compIsProfilerHookNeeded() && (varDsc->GetRegNum() == varDsc->GetArgReg()))
continue;
}
else if (varDsc->GetRegNum() == REG_STK)
{
// Skip args which are currently living on the stack.
continue;
}
// If we came here it means either a reg argument not in the right register or
// a stack argument currently living in a register. In either case the following
// assert should hold.
assert(varDsc->GetRegNum() != REG_STK);
assert(varDsc->IsEnregisterableLcl());
var_types storeType = varDsc->GetActualRegisterType();
emitAttr storeSize = emitActualTypeSize(storeType);
#ifdef TARGET_ARM
if (varDsc->TypeGet() == TYP_LONG)
{
// long - at least the low half must be enregistered
GetEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->GetRegNum(), varNum, 0);
// Is the upper half also enregistered?
if (varDsc->GetOtherReg() != REG_STK)
{
GetEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->GetOtherReg(), varNum, sizeof(int));
}
}
else
#endif // TARGET_ARM
{
GetEmitter()->emitIns_S_R(ins_Store(storeType), storeSize, varDsc->GetRegNum(), varNum, 0);
}
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be expecting it.
// Therefore manually update life of varDsc->GetRegNum().
regMaskTP tempMask = genRegMask(varDsc->GetRegNum());
regSet.RemoveMaskVars(tempMask);
gcInfo.gcMarkRegSetNpt(tempMask);
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varNum);
}
}
#ifdef PROFILING_SUPPORTED
// At this point all arg regs are free.
// Emit tail call profiler callback.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif
// Next, move any register arguments that are not currently in their argument registers back into them.
regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
// Skip if arg not passed in a register.
if (!varDsc->lvIsRegArg)
continue;
// Register argument
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
// Is register argument already in the right register?
// If not load it from its stack location.
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
regNumber argRegNext = REG_NA;
#ifdef TARGET_ARM64
if (varDsc->GetRegNum() != argReg)
{
var_types loadType = TYP_UNDEF;
if (varDsc->lvIsHfaRegArg())
{
// Note that for HFA, the argument is currently marked address exposed so lvRegNum will always be
// REG_STK. We home the incoming HFA argument registers in the prolog. Then we'll load them back
// here, whether they are already in the correct registers or not. This is such a corner case that
// it is not worth optimizing it.
assert(!compiler->info.compIsVarArgs);
loadType = varDsc->GetHfaType();
regNumber fieldReg = argReg;
emitAttr loadSize = emitActualTypeSize(loadType);
unsigned cSlots = varDsc->lvHfaSlots();
for (unsigned ofs = 0, cSlot = 0; cSlot < cSlots; cSlot++, ofs += (unsigned)loadSize)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
assert(genIsValidFloatReg(fieldReg)); // No GC register tracking for floating point registers.
fieldReg = regNextOfType(fieldReg, loadType);
}
}
else
{
if (varTypeIsStruct(varDsc))
{
// Must be <= 16 bytes or else it wouldn't be passed in registers, except for HFA,
// which can be bigger (and is handled above).
noway_assert(EA_SIZE_IN_BYTES(varDsc->lvSize()) <= 16);
loadType = varDsc->GetLayout()->GetGCPtrType(0);
}
else
{
loadType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
}
emitAttr loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argReg, varNum, 0);
// Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be
// expecting it. Therefore manually update life of argReg. Note that GT_JMP marks the end of
// the basic block and after which reg life and gc info will be recomputed for the new block
// in genCodeForBBList().
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
{
// Restore the second register.
argRegNext = genRegArgNext(argReg);
loadType = varDsc->GetLayout()->GetGCPtrType(1);
loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argRegNext, varNum, TARGET_POINTER_SIZE);
regSet.AddMaskVars(genRegMask(argRegNext));
gcInfo.gcMarkRegPtrVal(argRegNext, loadType);
}
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
}
if (compiler->info.compIsVarArgs)
{
// In case of a jmp call to a vararg method ensure only integer registers are passed.
assert((genRegMask(argReg) & (RBM_ARG_REGS | RBM_ARG_RET_BUFF)) != RBM_NONE);
assert(!varDsc->lvIsHfaRegArg());
fixedIntArgMask |= genRegMask(argReg);
if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
{
assert(argRegNext != REG_NA);
fixedIntArgMask |= genRegMask(argRegNext);
}
if (argReg == REG_ARG_0)
{
assert(firstArgVarNum == BAD_VAR_NUM);
firstArgVarNum = varNum;
}
}
#else // !TARGET_ARM64
bool twoParts = false;
var_types loadType = TYP_UNDEF;
if (varDsc->TypeGet() == TYP_LONG)
{
twoParts = true;
}
else if (varDsc->TypeGet() == TYP_DOUBLE)
{
if (compiler->info.compIsVarArgs || compiler->opts.compUseSoftFP)
{
twoParts = true;
}
}
if (twoParts)
{
argRegNext = genRegArgNext(argReg);
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, varNum, 0);
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argRegNext, varNum, REGSIZE_BYTES);
}
if (compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(argReg);
fixedIntArgMask |= genRegMask(argRegNext);
}
}
else if (varDsc->lvIsHfaRegArg())
{
loadType = varDsc->GetHfaType();
regNumber fieldReg = argReg;
emitAttr loadSize = emitActualTypeSize(loadType);
unsigned maxSize = min(varDsc->lvSize(), (LAST_FP_ARGREG + 1 - argReg) * REGSIZE_BYTES);
for (unsigned ofs = 0; ofs < maxSize; ofs += (unsigned)loadSize)
{
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
}
assert(genIsValidFloatReg(fieldReg)); // we don't use register tracking for FP
fieldReg = regNextOfType(fieldReg, loadType);
}
}
else if (varTypeIsStruct(varDsc))
{
regNumber slotReg = argReg;
unsigned maxSize = min(varDsc->lvSize(), (REG_ARG_LAST + 1 - argReg) * REGSIZE_BYTES);
for (unsigned ofs = 0; ofs < maxSize; ofs += REGSIZE_BYTES)
{
unsigned idx = ofs / REGSIZE_BYTES;
loadType = varDsc->GetLayout()->GetGCPtrType(idx);
if (varDsc->GetRegNum() != argReg)
{
emitAttr loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, slotReg, varNum, ofs);
}
regSet.AddMaskVars(genRegMask(slotReg));
gcInfo.gcMarkRegPtrVal(slotReg, loadType);
if (genIsValidIntReg(slotReg) && compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(slotReg);
}
slotReg = genRegArgNext(slotReg);
}
}
else
{
loadType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
}
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (genIsValidIntReg(argReg) && compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(argReg);
}
}
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
#endif // !TARGET_ARM64
}
// Jmp call to a vararg method - if the method has fewer fixed arguments than the number of integer
// argument registers, load the remaining integer arg registers from the corresponding
// shadow stack slots. This is for the reason that we don't know the number and type
// of non-fixed params passed by the caller, therefore we have to assume the worst case,
// i.e. that the caller passed values in all of the integer arg registers.
//
// The caller could have passed gc-ref/byref type var args. Since these are var args
// the callee has no way of knowing their gc-ness. Therefore, mark the region that loads the
// remaining arg registers from shadow stack slots as non-gc interruptible.
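// For example (illustrative, arm64): if only x0 holds a fixed argument, the loop below reloads
// x1..x7 from the incoming shadow slots of the first argument (roughly an "ldr x1, ..." from the first
// argument's frame slot plus 8, and so on), inside the no-GC region since the GC-ness of the reloaded
// values is unknown.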
if (fixedIntArgMask != RBM_NONE)
{
assert(compiler->info.compIsVarArgs);
assert(firstArgVarNum != BAD_VAR_NUM);
regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
if (remainingIntArgMask != RBM_NONE)
{
GetEmitter()->emitDisableGC();
for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
regNumber argReg = intArgRegs[argNum];
regMaskTP argRegMask = genRegMask(argReg);
if ((remainingIntArgMask & argRegMask) != 0)
{
remainingIntArgMask &= ~argRegMask;
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, firstArgVarNum, argOffset);
}
argOffset += REGSIZE_BYTES;
}
GetEmitter()->emitEnableGC();
}
}
}
//------------------------------------------------------------------------
// genIntCastOverflowCheck: Generate overflow checking code for an integer cast.
//
// Arguments:
// cast - The GT_CAST node
// desc - The cast description
// reg - The register containing the value to check
//
void CodeGen::genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg)
{
switch (desc.CheckKind())
{
case GenIntCastDesc::CHECK_POSITIVE:
GetEmitter()->emitIns_R_I(INS_cmp, EA_ATTR(desc.CheckSrcSize()), reg, 0);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::CHECK_UINT_RANGE:
// We need to check if the value is not greater than 0xFFFFFFFF but this value
// cannot be encoded in the immediate operand of CMP. Use TST instead to check
// if the upper 32 bits are zero.
GetEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF00000000LL);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_POSITIVE_INT_RANGE:
// We need to check if the value is not greater than 0x7FFFFFFF but this value
// cannot be encoded in the immediate operand of CMP. Use TST instead to check
// if the upper 33 bits are zero.
GetEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF80000000LL);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_INT_RANGE:
{
const regNumber tempReg = cast->GetSingleTempReg();
assert(tempReg != reg);
instGen_Set_Reg_To_Imm(EA_8BYTE, tempReg, INT32_MAX);
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
genJumpToThrowHlpBlk(EJ_gt, SCK_OVERFLOW);
instGen_Set_Reg_To_Imm(EA_8BYTE, tempReg, INT32_MIN);
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
}
break;
#endif
default:
{
assert(desc.CheckKind() == GenIntCastDesc::CHECK_SMALL_INT_RANGE);
const int castMaxValue = desc.CheckSmallIntMax();
const int castMinValue = desc.CheckSmallIntMin();
// Values greater than 255 cannot be encoded in the immediate operand of CMP.
// Replace (x > max) with (x >= max + 1) where max + 1 (a power of 2) can be
// encoded. We could do this for all max values but on ARM32 "cmp r0, 255"
// is better than "cmp r0, 256" because it has a shorter encoding.
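// For example (illustrative, arm64 syntax): a checked cast to ushort (castMinValue == 0,
// castMaxValue == 65535) emits "cmp w0, #0x10000" followed by an unsigned "b.hs" to the overflow
// throw block, assuming the source value lives in w0.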
if (castMaxValue > 255)
{
assert((castMaxValue == 32767) || (castMaxValue == 65535));
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue + 1);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_hs : EJ_ge, SCK_OVERFLOW);
}
else
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_hi : EJ_gt, SCK_OVERFLOW);
}
if (castMinValue != 0)
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
}
}
break;
}
}
//------------------------------------------------------------------------
// genIntToIntCast: Generate code for an integer cast, with or without overflow check.
//
// Arguments:
// cast - The GT_CAST node
//
// Assumptions:
// The cast node is not a contained node and must have an assigned register.
// Neither the source nor target type can be a floating point type.
//
// TODO-ARM64-CQ: Allow castOp to be a contained node without an assigned register.
//
void CodeGen::genIntToIntCast(GenTreeCast* cast)
{
genConsumeRegs(cast->gtGetOp1());
const regNumber srcReg = cast->gtGetOp1()->GetRegNum();
const regNumber dstReg = cast->GetRegNum();
assert(genIsValidIntReg(srcReg));
assert(genIsValidIntReg(dstReg));
GenIntCastDesc desc(cast);
if (desc.CheckKind() != GenIntCastDesc::CHECK_NONE)
{
genIntCastOverflowCheck(cast, desc, srcReg);
}
if ((desc.ExtendKind() != GenIntCastDesc::COPY) || (srcReg != dstReg))
{
instruction ins;
unsigned insSize;
switch (desc.ExtendKind())
{
case GenIntCastDesc::ZERO_EXTEND_SMALL_INT:
ins = (desc.ExtendSrcSize() == 1) ? INS_uxtb : INS_uxth;
insSize = 4;
break;
case GenIntCastDesc::SIGN_EXTEND_SMALL_INT:
ins = (desc.ExtendSrcSize() == 1) ? INS_sxtb : INS_sxth;
insSize = 4;
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::ZERO_EXTEND_INT:
ins = INS_mov;
insSize = 4;
break;
case GenIntCastDesc::SIGN_EXTEND_INT:
ins = INS_sxtw;
insSize = 8;
break;
#endif
default:
assert(desc.ExtendKind() == GenIntCastDesc::COPY);
ins = INS_mov;
insSize = desc.ExtendSrcSize();
break;
}
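// For example (illustrative): SIGN_EXTEND_SMALL_INT from a byte emits "sxtb w0, w0", ZERO_EXTEND_INT
// on arm64 emits a 32-bit "mov w0, w0" (which clears the upper 32 bits), and SIGN_EXTEND_INT emits
// "sxtw x0, w0", assuming source and destination are both register 0.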
GetEmitter()->emitIns_Mov(ins, EA_ATTR(insSize), dstReg, srcReg, /* canSkip */ false);
}
genProduceReg(cast);
}
//------------------------------------------------------------------------
// genFloatToFloatCast: Generate code for a cast between float and double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// The cast is between float and double.
//
void CodeGen::genFloatToFloatCast(GenTree* treeNode)
{
// float <--> double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidFloatReg(op1->GetRegNum())); // Must be a valid float reg.
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
genConsumeOperands(treeNode->AsOp());
// treeNode must be a reg
assert(!treeNode->isContained());
#if defined(TARGET_ARM)
if (srcType != dstType)
{
instruction insVcvt = (srcType == TYP_FLOAT) ? INS_vcvt_f2d // convert Float to Double
: INS_vcvt_d2f; // convert Double to Float
GetEmitter()->emitIns_R_R(insVcvt, emitTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum());
}
else
{
GetEmitter()->emitIns_Mov(INS_vmov, emitTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
/* canSkip */ true);
}
#elif defined(TARGET_ARM64)
if (srcType != dstType)
{
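// e.g. (illustrative) a float-to-double cast emits "fcvt d0, s0" and a double-to-float cast
// emits "fcvt s0, d0", assuming both operands live in register 0.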
insOpts cvtOption = (srcType == TYP_FLOAT) ? INS_OPTS_S_TO_D // convert Single to Double
: INS_OPTS_D_TO_S; // convert Double to Single
GetEmitter()->emitIns_R_R(INS_fcvt, emitActualTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
cvtOption);
}
else
{
// Double-to-double or float-to-float cast: emit a move instruction.
GetEmitter()->emitIns_Mov(INS_mov, emitActualTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
/* canSkip */ true);
}
#endif // TARGET*
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCreateAndStoreGCInfo: Create and record GC Info for the function.
//
void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr))
{
IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC());
GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder != nullptr);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
// We keep the call count for the second call to gcMakeRegPtrTable() below.
unsigned callCnt = 0;
// First we figure out the encoder ID's for the stack slots and registers.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
// Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
gcInfoEncoder->FinalizeSlotIds();
// Now we can actually use those slot ID's to declare live ranges.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
#ifdef TARGET_ARM64
if (compiler->opts.compDbgEnC)
{
// what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
// which is:
// -return address
// -saved off RBP
// -saved 'this' pointer and bool for synchronized methods
// 4 slots for RBP + return address + RSI + RDI
int preservedAreaSize = 4 * REGSIZE_BYTES;
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
preservedAreaSize += REGSIZE_BYTES;
preservedAreaSize += 1; // bool for synchronized methods
}
// Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
// frame
gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
}
#endif // TARGET_ARM64
if (compiler->opts.IsReversePInvoke())
{
unsigned reversePInvokeFrameVarNumber = compiler->lvaReversePInvokeFrameVar;
assert(reversePInvokeFrameVarNumber != BAD_VAR_NUM);
const LclVarDsc* reversePInvokeFrameVar = compiler->lvaGetDesc(reversePInvokeFrameVarNumber);
gcInfoEncoder->SetReversePInvokeFrameSlot(reversePInvokeFrameVar->GetStackOffset());
}
gcInfoEncoder->Build();
// The GC encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t).
// Let's save the values anyway for debugging purposes.
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
// clang-format off
const CodeGen::GenConditionDesc CodeGen::GenConditionDesc::map[32]
{
{ }, // NONE
{ }, // 1
{ EJ_lt }, // SLT
{ EJ_le }, // SLE
{ EJ_ge }, // SGE
{ EJ_gt }, // SGT
{ EJ_mi }, // S
{ EJ_pl }, // NS
{ EJ_eq }, // EQ
{ EJ_ne }, // NE
{ EJ_lo }, // ULT
{ EJ_ls }, // ULE
{ EJ_hs }, // UGE
{ EJ_hi }, // UGT
{ EJ_hs }, // C
{ EJ_lo }, // NC
{ EJ_eq }, // FEQ
{ EJ_gt, GT_AND, EJ_lo }, // FNE
{ EJ_lo }, // FLT
{ EJ_ls }, // FLE
{ EJ_ge }, // FGE
{ EJ_gt }, // FGT
{ EJ_vs }, // O
{ EJ_vc }, // NO
{ EJ_eq, GT_OR, EJ_vs }, // FEQU
{ EJ_ne }, // FNEU
{ EJ_lt }, // FLTU
{ EJ_le }, // FLEU
{ EJ_hs }, // FGEU
{ EJ_hi }, // FGTU
{ }, // P
{ }, // NP
};
// clang-format on
//------------------------------------------------------------------------
// inst_SETCC: Generate code to set a register to 0 or 1 based on a condition.
//
// Arguments:
// condition - The condition
// type - The type of the value to be produced
// dstReg - The destination register to be set to 1 or 0
//
void CodeGen::inst_SETCC(GenCondition condition, var_types type, regNumber dstReg)
{
assert(varTypeIsIntegral(type));
assert(genIsValidIntReg(dstReg));
#ifdef TARGET_ARM64
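// For a compound float condition the sequence is roughly (illustrative, FEQU i.e. "equal or unordered"):
//
// cset x0, eq
// b.eq Next
// cset x0, vs
// Next:
//
// i.e. the second cset only runs when the first (GT_OR) condition did not already hold.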
const GenConditionDesc& desc = GenConditionDesc::Get(condition);
inst_SET(desc.jumpKind1, dstReg);
if (desc.oper != GT_NONE)
{
BasicBlock* labelNext = genCreateTempLabel();
inst_JMP((desc.oper == GT_OR) ? desc.jumpKind1 : emitter::emitReverseJumpKind(desc.jumpKind1), labelNext);
inst_SET(desc.jumpKind2, dstReg);
genDefineTempLabel(labelNext);
}
#else
// Emit code like this:
// ...
// bgt True
// movs rD, #0
// b Next
// True:
// movs rD, #1
// Next:
// ...
BasicBlock* labelTrue = genCreateTempLabel();
inst_JCC(condition, labelTrue);
GetEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 0);
BasicBlock* labelNext = genCreateTempLabel();
GetEmitter()->emitIns_J(INS_b, labelNext);
genDefineTempLabel(labelTrue);
GetEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 1);
genDefineTempLabel(labelNext);
#endif
}
//------------------------------------------------------------------------
// genCodeForStoreBlk: Produce code for a GT_STORE_OBJ/GT_STORE_DYN_BLK/GT_STORE_BLK node.
//
// Arguments:
// blkOp - the node
//
void CodeGen::genCodeForStoreBlk(GenTreeBlk* blkOp)
{
assert(blkOp->OperIs(GT_STORE_OBJ, GT_STORE_DYN_BLK, GT_STORE_BLK));
if (blkOp->OperIs(GT_STORE_OBJ))
{
assert(!blkOp->gtBlkOpGcUnsafe);
assert(blkOp->OperIsCopyBlkOp());
assert(blkOp->AsObj()->GetLayout()->HasGCPtr());
genCodeForCpObj(blkOp->AsObj());
return;
}
bool isCopyBlk = blkOp->OperIsCopyBlkOp();
switch (blkOp->gtBlkOpKind)
{
case GenTreeBlk::BlkOpKindHelper:
assert(!blkOp->gtBlkOpGcUnsafe);
if (isCopyBlk)
{
genCodeForCpBlkHelper(blkOp);
}
else
{
genCodeForInitBlkHelper(blkOp);
}
break;
case GenTreeBlk::BlkOpKindUnroll:
if (isCopyBlk)
{
if (blkOp->gtBlkOpGcUnsafe)
{
GetEmitter()->emitDisableGC();
}
genCodeForCpBlkUnroll(blkOp);
if (blkOp->gtBlkOpGcUnsafe)
{
GetEmitter()->emitEnableGC();
}
}
else
{
assert(!blkOp->gtBlkOpGcUnsafe);
genCodeForInitBlkUnroll(blkOp);
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genScaledAdd: A helper for genLeaInstruction.
//
void CodeGen::genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale)
{
emitter* emit = GetEmitter();
if (scale == 0)
{
// target = base + index
GetEmitter()->emitIns_R_R_R(INS_add, attr, targetReg, baseReg, indexReg);
}
else
{
// target = base + index<<scale
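// e.g. (illustrative) with scale == 3 this emits "add x0, x1, x2, LSL #3" on arm64
// (or "add r0, r1, r2, LSL #3" on arm32), assuming target/base/index are registers 0/1/2.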
#if defined(TARGET_ARM)
emit->emitIns_R_R_R_I(INS_add, attr, targetReg, baseReg, indexReg, scale, INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
#elif defined(TARGET_ARM64)
emit->emitIns_R_R_R_I(INS_add, attr, targetReg, baseReg, indexReg, scale, INS_OPTS_LSL);
#endif
}
}
//------------------------------------------------------------------------
// genCodeForMulLong: Generates code for int*int->long multiplication.
//
// Arguments:
// mul - the GT_MUL_LONG node
//
// Return Value:
// None.
//
void CodeGen::genCodeForMulLong(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL_LONG));
genConsumeOperands(mul);
regNumber srcReg1 = mul->gtGetOp1()->GetRegNum();
regNumber srcReg2 = mul->gtGetOp2()->GetRegNum();
instruction ins = mul->IsUnsigned() ? INS_umull : INS_smull;
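// e.g. (illustrative) for a signed multiply: arm64 emits "smull x0, w1, w2" (full 64-bit product in x0),
// while arm32 emits "smull r0, r1, r2, r3" (low half in r0, high half in r1), assuming these register
// assignments.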
#ifdef TARGET_ARM
GetEmitter()->emitIns_R_R_R_R(ins, EA_4BYTE, mul->GetRegNum(), mul->AsMultiRegOp()->gtOtherReg, srcReg1, srcReg2);
#else
GetEmitter()->emitIns_R_R_R(ins, EA_4BYTE, mul->GetRegNum(), srcReg1, srcReg2);
#endif
genProduceReg(mul);
}
//------------------------------------------------------------------------
// genLeaInstruction: Produce code for a GT_LEA node.
//
// Arguments:
// lea - the node
//
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
genConsumeOperands(lea);
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(lea);
int offset = lea->Offset();
// In ARM we can only load addresses of the form:
//
// [Base + index*scale]
// [Base + Offset]
// [Literal] (PC-Relative)
//
// So for the case of a LEA node of the form [Base + Index*Scale + Offset] we will generate:
// destReg = baseReg + indexReg * scale;
// destReg = destReg + offset;
//
// TODO-ARM64-CQ: The purpose of the GT_LEA node is to directly reflect a single target architecture
// addressing mode instruction. Currently we're 'cheating' by producing one or more
// instructions to generate the addressing mode so we need to modify lowering to
// produce LEAs that are a 1:1 relationship to the ARM64 architecture.
if (lea->Base() && lea->Index())
{
GenTree* memBase = lea->Base();
GenTree* index = lea->Index();
DWORD scale;
assert(isPow2(lea->gtScale));
BitScanForward(&scale, lea->gtScale);
assert(scale <= 4);
if (offset != 0)
{
regNumber tmpReg = lea->GetSingleTempReg();
// When generating fully interruptible code we have to use the "large offset" sequence
// when calculating an EA_BYREF address, as we can't report a byref that points outside of the object
//
bool useLargeOffsetSeq = compiler->GetInterruptible() && (size == EA_BYREF);
if (!useLargeOffsetSeq && emitter::emitIns_valid_imm_for_add(offset))
{
// Generate code to set tmpReg = base + index*scale
genScaledAdd(size, tmpReg, memBase->GetRegNum(), index->GetRegNum(), scale);
// Then compute target reg from [tmpReg + offset]
emit->emitIns_R_R_I(INS_add, size, lea->GetRegNum(), tmpReg, offset);
}
else // large offset sequence
{
noway_assert(tmpReg != index->GetRegNum());
noway_assert(tmpReg != memBase->GetRegNum());
// First load/store tmpReg with the offset constant
// rTmp = imm
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then add the scaled index register
// rTmp = rTmp + index*scale
genScaledAdd(EA_PTRSIZE, tmpReg, tmpReg, index->GetRegNum(), scale);
// Then compute target reg from [base + tmpReg ]
// rDst = base + rTmp
emit->emitIns_R_R_R(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), tmpReg);
}
}
else
{
// Then compute target reg from [base + index*scale]
genScaledAdd(size, lea->GetRegNum(), memBase->GetRegNum(), index->GetRegNum(), scale);
}
}
else if (lea->Base())
{
GenTree* memBase = lea->Base();
if (emitter::emitIns_valid_imm_for_add(offset))
{
if (offset != 0)
{
// Then compute target reg from [memBase + offset]
emit->emitIns_R_R_I(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), offset);
}
else // offset is zero
{
emit->emitIns_Mov(INS_mov, size, lea->GetRegNum(), memBase->GetRegNum(), /* canSkip */ true);
}
}
else
{
// We require a tmpReg to hold the offset
regNumber tmpReg = lea->GetSingleTempReg();
// First load tmpReg with the large offset constant
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then compute target reg from [memBase + tmpReg]
emit->emitIns_R_R_R(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), tmpReg);
}
}
else if (lea->Index())
{
// If we encounter a GT_LEA node without a base it means it came out
// when attempting to optimize an arbitrary arithmetic expression during lower.
// This is currently disabled in ARM64 since we need to adjust lower to account
// for the simpler instructions ARM64 supports.
// TODO-ARM64-CQ: Fix this and let LEA optimize arithmetic trees too.
assert(!"We shouldn't see a baseless address computation during CodeGen for ARM64");
}
genProduceReg(lea);
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// genSIMDSplitReturn: Generates code for returning a fixed-size SIMD type that lives
// in a single register, but is returned in multiple registers.
//
// Arguments:
// src - The source of the return
// retTypeDesc - The return type descriptor.
//
void CodeGen::genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc)
{
assert(varTypeIsSIMD(src));
assert(src->isUsedFromReg());
regNumber srcReg = src->GetRegNum();
// Treat src register as a homogeneous vector with element size equal to the reg size
// Insert pieces in order
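// A minimal illustration (assuming a 16-byte SIMD value in v16 that is returned in two integer
// registers): the loop below emits "mov x0, v16.d[0]" and "mov x1, v16.d[1]"; when a piece goes to a
// floating point return register the element is inserted with e.g. "mov v0.s[0], v16.s[i]" instead.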
unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc->GetReturnRegType(i);
regNumber reg = retTypeDesc->GetABIReturnReg(i);
if (varTypeIsFloating(type))
{
// If the register piece is to be passed in a floating point register
// Use a vector mov element instruction
// reg is not a vector, so it is in the first element reg[0]
// mov reg[0], src[i]
// This effectively moves from `src[i]` to `reg[0]`, upper bits of reg remain unchanged
// For the case where src == reg, since we are only writing reg[0], as long as we iterate
// so that src[0] is consumed before writing reg[0], we do not need a temporary.
GetEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), reg, srcReg, 0, i);
}
else
{
// If the register piece is to be passed in an integer register
// Use a vector mov to general purpose register instruction
// mov reg, src[i]
// This effectively moves from `src[i]` to `reg`
GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), reg, srcReg, i);
}
}
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genPushCalleeSavedRegisters: Push any callee-saved registers we have used.
//
// Arguments (arm64):
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
#if defined(TARGET_ARM64)
void CodeGen::genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed)
#else
void CodeGen::genPushCalleeSavedRegisters()
#endif
{
assert(compiler->compGeneratingProlog);
#ifdef TARGET_ARM64
// Probe large frames now, if necessary, since genPushCalleeSavedRegisters() will allocate the frame. Note that
// for arm64, genAllocLclFrame only probes the frame; it does not actually allocate it (it does not change SP).
// For arm64, we are probing the frame before the callee-saved registers are saved. The 'initReg' might have
// been calculated to be one of the callee-saved registers (say, if all the integer argument registers are
// in use, and perhaps with other conditions being satisfied). This is ok in other cases, after the callee-saved
// registers have been saved. So instead of letting genAllocLclFrame use initReg as a temporary register,
// always use REG_SCRATCH. We don't care if it trashes it, so ignore the initRegZeroed output argument.
bool ignoreInitRegZeroed = false;
genAllocLclFrame(compiler->compLclFrameSize, REG_SCRATCH, &ignoreInitRegZeroed,
intRegState.rsCalleeRegArgMaskLiveIn);
#endif
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// On ARM we push the FP (frame-pointer) here along with all other callee saved registers
if (isFramePointerUsed())
rsPushRegs |= RBM_FPBASE;
//
    // It may be possible to skip pushing/popping lr for leaf methods. However, such an optimization would require
// changes in GC suspension architecture.
//
// We would need to guarantee that a tight loop calling a virtual leaf method can be suspended for GC. Today, we
// generate partially interruptible code for both the method that contains the tight loop with the call and the leaf
// method. GC suspension depends on return address hijacking in this case. Return address hijacking depends
// on the return address to be saved on the stack. If we skipped pushing/popping lr, the return address would never
// be saved on the stack and the GC suspension would time out.
//
    // So if we wanted to skip pushing/popping lr for leaf frames, we would also need to do one of
    // the following to make GC suspension work in the above scenario:
    // - Make return address hijacking work even when lr is not saved on the stack.
    // - Generate fully interruptible code for loops that contain calls
// - Generate fully interruptible code for leaf methods
//
// Given the limited benefit from this optimization (<10k for CoreLib NGen image), the extra complexity
// is not worth it.
//
rsPushRegs |= RBM_LR; // We must save the return address (in the LR register)
regSet.rsMaskCalleeSaved = rsPushRegs;
#ifdef DEBUG
if (compiler->compCalleeRegsPushed != genCountBits(rsPushRegs))
{
printf("Error: unexpected number of callee-saved registers to push. Expected: %d. Got: %d ",
compiler->compCalleeRegsPushed, genCountBits(rsPushRegs));
dspRegMask(rsPushRegs);
printf("\n");
assert(compiler->compCalleeRegsPushed == genCountBits(rsPushRegs));
}
#endif // DEBUG
#if defined(TARGET_ARM)
regMaskTP maskPushRegsFloat = rsPushRegs & RBM_ALLFLOAT;
regMaskTP maskPushRegsInt = rsPushRegs & ~maskPushRegsFloat;
maskPushRegsInt |= genStackAllocRegisterMask(compiler->compLclFrameSize, maskPushRegsFloat);
assert(FitsIn<int>(maskPushRegsInt));
inst_IV(INS_push, (int)maskPushRegsInt);
compiler->unwindPushMaskInt(maskPushRegsInt);
if (maskPushRegsFloat != 0)
{
genPushFltRegs(maskPushRegsFloat);
compiler->unwindPushMaskFloat(maskPushRegsFloat);
}
#elif defined(TARGET_ARM64)
// See the document "ARM64 JIT Frame Layout" and/or "ARM64 Exception Data" for more details or requirements and
// options. Case numbers in comments here refer to this document. See also Compiler::lvaAssignFrameOffsets()
// for pictures of the general frame layouts, and CodeGen::genFuncletProlog() implementations (per architecture)
// for pictures of the funclet frame layouts.
//
// For most frames, generate, e.g.:
// stp fp, lr, [sp,-0x80]! // predecrement SP with full frame size, and store FP/LR pair.
// stp r19, r20, [sp, 0x60] // store at positive offset from SP established above, into callee-saved area
// // at top of frame (highest addresses).
// stp r21, r22, [sp, 0x70]
//
// Notes:
// 1. We don't always need to save FP. If FP isn't saved, then LR is saved with the other callee-saved registers
// at the top of the frame.
// 2. If we save FP, then the first store is FP, LR.
// 3. General-purpose registers are 8 bytes, floating-point registers are 16 bytes, but FP/SIMD registers only
// preserve their lower 8 bytes, by calling convention.
// 4. For frames with varargs, we spill the integer register arguments to the stack, so all the arguments are
// consecutive, and at the top of the frame.
// 5. We allocate the frame here; no further changes to SP are allowed (except in the body, for localloc).
//
// For functions with GS and localloc, we change the frame so the frame pointer and LR are saved at the top
// of the frame, just under the varargs registers (if any). Note that the funclet frames must follow the same
// rule, and both main frame and funclet frames (if any) must put PSPSym in the same offset from Caller-SP.
// Since this frame type is relatively rare, we force using it via stress modes, for additional coverage.
//
// The frames look like the following (simplified to only include components that matter for establishing the
// frames). See also Compiler::lvaAssignFrameOffsets().
//
// Frames with FP, LR saved at bottom of frame (above outgoing argument space):
//
// | |
// |-----------------------|
// | incoming arguments |
// +=======================+ <---- Caller's SP
// | Varargs regs space | // Only for varargs functions; 64 bytes
// |-----------------------|
// |Callee saved registers | // not including FP/LR; multiple of 8 bytes
// |-----------------------|
// | PSP slot | // 8 bytes (omitted in CoreRT ABI)
// |-----------------------|
// | locals, temps, etc. |
// |-----------------------|
// | possible GS cookie |
// |-----------------------|
// | Saved LR | // 8 bytes
// |-----------------------|
// | Saved FP | // 8 bytes
// |-----------------------|
// | Outgoing arg space | // multiple of 8 bytes; if required (i.e., #outsz != 0)
// |-----------------------| <---- Ambient SP
// | | |
// ~ | Stack grows ~
// | | downward |
// V
//
// Frames with FP, LR saved at top of frame (below saved varargs incoming arguments):
//
// | |
// |-----------------------|
// | incoming arguments |
// +=======================+ <---- Caller's SP
// | Varargs regs space | // Only for varargs functions; 64 bytes
// |-----------------------|
// | Saved LR | // 8 bytes
// |-----------------------|
// | Saved FP | // 8 bytes
// |-----------------------|
// |Callee saved registers | // not including FP/LR; multiple of 8 bytes
// |-----------------------|
// | PSP slot | // 8 bytes (omitted in CoreRT ABI)
// |-----------------------|
// | locals, temps, etc. |
// |-----------------------|
// | possible GS cookie |
// |-----------------------|
// | Outgoing arg space | // multiple of 8 bytes; if required (i.e., #outsz != 0)
// |-----------------------| <---- Ambient SP
// | | |
// ~ | Stack grows ~
// | | downward |
// V
//
int totalFrameSize = genTotalFrameSize();
int offset; // This will be the starting place for saving the callee-saved registers, in increasing order.
regMaskTP maskSaveRegsFloat = rsPushRegs & RBM_ALLFLOAT;
regMaskTP maskSaveRegsInt = rsPushRegs & ~maskSaveRegsFloat;
#ifdef DEBUG
if (verbose)
{
printf("Save float regs: ");
dspRegMask(maskSaveRegsFloat);
printf("\n");
printf("Save int regs: ");
dspRegMask(maskSaveRegsInt);
printf("\n");
}
#endif // DEBUG
// The frameType number is arbitrary, is defined below, and corresponds to one of the frame styles we
// generate based on various sizes.
int frameType = 0;
// The amount to subtract from SP before starting to store the callee-saved registers. It might be folded into the
// first save instruction as a "predecrement" amount, if possible.
int calleeSaveSpDelta = 0;
if (isFramePointerUsed())
{
// We need to save both FP and LR.
assert((maskSaveRegsInt & RBM_FP) != 0);
assert((maskSaveRegsInt & RBM_LR) != 0);
// If we need to generate a GS cookie, we need to make sure the saved frame pointer and return address
// (FP and LR) are protected from buffer overrun by the GS cookie. If FP/LR are at the lowest addresses,
// then they are safe, since they are lower than any unsafe buffers. And the GS cookie we add will
// protect our caller's frame. If we have a localloc, however, that is dynamically placed lower than our
// saved FP/LR. In that case, we save FP/LR along with the rest of the callee-saved registers, above
// the GS cookie.
//
// After the frame is allocated, the frame pointer is established, pointing at the saved frame pointer to
// create a frame pointer chain.
//
// Do we need another frame pointer register to get good code quality in the case of having the frame pointer
// point high in the frame, so we can take advantage of arm64's preference for positive offsets? C++ native
// code dedicates callee-saved x19 to this, so generates:
// mov x19, sp
// in the prolog, then uses x19 for local var accesses. Given that this case is so rare, we currently do
// not do this. That means that negative offsets from FP might need to use the reserved register to form
// the local variable offset for an addressing mode.
if (((compiler->lvaOutgoingArgSpaceSize == 0) && (totalFrameSize <= 504)) &&
!genSaveFpLrWithAllCalleeSavedRegisters)
{
// Case #1.
//
// Generate:
// stp fp,lr,[sp,#-framesz]!
//
// The (totalFrameSize <= 504) condition ensures that both the pre-index STP instruction
// used in the prolog, and the post-index LDP instruction used in the epilog, can be generated.
// Note that STP and the unwind codes can handle -512, but LDP with a positive post-index value
// can only handle up to 504, and we want our prolog and epilog to match.
//
// After saving callee-saved registers, we establish the frame pointer with:
// mov fp,sp
// We do this *after* saving callee-saved registers, so the prolog/epilog unwind codes mostly match.
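            // Illustrative sequence (hypothetical non-varargs frame saving x19/x20 with totalFrameSize=0x30,
            // so compLclFrameSize=0x10):
            //      stp fp,lr,[sp,#-0x30]!
            //      stp x19,x20,[sp,#0x20]
            //      mov fp,sp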
JITDUMP("Frame type 1. #outsz=0; #framesz=%d; LclFrameSize=%d\n", totalFrameSize,
compiler->compLclFrameSize);
frameType = 1;
assert(totalFrameSize <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -totalFrameSize,
INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, -totalFrameSize);
maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
offset = (int)compiler->compLclFrameSize + 2 * REGSIZE_BYTES; // 2 for FP/LR
}
else if (totalFrameSize <= 512)
{
// Case #2.
//
// The (totalFrameSize <= 512) condition ensures the callee-saved registers can all be saved using STP
// with signed offset encoding. The maximum positive STP offset is 504, but when storing a pair of
// 8 byte registers, the largest actual offset we use would be 512 - 8 * 2 = 496. And STR with positive
// offset has a range 0 to 32760.
//
// After saving callee-saved registers, we establish the frame pointer with:
// add fp,sp,#outsz
// We do this *after* saving callee-saved registers, so the prolog/epilog unwind codes mostly match.
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
JITDUMP("Frame type 4 (save FP/LR at top). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 4;
// The frame will be allocated below, when the callee-saved registers are saved. This might mean a
// separate SUB instruction or the SP adjustment might be folded in to the first STP if there is
// no outgoing argument space AND no local frame space, that is, if the only thing the frame does
// is save callee-saved registers (and possibly varargs argument registers).
calleeSaveSpDelta = totalFrameSize;
offset = (int)compiler->compLclFrameSize;
}
else
{
JITDUMP("Frame type 2 (save FP/LR at bottom). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 2;
// Generate:
// sub sp,sp,#framesz
// stp fp,lr,[sp,#outsz] // note that by necessity, #outsz <= #framesz - 16, so #outsz <= 496.
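                // Illustrative sequence (hypothetical non-varargs frame saving x19/x20 with #framesz=0x60 and
                // #outsz=0x20, so the callee-saved area starts at offset 0x50):
                //      sub sp,sp,#0x60
                //      stp fp,lr,[sp,#0x20]
                //      stp x19,x20,[sp,#0x50]
                //      add fp,sp,#0x20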
assert(totalFrameSize - compiler->lvaOutgoingArgSpaceSize <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
compiler->unwindAllocStack(totalFrameSize);
assert(compiler->lvaOutgoingArgSpaceSize + 2 * REGSIZE_BYTES <= (unsigned)totalFrameSize);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize);
maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
offset = (int)compiler->compLclFrameSize + 2 * REGSIZE_BYTES; // 2 for FP/LR
}
}
else
{
// Case 5 or 6.
//
// First, the callee-saved registers will be saved, and the callee-saved register code must use
// pre-index to subtract from SP as the first instruction. It must also leave space for varargs
// registers to be stored. For example:
// stp r19,r20,[sp,#-96]!
// stp d8,d9,[sp,#16]
// ... save varargs incoming integer registers ...
// Note that all SP alterations must be 16-byte aligned. We have already calculated any alignment to be
// lower on the stack than the callee-saved registers (see lvaAlignFrame() for how we calculate
// alignment). So, if there is an odd number of callee-saved registers, we use (for example, with just
// one saved register):
// sub sp,sp,#16
// str r19,[sp,#8]
// This is one additional instruction, but it centralizes the aligned space. Otherwise, it might be
// possible to have two 8-byte alignment padding words, one below the callee-saved registers, and one
// above them. If that is preferable, we could implement it.
//
// Note that any varargs saved space will always be 16-byte aligned, since there are 8 argument
// registers.
//
// Then, define #remainingFrameSz = #framesz - (callee-saved size + varargs space + possible alignment
            // padding from above). Note that #remainingFrameSz must not be zero, since we still need to save FP,LR.
//
// Generate:
// sub sp,sp,#remainingFrameSz
// or, for large frames:
// mov rX, #remainingFrameSz // maybe multiple instructions
// sub sp,sp,rX
//
// followed by:
// stp fp,lr,[sp,#outsz]
// add fp,sp,#outsz
//
// However, we need to handle the case where #outsz is larger than the constant signed offset encoding
// can handle. And, once again, we might need to deal with #outsz that is not aligned to 16-bytes (i.e.,
// STACK_ALIGN). So, in the case of large #outsz we will have an additional SP adjustment, using one of
// the following sequences:
//
// Define #remainingFrameSz2 = #remainingFrameSz - #outsz.
//
// sub sp,sp,#remainingFrameSz2 // if #remainingFrameSz2 is 16-byte aligned
// stp fp,lr,[sp]
// mov fp,sp
// sub sp,sp,#outsz // in this case, #outsz must also be 16-byte aligned
//
// Or:
//
// sub sp,sp,roundUp(#remainingFrameSz2,16) // if #remainingFrameSz2 is not 16-byte aligned (it is
// // always guaranteed to be 8 byte aligned).
// stp fp,lr,[sp,#8] // it will always be #8 in the unaligned case
// add fp,sp,#8
// sub sp,sp,#outsz - #8
//
// (As usual, for a large constant "#outsz - #8", we might need multiple instructions:
// mov rX, #outsz - #8 // maybe multiple instructions
// sub sp,sp,rX
// )
//
// Note that even if we align the SP alterations, that does not imply that we are creating empty alignment
// slots. In fact, we are not; any empty alignment slots were calculated in
// Compiler::lvaAssignFrameOffsets() and its callees.
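            // Illustrative sequence (hypothetical non-varargs frame type 3 saving x19/x20 with #framesz=0x810
            // and #outsz=0x20, so calleeSaveSpDelta=0x10 and #remainingFrameSz=0x800):
            //      stp x19,x20,[sp,#-0x10]!
            //      sub sp,sp,#0x800
            //      stp fp,lr,[sp,#0x20]
            //      add fp,sp,#0x20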
int calleeSaveSpDeltaUnaligned = totalFrameSize - compiler->compLclFrameSize;
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
JITDUMP("Frame type 5 (save FP/LR at top). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
// This case is much simpler, because we allocate space for the callee-saved register area, including
// FP/LR. Note the SP adjustment might be SUB or be folded into the first store as a predecrement.
// Then, we use a single SUB to establish the rest of the frame. We need to be careful about where
// to establish the frame pointer, as there is a limit of 2040 bytes offset from SP to FP in the
// unwind codes when FP is established.
frameType = 5;
}
else
{
JITDUMP("Frame type 3 (save FP/LR at bottom). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 3;
calleeSaveSpDeltaUnaligned -= 2 * REGSIZE_BYTES; // 2 for FP, LR which we'll save later.
// We'll take care of these later, but callee-saved regs code shouldn't see them.
maskSaveRegsInt &= ~(RBM_FP | RBM_LR);
}
assert(calleeSaveSpDeltaUnaligned >= 0);
assert((calleeSaveSpDeltaUnaligned % 8) == 0); // It better at least be 8 byte aligned.
calleeSaveSpDelta = AlignUp((UINT)calleeSaveSpDeltaUnaligned, STACK_ALIGN);
offset = calleeSaveSpDelta - calleeSaveSpDeltaUnaligned;
JITDUMP(" calleeSaveSpDelta=%d, offset=%d\n", calleeSaveSpDelta, offset);
// At most one alignment slot between SP and where we store the callee-saved registers.
assert((offset == 0) || (offset == REGSIZE_BYTES));
}
}
else
{
// No frame pointer (no chaining).
assert((maskSaveRegsInt & RBM_FP) == 0);
assert((maskSaveRegsInt & RBM_LR) != 0);
// Note that there is no pre-indexed save_lrpair unwind code variant, so we can't allocate the frame using
// 'stp' if we only have one callee-saved register plus LR to save.
NYI("Frame without frame pointer");
offset = 0;
}
assert(frameType != 0);
const int calleeSaveSpOffset = offset;
JITDUMP(" offset=%d, calleeSaveSpDelta=%d\n", offset, calleeSaveSpDelta);
genSaveCalleeSavedRegistersHelp(maskSaveRegsInt | maskSaveRegsFloat, offset, -calleeSaveSpDelta);
offset += genCountBits(maskSaveRegsInt | maskSaveRegsFloat) * REGSIZE_BYTES;
// For varargs, home the incoming arg registers last. Note that there is nothing to unwind here,
// so we just report "NOP" unwind codes. If there's no more frame setup after this, we don't
// need to add codes at all.
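    // Illustrative expansion (assuming, hypothetically, that the varargs save area starts at offset 0x60):
    //      stp x0,x1,[sp,#0x60]
    //      stp x2,x3,[sp,#0x70]
    //      stp x4,x5,[sp,#0x80]
    //      stp x6,x7,[sp,#0x90]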
if (compiler->info.compIsVarArgs)
{
JITDUMP(" compIsVarArgs=true\n");
// There are 8 general-purpose registers to home, thus 'offset' must be 16-byte aligned here.
assert((offset % 16) == 0);
for (regNumber reg1 = REG_ARG_FIRST; reg1 < REG_ARG_LAST; reg1 = REG_NEXT(REG_NEXT(reg1)))
{
regNumber reg2 = REG_NEXT(reg1);
// stp REG, REG + 1, [SP, #offset]
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, offset);
compiler->unwindNop();
offset += 2 * REGSIZE_BYTES;
}
}
// By default, we'll establish the frame pointer chain. (Note that currently frames without FP are NYI.)
bool establishFramePointer = true;
// If we do establish the frame pointer, what is the amount we add to SP to do so?
unsigned offsetSpToSavedFp = 0;
if (frameType == 1)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
assert(offsetSpToSavedFp == 0);
}
else if (frameType == 2)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = compiler->lvaOutgoingArgSpaceSize;
}
else if (frameType == 3)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
int remainingFrameSz = totalFrameSize - calleeSaveSpDelta;
assert(remainingFrameSz > 0);
assert((remainingFrameSz % 16) == 0); // this is guaranteed to be 16-byte aligned because each component --
// totalFrameSize and calleeSaveSpDelta -- is 16-byte aligned.
if (compiler->lvaOutgoingArgSpaceSize > 504)
{
// We can't do "stp fp,lr,[sp,#outsz]" because #outsz is too big.
// If compiler->lvaOutgoingArgSpaceSize is not aligned, we need to align the SP adjustment.
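            // Illustrative sequence (hypothetical: #outsz=0x210 and remainingFrameSz=0x230, so
            // spAdjustment2=0x20, alignmentAdjustment2=0, spAdjustment3=0x210; a small first adjustment
            // like this may be folded into a pre-index STP):
            //      stp fp,lr,[sp,#-0x20]!
            //      mov fp,sp
            //      sub sp,sp,#0x210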
assert(remainingFrameSz > (int)compiler->lvaOutgoingArgSpaceSize);
int spAdjustment2Unaligned = remainingFrameSz - compiler->lvaOutgoingArgSpaceSize;
int spAdjustment2 = (int)roundUp((unsigned)spAdjustment2Unaligned, STACK_ALIGN);
int alignmentAdjustment2 = spAdjustment2 - spAdjustment2Unaligned;
assert((alignmentAdjustment2 == 0) || (alignmentAdjustment2 == 8));
JITDUMP(" spAdjustment2=%d\n", spAdjustment2);
genPrologSaveRegPair(REG_FP, REG_LR, alignmentAdjustment2, -spAdjustment2, false, initReg, pInitRegZeroed);
offset += spAdjustment2;
// Now subtract off the #outsz (or the rest of the #outsz if it was unaligned, and the above "sub"
// included some of it)
int spAdjustment3 = compiler->lvaOutgoingArgSpaceSize - alignmentAdjustment2;
assert(spAdjustment3 > 0);
assert((spAdjustment3 % 16) == 0);
JITDUMP(" alignmentAdjustment2=%d\n", alignmentAdjustment2);
genEstablishFramePointer(alignmentAdjustment2, /* reportUnwindData */ true);
// We just established the frame pointer chain; don't do it again.
establishFramePointer = false;
JITDUMP(" spAdjustment3=%d\n", spAdjustment3);
// We've already established the frame pointer, so no need to report the stack pointer change to unwind
// info.
genStackPointerAdjustment(-spAdjustment3, initReg, pInitRegZeroed, /* reportUnwindData */ false);
offset += spAdjustment3;
}
else
{
genPrologSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize, -remainingFrameSz, false, initReg,
pInitRegZeroed);
offset += remainingFrameSz;
offsetSpToSavedFp = compiler->lvaOutgoingArgSpaceSize;
}
}
else if (frameType == 4)
{
assert(genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = calleeSaveSpDelta - (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) -
2 * REGSIZE_BYTES; // -2 for FP, LR
}
else if (frameType == 5)
{
assert(genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = calleeSaveSpDelta - (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) -
2 * REGSIZE_BYTES; // -2 for FP, LR
JITDUMP(" offsetSpToSavedFp=%d\n", offsetSpToSavedFp);
genEstablishFramePointer(offsetSpToSavedFp, /* reportUnwindData */ true);
// We just established the frame pointer chain; don't do it again.
establishFramePointer = false;
int remainingFrameSz = totalFrameSize - calleeSaveSpDelta;
assert(remainingFrameSz > 0);
assert((remainingFrameSz % 16) == 0); // this is guaranteed to be 16-byte aligned because each component --
// totalFrameSize and calleeSaveSpDelta -- is 16-byte aligned.
JITDUMP(" remainingFrameSz=%d\n", remainingFrameSz);
// We've already established the frame pointer, so no need to report the stack pointer change to unwind info.
genStackPointerAdjustment(-remainingFrameSz, initReg, pInitRegZeroed, /* reportUnwindData */ false);
offset += remainingFrameSz;
}
else
{
unreached();
}
if (establishFramePointer)
{
JITDUMP(" offsetSpToSavedFp=%d\n", offsetSpToSavedFp);
genEstablishFramePointer(offsetSpToSavedFp, /* reportUnwindData */ true);
}
assert(offset == totalFrameSize);
// Save off information about the frame for later use
//
compiler->compFrameInfo.frameType = frameType;
compiler->compFrameInfo.calleeSaveSpOffset = calleeSaveSpOffset;
compiler->compFrameInfo.calleeSaveSpDelta = calleeSaveSpDelta;
compiler->compFrameInfo.offsetSpToSavedFp = offsetSpToSavedFp;
#endif // TARGET_ARM64
}
/*****************************************************************************
*
* Generates code for a function epilog.
*
* Please consult the "debugger team notification" comment in genFnProlog().
*/
void CodeGen::genFnEpilog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFnEpilog()\n");
#endif // DEBUG
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, GetEmitter()->emitInitGCrefVars);
gcInfo.gcRegGCrefSetCur = GetEmitter()->emitInitGCrefRegs;
gcInfo.gcRegByrefSetCur = GetEmitter()->emitInitByrefRegs;
#ifdef DEBUG
if (compiler->opts.dspCode)
printf("\n__epilog:\n");
if (verbose)
{
printf("gcVarPtrSetCur=%s ", VarSetOps::ToString(compiler, gcInfo.gcVarPtrSetCur));
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
printf(", gcRegGCrefSetCur=");
printRegMaskInt(gcInfo.gcRegGCrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
printf(", gcRegByrefSetCur=");
printRegMaskInt(gcInfo.gcRegByrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
printf("\n");
}
#endif // DEBUG
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
GenTree* lastNode = block->lastNode();
// Method handle and address info used in case of jump epilog
CORINFO_METHOD_HANDLE methHnd = nullptr;
CORINFO_CONST_LOOKUP addrInfo;
addrInfo.addr = nullptr;
addrInfo.accessType = IAT_VALUE;
if (jmpEpilog && lastNode->gtOper == GT_JMP)
{
methHnd = (CORINFO_METHOD_HANDLE)lastNode->AsVal()->gtVal1;
compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo);
}
#ifdef TARGET_ARM
// We delay starting the unwind codes until we have an instruction which we know
// needs an unwind code. In particular, for large stack frames in methods without
// localloc, the sequence might look something like this:
// movw r3, 0x38e0
// add sp, r3
// pop {r4,r5,r6,r10,r11,pc}
// In this case, the "movw" should not be part of the unwind codes, since it will
// be a NOP, and it is a waste to start with a NOP. Note that calling unwindBegEpilog()
// also sets the current location as the beginning offset of the epilog, so every
// instruction afterwards needs an unwind code. In the case above, if you call
// unwindBegEpilog() before the "movw", then you must generate a NOP for the "movw".
bool unwindStarted = false;
// Tear down the stack frame
if (compiler->compLocallocUsed)
{
if (!unwindStarted)
{
compiler->unwindBegEpilog();
unwindStarted = true;
}
// mov R9 into SP
inst_Mov(TYP_I_IMPL, REG_SP, REG_SAVED_LOCALLOC_SP, /* canSkip */ false);
compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0);
}
if (jmpEpilog ||
genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED) ==
RBM_NONE)
{
genFreeLclFrame(compiler->compLclFrameSize, &unwindStarted);
}
if (!unwindStarted)
{
// If we haven't generated anything yet, we're certainly going to generate a "pop" next.
compiler->unwindBegEpilog();
unwindStarted = true;
}
if (jmpEpilog && lastNode->gtOper == GT_JMP && addrInfo.accessType == IAT_RELPVALUE)
{
        // IAT_RELPVALUE jump at the end is done using relative indirection, so
        // an additional helper register is required.
// We use LR just before it is going to be restored from stack, i.e.
//
// movw r12, laddr
// movt r12, haddr
// mov lr, r12
// ldr r12, [r12]
// add r12, r12, lr
// pop {lr}
// ...
// bx r12
regNumber indCallReg = REG_R12;
regNumber vptrReg1 = REG_LR;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, vptrReg1, indCallReg, /* canSkip */ false);
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, indCallReg, vptrReg1);
}
genPopCalleeSavedRegisters(jmpEpilog);
if (regSet.rsMaskPreSpillRegs(true) != RBM_NONE)
{
// We better not have used a pop PC to return otherwise this will be unreachable code
noway_assert(!genUsedPopToReturn);
int preSpillRegArgSize = genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
inst_RV_IV(INS_add, REG_SPBASE, preSpillRegArgSize, EA_PTRSIZE);
compiler->unwindAllocStack(preSpillRegArgSize);
}
if (jmpEpilog)
{
// We better not have used a pop PC to return otherwise this will be unreachable code
noway_assert(!genUsedPopToReturn);
}
#else // TARGET_ARM64
compiler->unwindBegEpilog();
genPopCalleeSavedRegistersAndFreeLclFrame(jmpEpilog);
#endif // TARGET_ARM64
if (jmpEpilog)
{
SetHasTailCalls(true);
noway_assert(block->bbJumpKind == BBJ_RETURN);
noway_assert(block->GetFirstLIRNode() != nullptr);
/* figure out what jump we have */
GenTree* jmpNode = lastNode;
#if !FEATURE_FASTTAILCALL
noway_assert(jmpNode->gtOper == GT_JMP);
#else // FEATURE_FASTTAILCALL
// armarch
// If jmpNode is GT_JMP then gtNext must be null.
// If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts.
noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr));
// Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp
noway_assert((jmpNode->gtOper == GT_JMP) ||
((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
// The next block is associated with this "if" stmt
if (jmpNode->gtOper == GT_JMP)
#endif // FEATURE_FASTTAILCALL
{
// Simply emit a jump to the methodHnd. This is similar to a call so we can use
// the same descriptor with some minor adjustments.
assert(methHnd != nullptr);
assert(addrInfo.addr != nullptr);
#ifdef TARGET_ARMARCH
emitter::EmitCallType callType;
void* addr;
regNumber indCallReg;
switch (addrInfo.accessType)
{
case IAT_VALUE:
if (validImmForBL((ssize_t)addrInfo.addr))
{
// Simple direct call
callType = emitter::EC_FUNC_TOKEN;
addr = addrInfo.addr;
indCallReg = REG_NA;
break;
}
// otherwise the target address doesn't fit in an immediate
// so we have to burn a register...
FALLTHROUGH;
case IAT_PVALUE:
// Load the address into a register, load indirect and call through a register
// We have to use R12 since we assume the argument registers are in use
callType = emitter::EC_INDIR_R;
indCallReg = REG_INDIRECT_CALL_TARGET_REG;
addr = NULL;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
if (addrInfo.accessType == IAT_PVALUE)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
regSet.verifyRegUsed(indCallReg);
}
break;
case IAT_RELPVALUE:
{
// Load the address into a register, load relative indirect and call through a register
// We have to use R12 since we assume the argument registers are in use
// LR is used as helper register right before it is restored from stack, thus,
// all relative address calculations are performed before LR is restored.
callType = emitter::EC_INDIR_R;
indCallReg = REG_R12;
addr = NULL;
regSet.verifyRegUsed(indCallReg);
break;
}
case IAT_PPVALUE:
default:
NO_WAY("Unsupported JMP indirection");
}
/* Simply emit a jump to the methodHnd. This is similar to a call so we can use
* the same descriptor with some minor adjustments.
*/
// clang-format off
GetEmitter()->emitIns_Call(callType,
methHnd,
INDEBUG_LDISASM_COMMA(nullptr)
addr,
0, // argSize
EA_UNKNOWN, // retSize
#if defined(TARGET_ARM64)
EA_UNKNOWN, // secondRetSize
#endif
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
indCallReg, // ireg
REG_NA, // xreg
0, // xmul
0, // disp
true); // isJump
// clang-format on
CLANG_FORMAT_COMMENT_ANCHOR;
#endif // TARGET_ARMARCH
}
#if FEATURE_FASTTAILCALL
else
{
genCallInstruction(jmpNode->AsCall());
}
#endif // FEATURE_FASTTAILCALL
}
else
{
#ifdef TARGET_ARM
if (!genUsedPopToReturn)
{
// If we did not use a pop to return, then we did a "pop {..., lr}" instead of "pop {..., pc}",
// so we need a "bx lr" instruction to return from the function.
inst_RV(INS_bx, REG_LR, TYP_I_IMPL);
compiler->unwindBranch16();
}
#else // TARGET_ARM64
inst_RV(INS_ret, REG_LR, TYP_I_IMPL);
compiler->unwindReturn(REG_LR);
#endif // TARGET_ARM64
}
compiler->unwindEndEpilog();
}
// return size
// alignmentWB is out param
unsigned CodeGenInterface::InferOpSizeAlign(GenTree* op, unsigned* alignmentWB)
{
unsigned alignment = 0;
unsigned opSize = 0;
if (op->gtType == TYP_STRUCT || op->OperIsCopyBlkOp())
{
opSize = InferStructOpSizeAlign(op, &alignment);
}
else
{
alignment = genTypeAlignments[op->TypeGet()];
opSize = genTypeSizes[op->TypeGet()];
}
assert(opSize != 0);
assert(alignment != 0);
(*alignmentWB) = alignment;
return opSize;
}
// return size
// alignmentWB is out param
unsigned CodeGenInterface::InferStructOpSizeAlign(GenTree* op, unsigned* alignmentWB)
{
unsigned alignment = 0;
unsigned opSize = 0;
while (op->gtOper == GT_COMMA)
{
op = op->AsOp()->gtOp2;
}
if (op->gtOper == GT_OBJ)
{
CORINFO_CLASS_HANDLE clsHnd = op->AsObj()->GetLayout()->GetClassHandle();
opSize = op->AsObj()->GetLayout()->GetSize();
alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
else if (op->gtOper == GT_LCL_VAR)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(op->AsLclVarCommon());
assert(varDsc->lvType == TYP_STRUCT);
opSize = varDsc->lvSize();
#ifndef TARGET_64BIT
if (varDsc->lvStructDoubleAlign)
{
alignment = TARGET_POINTER_SIZE * 2;
}
else
#endif // !TARGET_64BIT
{
alignment = TARGET_POINTER_SIZE;
}
}
else if (op->gtOper == GT_MKREFANY)
{
opSize = TARGET_POINTER_SIZE * 2;
alignment = TARGET_POINTER_SIZE;
}
else if (op->IsArgPlaceHolderNode())
{
CORINFO_CLASS_HANDLE clsHnd = op->AsArgPlace()->gtArgPlaceClsHnd;
assert(clsHnd != 0);
opSize = roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
else
{
assert(!"Unhandled gtOper");
opSize = TARGET_POINTER_SIZE;
alignment = TARGET_POINTER_SIZE;
}
assert(opSize != 0);
assert(alignment != 0);
(*alignmentWB) = alignment;
return opSize;
}
#endif // TARGET_ARMARCH
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX ARM/ARM64 Code Generator Common Code XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARMARCH // This file is ONLY used for ARM and ARM64 architectures
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "emit.h"
#include "patchpointinfo.h"
//------------------------------------------------------------------------
// genStackPointerConstantAdjustment: add a specified constant value to the stack pointer.
// No probe is done.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero.
// regTmp - an available temporary register that is used if 'spDelta' cannot be encoded by
// 'sub sp, sp, #spDelta' instruction.
// Can be REG_NA if the caller knows for certain that 'spDelta' fits into the immediate
// value range.
//
// Return Value:
// None.
//
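// Notes:
//    Illustrative expansion (assuming, e.g., spDelta == -0x90 on arm64): sub sp, sp, #0x90
//    A delta that cannot be encoded as an immediate would instead load 'regTmp' first:
//       mov regTmp, #imm      // possibly split across several mov/movk instructions
//       sub sp, sp, regTmp
//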
void CodeGen::genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
// We assert that the SP change is less than one page. If it's greater, you should have called a
// function that does a probe, which will in turn call this function.
assert((target_size_t)(-spDelta) <= compiler->eeGetPageSize());
#ifdef TARGET_ARM64
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, -spDelta, regTmp);
#else
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, -spDelta, INS_FLAGS_DONT_CARE, regTmp);
#endif
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentWithProbe: add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Should only be called as a helper for
// genStackPointerConstantAdjustmentLoopWithProbe.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero. If zero, the probe happens,
// but the stack pointer doesn't move.
// regTmp - temporary register to use as target for probe load instruction
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
genStackPointerConstantAdjustment(spDelta, regTmp);
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentLoopWithProbe: Add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Generates one probe per page, up to the total amount required.
// This will generate a sequence of probes in-line.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative.
// regTmp - temporary register to use as target for probe load instruction
//
// Return Value:
// Offset in bytes from SP to last probed address.
//
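// Notes:
//    Illustrative expansion (assuming a 4K page size and spDelta == -0x2800 on arm64, with 'wTmp'
//    standing in for regTmp):
//       ldr wTmp, [sp]
//       sub sp, sp, #0x1000
//       ldr wTmp, [sp]
//       sub sp, sp, #0x1000
//       ldr wTmp, [sp]
//       sub sp, sp, #0x800
//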
target_ssize_t CodeGen::genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
const target_size_t pageSize = compiler->eeGetPageSize();
ssize_t spRemainingDelta = spDelta;
do
{
ssize_t spOneDelta = -(ssize_t)min((target_size_t)-spRemainingDelta, pageSize);
genStackPointerConstantAdjustmentWithProbe(spOneDelta, regTmp);
spRemainingDelta -= spOneDelta;
} while (spRemainingDelta < 0);
// What offset from the final SP was the last probe? This depends on the fact that
// genStackPointerConstantAdjustmentWithProbe() probes first, then does "SUB SP".
target_size_t lastTouchDelta = (target_size_t)(-spDelta) % pageSize;
if ((lastTouchDelta == 0) || (lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize))
{
        // Almost a complete page below the last probed address has not been touched. If lastTouchDelta==0,
        // then spDelta was an exact multiple of pageSize, which means we last probed exactly one page back.
        // Otherwise, we probed the page, but very far from its end. If the next action on the stack might
        // subtract from SP first, before touching the current SP, then we do one more probe at the very
        // bottom. This can happen on x86, for example, when we copy an argument to the stack using a
        // "SUB ESP; REP MOV" strategy.
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
lastTouchDelta = 0;
}
return lastTouchDelta;
}
//------------------------------------------------------------------------
// genCodeForTreeNode Generate code for a single node in the tree.
//
// Preconditions:
// All operands have been evaluated.
//
void CodeGen::genCodeForTreeNode(GenTree* treeNode)
{
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
#ifdef DEBUG
// Validate that all the operands for the current node are consumed in order.
// This is important because LSRA ensures that any necessary copies will be
// handled correctly.
lastConsumedNode = nullptr;
if (compiler->verbose)
{
unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
compiler->gtDispLIRNode(treeNode, "Generating: ");
}
#endif // DEBUG
// Is this a node whose value is already in a register? LSRA denotes this by
// setting the GTF_REUSE_REG_VAL flag.
if (treeNode->IsReuseRegVal())
{
// For now, this is only used for constant nodes.
assert((treeNode->OperGet() == GT_CNS_INT) || (treeNode->OperGet() == GT_CNS_DBL));
JITDUMP(" TreeNode is marked ReuseReg\n");
return;
}
// contained nodes are part of their parents for codegen purposes
// ex : immediates, most LEAs
if (treeNode->isContained())
{
return;
}
switch (treeNode->gtOper)
{
case GT_START_NONGC:
GetEmitter()->emitDisableGC();
break;
case GT_START_PREEMPTGC:
// Kill callee saves GC registers, and create a label
// so that information gets propagated to the emitter.
gcInfo.gcMarkRegSetNpt(RBM_INT_CALLEE_SAVED);
genDefineTempLabel(genCreateTempLabel());
break;
case GT_PROF_HOOK:
// We should be seeing this only if profiler hook is needed
noway_assert(compiler->compIsProfilerHookNeeded());
#ifdef PROFILING_SUPPORTED
// Right now this node is used only for tail calls. In future if
// we intend to use it for Enter or Leave hooks, add a data member
// to this node indicating the kind of profiler hook. For example,
// helper number can be used.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
break;
case GT_LCLHEAP:
genLclHeap(treeNode);
break;
case GT_CNS_INT:
case GT_CNS_DBL:
genSetRegToConst(targetReg, targetType, treeNode);
genProduceReg(treeNode);
break;
case GT_NOT:
case GT_NEG:
genCodeForNegNot(treeNode);
break;
#if defined(TARGET_ARM64)
case GT_BSWAP:
case GT_BSWAP16:
genCodeForBswap(treeNode);
break;
#endif // defined(TARGET_ARM64)
case GT_MOD:
case GT_UMOD:
case GT_DIV:
case GT_UDIV:
genCodeForDivMod(treeNode->AsOp());
break;
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_AND_NOT:
assert(varTypeIsIntegralOrI(treeNode));
FALLTHROUGH;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif // !defined(TARGET_64BIT)
case GT_ADD:
case GT_SUB:
case GT_MUL:
genConsumeOperands(treeNode->AsOp());
genCodeForBinary(treeNode->AsOp());
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
// case GT_ROL: // No ROL instruction on ARM; it has been lowered to ROR.
case GT_ROR:
genCodeForShift(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LSH_HI:
case GT_RSH_LO:
genCodeForShiftLong(treeNode);
break;
#endif // !defined(TARGET_64BIT)
case GT_CAST:
genCodeForCast(treeNode->AsOp());
break;
case GT_BITCAST:
genCodeForBitCast(treeNode->AsOp());
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
genCodeForLclAddr(treeNode);
break;
case GT_LCL_FLD:
genCodeForLclFld(treeNode->AsLclFld());
break;
case GT_LCL_VAR:
genCodeForLclVar(treeNode->AsLclVar());
break;
case GT_STORE_LCL_FLD:
genCodeForStoreLclFld(treeNode->AsLclFld());
break;
case GT_STORE_LCL_VAR:
genCodeForStoreLclVar(treeNode->AsLclVar());
break;
case GT_RETFILT:
case GT_RETURN:
genReturn(treeNode);
break;
case GT_LEA:
// If we are here, it is the case where there is an LEA that cannot be folded into a parent instruction.
genLeaInstruction(treeNode->AsAddrMode());
break;
case GT_INDEX_ADDR:
genCodeForIndexAddr(treeNode->AsIndexAddr());
break;
case GT_IND:
genCodeForIndir(treeNode->AsIndir());
break;
case GT_MUL_LONG:
genCodeForMulLong(treeNode->AsOp());
break;
#ifdef TARGET_ARM64
case GT_MADD:
genCodeForMadd(treeNode->AsOp());
break;
case GT_INC_SATURATE:
genCodeForIncSaturate(treeNode);
break;
case GT_MULHI:
genCodeForMulHi(treeNode->AsOp());
break;
case GT_SWAP:
genCodeForSwap(treeNode->AsOp());
break;
case GT_ADDEX:
genCodeForAddEx(treeNode->AsOp());
break;
case GT_BFIZ:
genCodeForBfiz(treeNode->AsOp());
break;
#endif // TARGET_ARM64
case GT_JMP:
genJmpMethod(treeNode);
break;
case GT_CKFINITE:
genCkfinite(treeNode);
break;
case GT_INTRINSIC:
genIntrinsic(treeNode);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
genSIMDIntrinsic(treeNode->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
genHWIntrinsic(treeNode->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_CMP:
#ifdef TARGET_ARM64
case GT_TEST_EQ:
case GT_TEST_NE:
#endif // TARGET_ARM64
genCodeForCompare(treeNode->AsOp());
break;
case GT_JTRUE:
genCodeForJumpTrue(treeNode->AsOp());
break;
#ifdef TARGET_ARM64
case GT_JCMP:
genCodeForJumpCompare(treeNode->AsOp());
break;
#endif // TARGET_ARM64
case GT_JCC:
genCodeForJcc(treeNode->AsCC());
break;
case GT_SETCC:
genCodeForSetcc(treeNode->AsCC());
break;
case GT_RETURNTRAP:
genCodeForReturnTrap(treeNode->AsOp());
break;
case GT_STOREIND:
genCodeForStoreInd(treeNode->AsStoreInd());
break;
case GT_COPY:
// This is handled at the time we call genConsumeReg() on the GT_COPY
break;
case GT_FIELD_LIST:
// Should always be marked contained.
assert(!"LIST, FIELD_LIST nodes should always be marked contained.");
break;
case GT_PUTARG_STK:
genPutArgStk(treeNode->AsPutArgStk());
break;
case GT_PUTARG_REG:
genPutArgReg(treeNode->AsOp());
break;
case GT_PUTARG_SPLIT:
genPutArgSplit(treeNode->AsPutArgSplit());
break;
case GT_CALL:
genCall(treeNode->AsCall());
break;
case GT_MEMORYBARRIER:
{
CodeGen::BarrierKind barrierKind =
treeNode->gtFlags & GTF_MEMORYBARRIER_LOAD ? BARRIER_LOAD_ONLY : BARRIER_FULL;
instGen_MemoryBarrier(barrierKind);
break;
}
#ifdef TARGET_ARM64
case GT_XCHG:
case GT_XORR:
case GT_XAND:
case GT_XADD:
genLockedInstructions(treeNode->AsOp());
break;
case GT_CMPXCHG:
genCodeForCmpXchg(treeNode->AsCmpXchg());
break;
#endif // TARGET_ARM64
case GT_RELOAD:
// do nothing - reload is just a marker.
// The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
// into the register specified in this node.
break;
case GT_NOP:
break;
case GT_KEEPALIVE:
if (treeNode->AsOp()->gtOp1->isContained())
{
// For this case we simply need to update the lifetime of the local.
genUpdateLife(treeNode->AsOp()->gtOp1);
}
else
{
genConsumeReg(treeNode->AsOp()->gtOp1);
}
break;
case GT_NO_OP:
instGen(INS_nop);
break;
case GT_BOUNDS_CHECK:
genRangeCheck(treeNode);
break;
case GT_PHYSREG:
genCodeForPhysReg(treeNode->AsPhysReg());
break;
case GT_NULLCHECK:
genCodeForNullCheck(treeNode->AsIndir());
break;
case GT_CATCH_ARG:
noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
/* Catch arguments get passed in a register. genCodeForBBlist()
would have marked it as holding a GC object, but not used. */
noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
genConsumeReg(treeNode);
break;
case GT_PINVOKE_PROLOG:
noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
#ifdef PSEUDORANDOM_NOP_INSERTION
// the runtime side requires the codegen here to be consistent
emit->emitDisableRandomNops();
#endif // PSEUDORANDOM_NOP_INSERTION
break;
case GT_LABEL:
genPendingCallLabel = genCreateTempLabel();
#if defined(TARGET_ARM)
genMov32RelocatableDisplacement(genPendingCallLabel, targetReg);
#else
emit->emitIns_R_L(INS_adr, EA_PTRSIZE, genPendingCallLabel, targetReg);
#endif
break;
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
case GT_JMPTABLE:
genJumpTable(treeNode);
break;
case GT_SWITCH_TABLE:
genTableBasedSwitch(treeNode);
break;
case GT_ARR_INDEX:
genCodeForArrIndex(treeNode->AsArrIndex());
break;
case GT_ARR_OFFSET:
genCodeForArrOffset(treeNode->AsArrOffs());
break;
#ifdef TARGET_ARM
case GT_CLS_VAR_ADDR:
emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->AsClsVar()->gtClsVarHnd, 0);
genProduceReg(treeNode);
break;
case GT_LONG:
assert(treeNode->isUsedFromReg());
genConsumeRegs(treeNode);
break;
#endif // TARGET_ARM
case GT_IL_OFFSET:
// Do nothing; these nodes are simply markers for debug info.
break;
default:
{
#ifdef DEBUG
char message[256];
_snprintf_s(message, ArrLen(message), _TRUNCATE, "NYI: Unimplemented node type %s",
GenTree::OpName(treeNode->OperGet()));
NYIRAW(message);
#else
NYI("unimplemented node");
#endif
}
break;
}
}
//---------------------------------------------------------------------
// genSetGSSecurityCookie: Set the "GS" security cookie in the prolog.
//
// Arguments:
// initReg - register to use as a scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->getNeedsGSSecurityCookie())
{
return;
}
if (compiler->opts.IsOSR() && compiler->info.compPatchpointInfo->HasSecurityCookie())
{
// Security cookie is on original frame and was initialized there.
return;
}
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
noway_assert(compiler->gsGlobalSecurityCookieVal != 0);
// initReg = #GlobalSecurityCookieVal; [frame.GSSecurityCookie] = initReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTR_DSP_RELOC, initReg, (ssize_t)compiler->gsGlobalSecurityCookieAddr,
INS_FLAGS_DONT_CARE DEBUGARG((size_t)THT_SetGSCookie) DEBUGARG(GTF_EMPTY));
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, initReg, initReg, 0);
regSet.verifyRegUsed(initReg);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
}
*pInitRegZeroed = false;
}
//------------------------------------------------------------------------
// genEmitGSCookieCheck: Generate code to check that the GS cookie
// wasn't trashed by a buffer overrun.
//
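// Illustrative sequence (a hypothetical sketch of the constant-cookie case; register names below are
// placeholders for REG_GSCOOKIE_TMP_0/1):
//    mov  xGSConst, #GlobalSecurityCookieVal      // possibly several mov/movk instructions
//    ldr  xGSValue, [<frame>, #GSCookie slot]
//    cmp  xGSConst, xGSValue
//    beq  L_cookieOk
//    bl   CORINFO_HELP_FAIL_FAST
//  L_cookieOk:
//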
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
// Make sure that the return register is reported as live GC-ref so that any GC that kicks in while
// executing GS cookie check will not collect the object pointed to by REG_INTRET (R0).
if (!pushReg && (compiler->info.compRetNativeType == TYP_REF))
gcInfo.gcRegGCrefSetCur |= RBM_INTRET;
// We need two temporary registers, to load the GS cookie values and compare them. We can't use
// any argument registers if 'pushReg' is true (meaning we have a JMP call). They should be
// callee-trash registers, which should not contain anything interesting at this point.
// We don't have any IR node representing this check, so LSRA can't communicate registers
// for us to use.
regNumber regGSConst = REG_GSCOOKIE_TMP_0;
regNumber regGSValue = REG_GSCOOKIE_TMP_1;
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
// load the GS cookie constant into a reg
//
instGen_Set_Reg_To_Imm(EA_PTRSIZE, regGSConst, compiler->gsGlobalSecurityCookieVal);
}
else
{
// Ngen case - GS cookie constant needs to be accessed through an indirection.
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSConst, (ssize_t)compiler->gsGlobalSecurityCookieAddr,
INS_FLAGS_DONT_CARE DEBUGARG((size_t)THT_GSCookieCheck) DEBUGARG(GTF_EMPTY));
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, regGSConst, regGSConst, 0);
}
// Load this method's GS value from the stack frame
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, regGSValue, compiler->lvaGSSecurityCookie, 0);
    // Compare with the GS cookie constant
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regGSConst, regGSValue);
BasicBlock* gsCheckBlk = genCreateTempLabel();
inst_JMP(EJ_eq, gsCheckBlk);
// regGSConst and regGSValue aren't needed anymore, we can use them for helper call
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN, regGSConst);
genDefineTempLabel(gsCheckBlk);
}
//---------------------------------------------------------------------
// genIntrinsic - generate code for a given intrinsic
//
// Arguments
// treeNode - the GT_INTRINSIC node
//
// Return value:
// None
//
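// Illustrative lowering (assuming, e.g., a Math.Sqrt call on a double already in d0, arm64):
//    fsqrt d0, d0
//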
void CodeGen::genIntrinsic(GenTree* treeNode)
{
assert(treeNode->OperIs(GT_INTRINSIC));
// Both operand and its result must be of the same floating point type.
GenTree* srcNode = treeNode->AsOp()->gtOp1;
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
// Only a subset of functions are treated as math intrinsics.
//
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Abs:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_ABS, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
#ifdef TARGET_ARM64
case NI_System_Math_Ceiling:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintp, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Floor:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintm, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Truncate:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintz, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Round:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintn, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Max:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R_R(INS_fmax, emitActualTypeSize(treeNode), treeNode->GetRegNum(),
treeNode->gtGetOp1()->GetRegNum(), treeNode->gtGetOp2()->GetRegNum());
break;
case NI_System_Math_Min:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R_R(INS_fmin, emitActualTypeSize(treeNode), treeNode->GetRegNum(),
treeNode->gtGetOp1()->GetRegNum(), treeNode->gtGetOp2()->GetRegNum());
break;
#endif // TARGET_ARM64
case NI_System_Math_Sqrt:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_SQRT, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
default:
assert(!"genIntrinsic: Unsupported intrinsic");
unreached();
}
genProduceReg(treeNode);
}
//---------------------------------------------------------------------
// genPutArgStk - generate code for a GT_PUTARG_STK node
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
// Return value:
// None
//
void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
{
assert(treeNode->OperIs(GT_PUTARG_STK));
GenTree* source = treeNode->gtOp1;
var_types targetType;
if (!compMacOsArm64Abi())
{
targetType = genActualType(source->TypeGet());
}
else
{
targetType = source->TypeGet();
}
emitter* emit = GetEmitter();
    // This is the varNum for our store operations,
    // typically this is the varNum for the Outgoing arg space.
    // When we are generating a tail call it will be the varNum for arg0.
unsigned varNumOut = (unsigned)-1;
unsigned argOffsetMax = (unsigned)-1; // Records the maximum size of this area for assert checks
    // Get the argument offset to use with 'varNumOut'.
    // Here we cross-check that the argument offset hasn't changed from lowering to codegen, since
    // we store the arg slot number in the GT_PUTARG_STK node during the lowering phase.
unsigned argOffsetOut = treeNode->getArgOffset();
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(treeNode->gtCall, treeNode);
assert(curArgTabEntry != nullptr);
DEBUG_ARG_SLOTS_ASSERT(argOffsetOut == (curArgTabEntry->slotNum * TARGET_POINTER_SIZE));
#endif // DEBUG
// Whether to setup stk arg in incoming or out-going arg area?
// Fast tail calls implemented as epilog+jmp = stk arg is setup in incoming arg area.
// All other calls - stk arg is setup in out-going arg area.
if (treeNode->putInIncomingArgArea())
{
varNumOut = getFirstArgWithStackSlot();
argOffsetMax = compiler->compArgSize;
#if FEATURE_FASTTAILCALL
// This must be a fast tail call.
assert(treeNode->gtCall->IsFastTailCall());
        // Since it is a fast tail call, the existence of the first incoming arg is guaranteed
        // because a fast tail call requires that the caller's incoming arg area is >= the outgoing
        // arg area required for the tail call.
LclVarDsc* varDsc = compiler->lvaGetDesc(varNumOut);
assert(varDsc != nullptr);
#endif // FEATURE_FASTTAILCALL
}
else
{
varNumOut = compiler->lvaOutgoingArgSpaceVar;
argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
}
bool isStruct = (targetType == TYP_STRUCT) || (source->OperGet() == GT_FIELD_LIST);
if (!isStruct) // a normal non-Struct argument
{
if (varTypeIsSIMD(targetType))
{
assert(!source->isContained());
regNumber srcReg = genConsumeReg(source);
assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
assert(compMacOsArm64Abi() || treeNode->GetStackByteSize() % TARGET_POINTER_SIZE == 0);
#ifdef TARGET_ARM64
if (compMacOsArm64Abi() && (treeNode->GetStackByteSize() == 12))
{
regNumber tmpReg = treeNode->GetSingleTempReg();
GetEmitter()->emitStoreSIMD12ToLclOffset(varNumOut, argOffsetOut, srcReg, tmpReg);
argOffsetOut += 12;
}
else
#endif // TARGET_ARM64
{
emitAttr storeAttr = emitTypeSize(targetType);
emit->emitIns_S_R(INS_str, storeAttr, srcReg, varNumOut, argOffsetOut);
argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
}
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
return;
}
if (compMacOsArm64Abi())
{
switch (treeNode->GetStackByteSize())
{
case 1:
targetType = TYP_BYTE;
break;
case 2:
targetType = TYP_SHORT;
break;
default:
assert(treeNode->GetStackByteSize() >= 4);
break;
}
}
instruction storeIns = ins_Store(targetType);
emitAttr storeAttr = emitTypeSize(targetType);
// If it is contained then source must be the integer constant zero
if (source->isContained())
{
#ifdef TARGET_ARM64
assert(source->OperGet() == GT_CNS_INT);
assert(source->AsIntConCommon()->IconValue() == 0);
emit->emitIns_S_R(storeIns, storeAttr, REG_ZR, varNumOut, argOffsetOut);
#else // !TARGET_ARM64
// There is no zero register on ARM32
unreached();
#endif // !TARGET_ARM64
}
else
{
genConsumeReg(source);
emit->emitIns_S_R(storeIns, storeAttr, source->GetRegNum(), varNumOut, argOffsetOut);
#ifdef TARGET_ARM
if (targetType == TYP_LONG)
{
// This case currently only occurs for double types that are passed as TYP_LONG;
// actual long types would have been decomposed by now.
assert(source->IsCopyOrReload());
regNumber otherReg = (regNumber)source->AsCopyOrReload()->GetRegNumByIdx(1);
assert(otherReg != REG_NA);
argOffsetOut += EA_4BYTE;
emit->emitIns_S_R(storeIns, storeAttr, otherReg, varNumOut, argOffsetOut);
}
#endif // TARGET_ARM
}
argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
}
else // We have some kind of a struct argument
{
assert(source->isContained()); // We expect that this node was marked as contained in Lower
if (source->OperGet() == GT_FIELD_LIST)
{
genPutArgStkFieldList(treeNode, varNumOut);
}
else // We must have a GT_OBJ or a GT_LCL_VAR
{
noway_assert(source->OperIs(GT_LCL_VAR, GT_OBJ));
var_types targetType = source->TypeGet();
noway_assert(varTypeIsStruct(targetType));
// We will copy this struct to the stack, possibly using a ldp/ldr instruction
// in ARM64/ARM
// Setup loReg (and hiReg) from the internal registers that we reserved in lower.
//
regNumber loReg = treeNode->ExtractTempReg();
#ifdef TARGET_ARM64
regNumber hiReg = treeNode->GetSingleTempReg();
#endif // TARGET_ARM64
regNumber addrReg = REG_NA;
GenTreeLclVarCommon* varNode = nullptr;
GenTree* addrNode = nullptr;
if (source->OperGet() == GT_LCL_VAR)
{
varNode = source->AsLclVarCommon();
}
else // we must have a GT_OBJ
{
assert(source->OperGet() == GT_OBJ);
addrNode = source->AsOp()->gtOp1;
// addrNode can either be a GT_LCL_VAR_ADDR or an address expression
//
if (addrNode->OperGet() == GT_LCL_VAR_ADDR)
{
// We have a GT_OBJ(GT_LCL_VAR_ADDR)
//
// We will treat this case the same as above
// (i.e. if we just had this GT_LCL_VAR directly as the source)
// so update 'source' to point to this GT_LCL_VAR_ADDR node
// and continue to the codegen for the LCL_VAR node below
//
assert(addrNode->isContained());
varNode = addrNode->AsLclVarCommon();
addrNode = nullptr;
}
else // addrNode is used
{
// TODO-Cleanup: `Lowering::NewPutArg` marks only `LCL_VAR_ADDR` as contained nowadays,
// but we use `genConsumeAddress` as a precaution; consider using `genConsumeReg()` instead.
assert(!addrNode->isContained());
// Generate code to load the address that we need into a register
genConsumeAddress(addrNode);
addrReg = addrNode->GetRegNum();
#ifdef TARGET_ARM64
// If addrReg is equal to loReg, swap(loReg, hiReg)
// This reduces code complexity by only supporting one addrReg overwrite case
if (loReg == addrReg)
{
loReg = hiReg;
hiReg = addrReg;
}
#endif // TARGET_ARM64
}
}
// Either varNode or addrNode must have been set up above;
// the xor ensures that exactly one of the two is set, not both.
assert((varNode != nullptr) ^ (addrNode != nullptr));
ClassLayout* layout;
unsigned srcSize;
bool isHfa;
// Set up srcSize, isHfa, and the struct layout
if (source->OperGet() == GT_LCL_VAR)
{
assert(varNode != nullptr);
LclVarDsc* varDsc = compiler->lvaGetDesc(varNode);
// This struct also must live in the stack frame
// And it can't live in a register (SIMD)
assert(varDsc->lvType == TYP_STRUCT);
assert(varDsc->lvOnFrame && !varDsc->lvRegister);
srcSize = varDsc->lvSize();
isHfa = varDsc->lvIsHfa();
layout = varDsc->GetLayout();
}
else // we must have a GT_OBJ
{
assert(source->OperGet() == GT_OBJ);
// If the source is an OBJ node then we need to use the type information
// it provides (size and GC layout) even if the node wraps a lclvar. Due
// to struct reinterpretation (e.g. Unsafe.As<X, Y>) it is possible that
// the OBJ node has a different type than the lclvar.
layout = source->AsObj()->GetLayout();
srcSize = layout->GetSize();
isHfa = compiler->IsHfa(layout->GetClassHandle());
}
// If we have an HFA we can't have any GC pointers,
// if not then the max size for the struct is 16 bytes
if (isHfa)
{
noway_assert(!layout->HasGCPtr());
}
#ifdef TARGET_ARM64
else
{
noway_assert(srcSize <= 2 * TARGET_POINTER_SIZE);
}
noway_assert(srcSize <= MAX_PASS_MULTIREG_BYTES);
#endif // TARGET_ARM64
unsigned structSize;
unsigned dstSize = treeNode->GetStackByteSize();
if (dstSize != srcSize)
{
// We can generate a smaller code if store size is a multiple of TARGET_POINTER_SIZE.
// The dst size can be rounded up to PUTARG_STK size.
// The src size can be rounded up if it reads a local variable slot because the local
// variable stack allocation size is rounded up to be a multiple of the TARGET_POINTER_SIZE.
// The exception is arm64 apple arguments because they can be passed without padding.
if (varNode != nullptr)
{
// If we have a varNode, even if it was casted using `OBJ`, we can read its original memory size.
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNode);
const unsigned varStackSize = varDsc->lvSize();
if (varStackSize >= srcSize)
{
srcSize = varStackSize;
}
}
}
if (dstSize == srcSize)
{
structSize = dstSize;
}
else
{
// With Unsafe object cast we can have different strange combinations:
// PutArgStk<8>(Obj<16>(LclVar<8>)) -> copy 8 bytes;
// PutArgStk<16>(Obj<16>(LclVar<8>)) -> copy 16 bytes, reading undefined memory after the local.
structSize = min(dstSize, srcSize);
}
int remainingSize = structSize;
unsigned structOffset = 0;
unsigned nextIndex = 0;
#ifdef TARGET_ARM64
// For a >= 16-byte structSize we will generate an ldp and stp instruction pair on each loop iteration
// ldp x2, x3, [x0]
// stp x2, x3, [sp, #16]
while (remainingSize >= 2 * TARGET_POINTER_SIZE)
{
var_types type0 = layout->GetGCPtrType(nextIndex + 0);
var_types type1 = layout->GetGCPtrType(nextIndex + 1);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_R_S_S(INS_ldp, emitTypeSize(type0), emitTypeSize(type1), loReg, hiReg,
varNode->GetLclNum(), structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(loReg != addrReg);
noway_assert((remainingSize == 2 * TARGET_POINTER_SIZE) || (hiReg != addrReg));
// Load from our address expression source
emit->emitIns_R_R_R_I(INS_ldp, emitTypeSize(type0), loReg, hiReg, addrReg, structOffset,
INS_OPTS_NONE, emitTypeSize(type0));
}
// Emit stp instruction to store the two registers into the outgoing argument area
emit->emitIns_S_S_R_R(INS_stp, emitTypeSize(type0), emitTypeSize(type1), loReg, hiReg, varNumOut,
argOffsetOut);
argOffsetOut += (2 * TARGET_POINTER_SIZE); // We stored 16-bytes of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= (2 * TARGET_POINTER_SIZE); // We loaded 16-bytes of the struct
structOffset += (2 * TARGET_POINTER_SIZE);
nextIndex += 2;
}
#else // TARGET_ARM
// For a >= 4 byte structSize we will generate an ldr and str instruction on each loop iteration
// ldr r2, [r0]
// str r2, [sp, #16]
while (remainingSize >= TARGET_POINTER_SIZE)
{
var_types type = layout->GetGCPtrType(nextIndex);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), loReg, varNode->GetLclNum(), structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(loReg != addrReg || remainingSize == TARGET_POINTER_SIZE);
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), loReg, addrReg, structOffset);
}
// Emit str instruction to store the register into the outgoing argument area
emit->emitIns_S_R(INS_str, emitTypeSize(type), loReg, varNumOut, argOffsetOut);
argOffsetOut += TARGET_POINTER_SIZE; // We stored 4-bytes of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= TARGET_POINTER_SIZE; // We loaded 4-bytes of the struct
structOffset += TARGET_POINTER_SIZE;
nextIndex += 1;
}
#endif // TARGET_ARM
// For a 12-byte structSize we will generate two load instructions
// ldr x2, [x0]
// ldr w3, [x0, #8]
// str x2, [sp, #16]
// str w3, [sp, #24]
while (remainingSize > 0)
{
var_types type;
if (remainingSize >= TARGET_POINTER_SIZE)
{
type = layout->GetGCPtrType(nextIndex);
}
else // (remainingSize < TARGET_POINTER_SIZE)
{
// the leftover size is smaller than a pointer and thus can never be a GC type
assert(!layout->IsGCPtr(nextIndex));
if (remainingSize == 1)
{
type = TYP_UBYTE;
}
else if (remainingSize == 2)
{
type = TYP_USHORT;
}
else
{
assert(remainingSize == 4);
type = TYP_UINT;
}
}
const emitAttr attr = emitTypeSize(type);
const unsigned moveSize = genTypeSize(type);
assert(EA_SIZE_IN_BYTES(attr) == moveSize);
remainingSize -= moveSize;
instruction loadIns = ins_Load(type);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(loadIns, attr, loReg, varNode->GetLclNum(), structOffset);
}
else
{
assert(loReg != addrReg);
// Load from our address expression source
emit->emitIns_R_R_I(loadIns, attr, loReg, addrReg, structOffset);
}
// Emit a store instruction to store the register into the outgoing argument area
instruction storeIns = ins_Store(type);
emit->emitIns_S_R(storeIns, attr, loReg, varNumOut, argOffsetOut);
argOffsetOut += moveSize;
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
structOffset += moveSize;
nextIndex++;
}
}
}
}
//---------------------------------------------------------------------
// genPutArgReg - generate code for a GT_PUTARG_REG node
//
// Arguments
// tree - the GT_PUTARG_REG node
//
// Return value:
// None
//
void CodeGen::genPutArgReg(GenTreeOp* tree)
{
assert(tree->OperIs(GT_PUTARG_REG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
assert(targetType != TYP_STRUCT);
GenTree* op1 = tree->gtOp1;
genConsumeReg(op1);
// If child node is not already in the register we need, move it
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genPutArgSplit - generate code for a GT_PUTARG_SPLIT node
//
// Arguments
// tree - the GT_PUTARG_SPLIT node
//
// Return value:
// None
//
void CodeGen::genPutArgSplit(GenTreePutArgSplit* treeNode)
{
assert(treeNode->OperIs(GT_PUTARG_SPLIT));
GenTree* source = treeNode->gtOp1;
emitter* emit = GetEmitter();
unsigned varNumOut = compiler->lvaOutgoingArgSpaceVar;
unsigned argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
if (source->OperGet() == GT_FIELD_LIST)
{
// Evaluate each of the GT_FIELD_LIST items into their register
// and store their register into the outgoing argument area
unsigned regIndex = 0;
unsigned firstOnStackOffs = UINT_MAX;
for (GenTreeFieldList::Use& use : source->AsFieldList()->Uses())
{
GenTree* nextArgNode = use.GetNode();
regNumber fieldReg = nextArgNode->GetRegNum();
genConsumeReg(nextArgNode);
if (regIndex >= treeNode->gtNumRegs)
{
if (firstOnStackOffs == UINT_MAX)
{
firstOnStackOffs = use.GetOffset();
}
var_types type = nextArgNode->TypeGet();
emitAttr attr = emitTypeSize(type);
unsigned offset = treeNode->getArgOffset() + use.GetOffset() - firstOnStackOffs;
// We can't write beyond the outgoing arg area
assert(offset + EA_SIZE_IN_BYTES(attr) <= argOffsetMax);
// Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
// argument area
emit->emitIns_S_R(ins_Store(type), attr, fieldReg, varNumOut, offset);
}
else
{
var_types type = treeNode->GetRegType(regIndex);
regNumber argReg = treeNode->GetRegNumByIdx(regIndex);
#ifdef TARGET_ARM
if (type == TYP_LONG)
{
// We should only see long fields for DOUBLEs passed in 2 integer registers, via bitcast.
// All other LONGs should have been decomposed.
// Handle the first INT, and then handle the 2nd below.
assert(nextArgNode->OperIs(GT_BITCAST));
type = TYP_INT;
inst_Mov(type, argReg, fieldReg, /* canSkip */ true);
// Now set up the next register for the 2nd INT
argReg = REG_NEXT(argReg);
regIndex++;
assert(argReg == treeNode->GetRegNumByIdx(regIndex));
fieldReg = nextArgNode->AsMultiRegOp()->GetRegNumByIdx(1);
}
#endif // TARGET_ARM
// If child node is not already in the register we need, move it
inst_Mov(type, argReg, fieldReg, /* canSkip */ true);
regIndex++;
}
}
}
else
{
var_types targetType = source->TypeGet();
assert(source->OperGet() == GT_OBJ);
assert(varTypeIsStruct(targetType));
regNumber baseReg = treeNode->ExtractTempReg();
regNumber addrReg = REG_NA;
GenTreeLclVarCommon* varNode = nullptr;
GenTree* addrNode = nullptr;
addrNode = source->AsOp()->gtOp1;
// addrNode can either be a GT_LCL_VAR_ADDR or an address expression
//
if (addrNode->OperGet() == GT_LCL_VAR_ADDR)
{
// We have a GT_OBJ(GT_LCL_VAR_ADDR)
//
// We will treat this case the same as above
// (i.e. if we just had this GT_LCL_VAR directly as the source)
// so update 'source' to point to this GT_LCL_VAR_ADDR node
// and continue to the codegen for the LCL_VAR node below
//
varNode = addrNode->AsLclVarCommon();
addrNode = nullptr;
}
// Either varNode or addrNode must have been set up above;
// the xor ensures that exactly one of the two is set, not both.
assert((varNode != nullptr) ^ (addrNode != nullptr));
// This is the varNum for our load operations,
// only used when we have a struct with a LclVar source
unsigned srcVarNum = BAD_VAR_NUM;
if (varNode != nullptr)
{
assert(varNode->isContained());
srcVarNum = varNode->GetLclNum();
// handle promote situation
LclVarDsc* varDsc = compiler->lvaGetDesc(srcVarNum);
// This struct also must live in the stack frame
// And it can't live in a register (SIMD)
assert(varDsc->lvType == TYP_STRUCT);
assert(varDsc->lvOnFrame && !varDsc->lvRegister);
// We don't split HFA struct
assert(!varDsc->lvIsHfa());
}
else // addrNode is used
{
assert(addrNode != nullptr);
// TODO-Cleanup: `Lowering::NewPutArg` marks only `LCL_VAR_ADDR` as contained nowadays,
// but we use `genConsumeAddress` as a precaution; consider using `genConsumeReg()` instead.
assert(!addrNode->isContained());
// Generate code to load the address that we need into a register
genConsumeAddress(addrNode);
addrReg = addrNode->GetRegNum();
// If addrReg is equal to baseReg, we use the last target register as an alternative baseReg.
// Because the candidate mask for the internal baseReg does not include any of the target registers,
// we can ensure that baseReg, addrReg, and the last target register are not all the same.
assert(baseReg != addrReg);
// We don't split HFA struct
assert(!compiler->IsHfa(source->AsObj()->GetLayout()->GetClassHandle()));
}
ClassLayout* layout = source->AsObj()->GetLayout();
// Put on stack first
unsigned nextIndex = treeNode->gtNumRegs;
unsigned structOffset = nextIndex * TARGET_POINTER_SIZE;
int remainingSize = treeNode->GetStackByteSize();
unsigned argOffsetOut = treeNode->getArgOffset();
// remainingSize is always multiple of TARGET_POINTER_SIZE
assert(remainingSize % TARGET_POINTER_SIZE == 0);
while (remainingSize > 0)
{
var_types type = layout->GetGCPtrType(nextIndex);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), baseReg, srcVarNum, structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(baseReg != addrReg);
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), baseReg, addrReg, structOffset);
}
// Emit str instruction to store the register into the outgoing argument area
emit->emitIns_S_R(INS_str, emitTypeSize(type), baseReg, varNumOut, argOffsetOut);
argOffsetOut += TARGET_POINTER_SIZE; // We stored TARGET_POINTER_SIZE bytes of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= TARGET_POINTER_SIZE; // We loaded TARGET_POINTER_SIZE bytes of the struct
structOffset += TARGET_POINTER_SIZE;
nextIndex += 1;
}
// We set up the registers in order, so that by the time we assign the last target register
// `baseReg` is no longer in use, in case we had to reuse the last target register for it.
structOffset = 0;
for (unsigned idx = 0; idx < treeNode->gtNumRegs; idx++)
{
regNumber targetReg = treeNode->GetRegNumByIdx(idx);
var_types type = treeNode->GetRegType(idx);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), targetReg, srcVarNum, structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
if (targetReg == addrReg && idx != treeNode->gtNumRegs - 1)
{
assert(targetReg != baseReg);
var_types addrType = addrNode->TypeGet();
emit->emitIns_Mov(INS_mov, emitActualTypeSize(addrType), baseReg, addrReg, /* canSkip */ false);
addrReg = baseReg;
}
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), targetReg, addrReg, structOffset);
}
structOffset += TARGET_POINTER_SIZE;
}
}
genProduceReg(treeNode);
}
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------
// genMultiRegStoreToSIMDLocal: store multi-reg value to a single-reg SIMD local
//
// Arguments:
// lclNode - GentreeLclVar of GT_STORE_LCL_VAR
//
// Return Value:
// None
//
void CodeGen::genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode)
{
regNumber dst = lclNode->GetRegNum();
GenTree* op1 = lclNode->gtGetOp1();
GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
unsigned regCount = actualOp1->GetMultiRegCount(compiler);
assert(op1->IsMultiRegNode());
genConsumeRegs(op1);
// Treat the dst register as a homogeneous vector with element size equal to the src size
// Insert pieces in reverse order
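// A sketch of the emitted sequence (illustrative registers), e.g. for a three-float HFA arriving in s0-s2:
//   mov v_dst.s[2], v2.s[0]
//   mov v_dst.s[1], v1.s[0]
//   mov v_dst.s[0], v0.s[0]
// Writing dst[0] last means no temporary is needed even when dst aliases one of the sources.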
for (int i = regCount - 1; i >= 0; --i)
{
var_types type = op1->gtSkipReloadOrCopy()->GetRegTypeByIndex(i);
regNumber reg = actualOp1->GetRegByIndex(i);
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
// that need to be copied or reloaded.
regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
if (reloadReg != REG_NA)
{
reg = reloadReg;
}
}
assert(reg != REG_NA);
if (varTypeIsFloating(type))
{
// If the register piece was passed in a floating point register
// Use a vector mov element instruction
// src is not a vector, so it is in the first element reg[0]
// mov dst[i], reg[0]
// This effectively moves from `reg[0]` to `dst[i]`, leaving other dst bits unchanged till further
// iterations
// For the case where reg == dst, if we iterate so that we write dst[0] last, we eliminate the need for
// a temporary
GetEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), dst, reg, i, 0);
}
else
{
// If the register piece was passed in an integer register
// Use a vector mov from general purpose register instruction
// mov dst[i], reg
// This effectively moves from `reg` to `dst[i]`
GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), dst, reg, i);
}
}
genProduceReg(lclNode);
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genRangeCheck: generate code for GT_BOUNDS_CHECK node.
//
void CodeGen::genRangeCheck(GenTree* oper)
{
noway_assert(oper->OperIs(GT_BOUNDS_CHECK));
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
GenTree* arrLen = bndsChk->GetArrayLength();
GenTree* arrIndex = bndsChk->GetIndex();
GenTree* arrRef = nullptr;
int lenOffset = 0;
GenTree* src1;
GenTree* src2;
emitJumpKind jmpKind;
genConsumeRegs(arrIndex);
genConsumeRegs(arrLen);
if (arrIndex->isContainedIntOrIImmed())
{
// To encode using a cmp immediate, we place the
// constant operand in the second position
src1 = arrLen;
src2 = arrIndex;
jmpKind = EJ_ls;
}
else
{
src1 = arrIndex;
src2 = arrLen;
jmpKind = EJ_hs;
}
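// A sketch of the two shapes this produces (illustrative registers):
//   cmp w_len, #idxImm ; b.ls <throw>   -- when the index is a contained constant
//   cmp w_idx, w_len   ; b.hs <throw>   -- when both operands are in registers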
var_types bndsChkType = genActualType(src2->TypeGet());
#if DEBUG
// Bounds checks can only be 32 or 64 bit sized comparisons.
assert(bndsChkType == TYP_INT || bndsChkType == TYP_LONG);
// The type of the bounds check should always be wide enough to compare against the index.
assert(emitTypeSize(bndsChkType) >= emitActualTypeSize(src1->TypeGet()));
#endif // DEBUG
GetEmitter()->emitInsBinary(INS_cmp, emitActualTypeSize(bndsChkType), src1, src2);
genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
}
//---------------------------------------------------------------------
// genCodeForPhysReg - generate code for a GT_PHYSREG node
//
// Arguments
// tree - the GT_PHYSREG node
//
// Return value:
// None
//
void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree)
{
assert(tree->OperIs(GT_PHYSREG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
inst_Mov(targetType, targetReg, tree->gtSrcReg, /* canSkip */ true);
genTransferRegGCState(targetReg, tree->gtSrcReg);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genCodeForNullCheck - generate code for a GT_NULLCHECK node
//
// Arguments
// tree - the GT_NULLCHECK node
//
// Return value:
// None
//
void CodeGen::genCodeForNullCheck(GenTreeIndir* tree)
{
#ifdef TARGET_ARM
assert(!"GT_NULLCHECK isn't supported for Arm32; use GT_IND.");
#else
assert(tree->OperIs(GT_NULLCHECK));
GenTree* op1 = tree->gtOp1;
genConsumeRegs(op1);
regNumber targetReg = REG_ZR;
GetEmitter()->emitInsLoadStoreOp(ins_Load(tree->TypeGet()), emitActualTypeSize(tree), targetReg, tree);
#endif
}
//------------------------------------------------------------------------
// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
// producing the effective index by subtracting the lower bound.
//
// Arguments:
// arrIndex - the node for which we're generating code
//
// Return Value:
// None.
//
void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
emitter* emit = GetEmitter();
GenTree* arrObj = arrIndex->ArrObj();
GenTree* indexNode = arrIndex->IndexExpr();
regNumber arrReg = genConsumeReg(arrObj);
regNumber indexReg = genConsumeReg(indexNode);
regNumber tgtReg = arrIndex->GetRegNum();
noway_assert(tgtReg != REG_NA);
// We will use a temp register to load the lower bound and dimension size values.
regNumber tmpReg = arrIndex->GetSingleTempReg();
assert(tgtReg != tmpReg);
unsigned dim = arrIndex->gtCurrDim;
unsigned rank = arrIndex->gtArrRank;
var_types elemType = arrIndex->gtArrElemType;
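// A sketch of the sequence emitted below (illustrative registers):
//   ldr  w_tmp, [arrReg, #lowerBoundOffset]
//   sub  w_tgt, w_index, w_tmp
//   ldr  w_tmp, [arrReg, #lengthOffset]
//   cmp  w_tgt, w_tmp
//   b.hs <range-check-fail>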
unsigned offset;
offset = compiler->eeGetMDArrayLowerBoundOffset(rank, dim);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R_R(INS_sub, EA_4BYTE, tgtReg, indexReg, tmpReg);
offset = compiler->eeGetMDArrayLengthOffset(rank, dim);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R(INS_cmp, EA_4BYTE, tgtReg, tmpReg);
genJumpToThrowHlpBlk(EJ_hs, SCK_RNGCHK_FAIL);
genProduceReg(arrIndex);
}
//------------------------------------------------------------------------
// genCodeForArrOffset: Generates code to compute the flattened array offset for
// one dimension of an array reference:
// result = (prevDimOffset * dimSize) + effectiveIndex
// where dimSize is obtained from the arrObj operand
//
// Arguments:
// arrOffset - the node for which we're generating code
//
// Return Value:
// None.
//
// Notes:
// dimSize and effectiveIndex are always non-negative, the former by design,
// and the latter because it has been normalized to be zero-based.
void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
{
GenTree* offsetNode = arrOffset->gtOffset;
GenTree* indexNode = arrOffset->gtIndex;
regNumber tgtReg = arrOffset->GetRegNum();
noway_assert(tgtReg != REG_NA);
if (!offsetNode->IsIntegralConst(0))
{
emitter* emit = GetEmitter();
regNumber offsetReg = genConsumeReg(offsetNode);
regNumber indexReg = genConsumeReg(indexNode);
regNumber arrReg = genConsumeReg(arrOffset->gtArrObj);
noway_assert(offsetReg != REG_NA);
noway_assert(indexReg != REG_NA);
noway_assert(arrReg != REG_NA);
regNumber tmpReg = arrOffset->GetSingleTempReg();
unsigned dim = arrOffset->gtCurrDim;
unsigned rank = arrOffset->gtArrRank;
var_types elemType = arrOffset->gtArrElemType;
unsigned offset = compiler->eeGetMDArrayLengthOffset(rank, dim);
// Load tmpReg with the dimension size and evaluate
// tgtReg = offsetReg*tmpReg + indexReg.
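// A sketch of what this typically emits (illustrative registers):
//   ldr  w_tmp, [arrReg, #lengthOffset]
//   madd x_tgt, x_tmp, x_offset, x_index   (mla on arm32)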
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R_R_R(INS_MULADD, EA_PTRSIZE, tgtReg, tmpReg, offsetReg, indexReg);
}
else
{
regNumber indexReg = genConsumeReg(indexNode);
inst_Mov(TYP_INT, tgtReg, indexReg, /* canSkip */ true);
}
genProduceReg(arrOffset);
}
//------------------------------------------------------------------------
// genCodeForShift: Generates the code sequence for a GenTree node that
// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
//
void CodeGen::genCodeForShift(GenTree* tree)
{
var_types targetType = tree->TypeGet();
genTreeOps oper = tree->OperGet();
instruction ins = genGetInsForOper(oper, targetType);
emitAttr size = emitActualTypeSize(tree);
regNumber dstReg = tree->GetRegNum();
assert(dstReg != REG_NA);
genConsumeOperands(tree->AsOp());
GenTree* operand = tree->gtGetOp1();
GenTree* shiftBy = tree->gtGetOp2();
if (!shiftBy->IsCnsIntOrI())
{
GetEmitter()->emitIns_R_R_R(ins, size, dstReg, operand->GetRegNum(), shiftBy->GetRegNum());
}
else
{
unsigned immWidth = emitter::getBitWidth(size); // For ARM64, immWidth will be set to 32 or 64
unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->gtIconVal & (immWidth - 1);
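// For example, on a 64-bit operand a constant shift amount of 65 is masked to (65 & 63) == 1 here,
// matching the modulo-datasize behavior of the register-based shift forms.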
GetEmitter()->emitIns_R_R_I(ins, size, dstReg, operand->GetRegNum(), shiftByImm);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclAddr: Generates the code for GT_LCL_FLD_ADDR/GT_LCL_VAR_ADDR.
//
// Arguments:
// tree - the node.
//
void CodeGen::genCodeForLclAddr(GenTree* tree)
{
assert(tree->OperIs(GT_LCL_FLD_ADDR, GT_LCL_VAR_ADDR));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
// Address of a local var.
noway_assert((targetType == TYP_BYREF) || (targetType == TYP_I_IMPL));
emitAttr size = emitTypeSize(targetType);
inst_RV_TT(INS_lea, targetReg, tree, 0, size);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclFld: Produce code for a GT_LCL_FLD node.
//
// Arguments:
// tree - the GT_LCL_FLD node
//
void CodeGen::genCodeForLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_LCL_FLD));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
NYI_IF(targetType == TYP_STRUCT, "GT_LCL_FLD: struct load local field not supported");
assert(targetReg != REG_NA);
unsigned offs = tree->GetLclOffs();
unsigned varNum = tree->GetLclNum();
assert(varNum < compiler->lvaCount);
#ifdef TARGET_ARM
if (tree->IsOffsetMisaligned())
{
// Arm supports unaligned access only for integer types,
// so load the floating-point data into 1 or 2 integer registers and convert it to float/double.
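// A sketch of the TYP_DOUBLE path below (illustrative registers):
//   add  rAddr, <frame reg>, #offs   ; INS_lea
//   ldr  rLo, [rAddr]
//   ldr  rHi, [rAddr, #4]
//   vmov dTgt, rLo, rHi              ; INS_vmov_i2d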
regNumber addr = tree->ExtractTempReg();
emit->emitIns_R_S(INS_lea, EA_PTRSIZE, addr, varNum, offs);
if (targetType == TYP_FLOAT)
{
regNumber floatAsInt = tree->GetSingleTempReg();
emit->emitIns_R_R(INS_ldr, EA_4BYTE, floatAsInt, addr);
emit->emitIns_Mov(INS_vmov_i2f, EA_4BYTE, targetReg, floatAsInt, /* canSkip */ false);
}
else
{
regNumber halfdoubleAsInt1 = tree->ExtractTempReg();
regNumber halfdoubleAsInt2 = tree->GetSingleTempReg();
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, halfdoubleAsInt1, addr, 0);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, halfdoubleAsInt2, addr, 4);
emit->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, targetReg, halfdoubleAsInt1, halfdoubleAsInt2);
}
}
else
#endif // TARGET_ARM
{
emitAttr attr = emitActualTypeSize(targetType);
instruction ins = ins_Load(targetType);
emit->emitIns_R_S(ins, attr, targetReg, varNum, offs);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForIndexAddr: Produce code for a GT_INDEX_ADDR node.
//
// Arguments:
// tree - the GT_INDEX_ADDR node
//
void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node)
{
GenTree* const base = node->Arr();
GenTree* const index = node->Index();
genConsumeReg(base);
genConsumeReg(index);
// NOTE: `genConsumeReg` marks the consumed register as not a GC pointer, as it assumes that the input registers
// die at the first instruction generated by the node. This is not the case for `INDEX_ADDR`, however, as the
// base register is multiply-used. As such, we need to mark the base register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(base->GetRegNum(), base->TypeGet());
assert(!varTypeIsGC(index->TypeGet()));
// The index is never contained, even if it is a constant.
assert(index->isUsedFromReg());
const regNumber tmpReg = node->GetSingleTempReg();
// Generate the bounds check if necessary.
if ((node->gtFlags & GTF_INX_RNGCHK) != 0)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, base->GetRegNum(), node->gtLenOffset);
GetEmitter()->emitIns_R_R(INS_cmp, emitActualTypeSize(index->TypeGet()), index->GetRegNum(), tmpReg);
genJumpToThrowHlpBlk(EJ_hs, SCK_RNGCHK_FAIL, node->gtIndRngFailBB);
}
// Can we use a ScaledAdd instruction?
//
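// For example (illustrative registers), a 4-byte element size takes the scaled-add path below:
//   add x_dst, x_base, x_index, LSL #2
// while a non-power-of-2 element size falls through to the mov + madd path.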
if (isPow2(node->gtElemSize) && (node->gtElemSize <= 32768))
{
DWORD scale;
BitScanForward(&scale, node->gtElemSize);
// dest = base + index * scale
genScaledAdd(emitActualTypeSize(node), node->GetRegNum(), base->GetRegNum(), index->GetRegNum(), scale);
}
else // we have to load the element size and use a MADD (multiply-add) instruction
{
// tmpReg = element size
instGen_Set_Reg_To_Imm(EA_4BYTE, tmpReg, (ssize_t)node->gtElemSize);
// dest = index * tmpReg + base
GetEmitter()->emitIns_R_R_R_R(INS_MULADD, emitActualTypeSize(node), node->GetRegNum(), index->GetRegNum(),
tmpReg, base->GetRegNum());
}
// dest = dest + elemOffs
GetEmitter()->emitIns_R_R_I(INS_add, emitActualTypeSize(node), node->GetRegNum(), node->GetRegNum(),
node->gtElemOffset);
gcInfo.gcMarkRegSetNpt(base->gtGetRegMask());
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForIndir: Produce code for a GT_IND node.
//
// Arguments:
// tree - the GT_IND node
//
void CodeGen::genCodeForIndir(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_IND));
#ifdef FEATURE_SIMD
// Handling of Vector3 type values loaded through indirection.
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
var_types type = tree->TypeGet();
instruction ins = ins_Load(type);
regNumber targetReg = tree->GetRegNum();
genConsumeAddress(tree->Addr());
bool emitBarrier = false;
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
{
#ifdef TARGET_ARM64
bool addrIsInReg = tree->Addr()->isUsedFromReg();
bool addrIsAligned = ((tree->gtFlags & GTF_IND_UNALIGNED) == 0);
if ((ins == INS_ldrb) && addrIsInReg)
{
ins = INS_ldarb;
}
else if ((ins == INS_ldrh) && addrIsInReg && addrIsAligned)
{
ins = INS_ldarh;
}
else if ((ins == INS_ldr) && addrIsInReg && addrIsAligned && genIsValidIntReg(targetReg))
{
ins = INS_ldar;
}
else
#endif // TARGET_ARM64
{
emitBarrier = true;
}
}
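// For example (arm64, illustrative registers): a naturally aligned volatile 4-byte load from a
// register address becomes a single "ldar w_dst, [x_addr]", while the fallback path keeps the
// plain load and follows it with the load-only barrier emitted below.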
GetEmitter()->emitInsLoadStoreOp(ins, emitActualTypeSize(type), targetReg, tree);
if (emitBarrier)
{
// when INS_ldar* could not be used for a volatile load,
// we use an ordinary load followed by a load barrier.
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
genProduceReg(tree);
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkHelper - Generate code for a CpBlk node by the means of the VM memcpy helper call
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode)
{
// Destination address goes in arg0, source address goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
if (cpBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before a volatile CpBlk operation
instGen_MemoryBarrier();
}
genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
if (cpBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a load barrier after a volatile CpBlk operation
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
}
#ifdef TARGET_ARM64
// The following classes
// - InitBlockUnrollHelper
// - CopyBlockUnrollHelper
// encapsulate algorithms that produce instruction sequences for inlined equivalents of memset() and memcpy() functions.
//
// Each class has a private template function that accepts an "InstructionStream" as a template class argument:
// - InitBlockUnrollHelper::UnrollInitBlock<InstructionStream>(startDstOffset, byteCount, initValue)
// - CopyBlockUnrollHelper::UnrollCopyBlock<InstructionStream>(startSrcOffset, startDstOffset, byteCount)
//
// The design goal is to separate optimization approaches implemented by the algorithms
// from the target platform specific details.
//
// InstructionStream is a "stream" of load/store instructions (i.e. ldr/ldp/str/stp) that represents an instruction
// sequence that will initialize a memory region with some value or copy values from one memory region to another.
//
// As far as UnrollInitBlock and UnrollCopyBlock concerned, InstructionStream implements the following class member
// functions:
// - LoadPairRegs(offset, regSizeBytes)
// - StorePairRegs(offset, regSizeBytes)
// - LoadReg(offset, regSizeBytes)
// - StoreReg(offset, regSizeBytes)
//
// There are three implementations of InstructionStream:
// - CountingStream that counts how many instructions were pushed out of the stream
// - VerifyingStream that validates that all the instructions in the stream are encodable on Arm64
// - ProducingStream that maps the function to corresponding emitter functions
//
// The idea behind the design is that the decision regarding which instruction sequence to emit
// (scalar instructions vs. SIMD instructions) is made by executing an algorithm that produces an instruction sequence
// while counting the number of produced instructions and verifying that all the instructions are encodable.
//
// For example, using SIMD instructions might produce a shorter sequence but require "spilling" the value of a
// starting address to an integer register (due to stricter offset alignment rules for 16-byte wide SIMD
// instructions). This way the CodeGen can take that fact into account before emitting an instruction sequence.
//
// An alternative design might have had VerifyingStream and ProducingStream fused into one class
// that would allow undoing an instruction if the sequence turns out not to be fully encodable.
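// A sketch of the intended flow (using the classes defined below):
//   CountingStream counter;          // 1) count the instructions a candidate register size would need
//   VerifyingStream verifier;        // 2) check that every load/store offset is encodable
//   ProducingStream producer(...);   // 3) only then replay the same algorithm to emit code
// InitBlockUnrollHelper and CopyBlockUnrollHelper below drive exactly this count -> verify -> produce
// decision before any instruction is emitted.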
class CountingStream
{
public:
CountingStream()
{
instrCount = 0;
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instrCount++;
}
unsigned InstructionCount() const
{
return instrCount;
}
private:
unsigned instrCount;
};
class VerifyingStream
{
public:
VerifyingStream()
{
canEncodeAllLoads = true;
canEncodeAllStores = true;
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
canEncodeAllLoads = canEncodeAllLoads && emitter::canEncodeLoadOrStorePairOffset(offset, EA_SIZE(regSizeBytes));
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
canEncodeAllStores =
canEncodeAllStores && emitter::canEncodeLoadOrStorePairOffset(offset, EA_SIZE(regSizeBytes));
}
void LoadReg(int offset, unsigned regSizeBytes)
{
canEncodeAllLoads =
canEncodeAllLoads && emitter::emitIns_valid_imm_for_ldst_offset(offset, EA_SIZE(regSizeBytes));
}
void StoreReg(int offset, unsigned regSizeBytes)
{
canEncodeAllStores =
canEncodeAllStores && emitter::emitIns_valid_imm_for_ldst_offset(offset, EA_SIZE(regSizeBytes));
}
bool CanEncodeAllLoads() const
{
return canEncodeAllLoads;
}
bool CanEncodeAllStores() const
{
return canEncodeAllStores;
}
private:
bool canEncodeAllLoads;
bool canEncodeAllStores;
};
class ProducingStreamBaseInstrs
{
public:
ProducingStreamBaseInstrs(regNumber intReg1, regNumber intReg2, regNumber addrReg, emitter* emitter)
: intReg1(intReg1), intReg2(intReg2), addrReg(addrReg), emitter(emitter)
{
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
assert(regSizeBytes == 8);
emitter->emitIns_R_R_R_I(INS_ldp, EA_SIZE(regSizeBytes), intReg1, intReg2, addrReg, offset);
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
assert(regSizeBytes == 8);
emitter->emitIns_R_R_R_I(INS_stp, EA_SIZE(regSizeBytes), intReg1, intReg2, addrReg, offset);
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_ldr;
if (regSizeBytes == 1)
{
ins = INS_ldrb;
}
else if (regSizeBytes == 2)
{
ins = INS_ldrh;
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), intReg1, addrReg, offset);
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_str;
if (regSizeBytes == 1)
{
ins = INS_strb;
}
else if (regSizeBytes == 2)
{
ins = INS_strh;
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), intReg1, addrReg, offset);
}
private:
const regNumber intReg1;
const regNumber intReg2;
const regNumber addrReg;
emitter* const emitter;
};
class ProducingStream
{
public:
ProducingStream(regNumber intReg1, regNumber simdReg1, regNumber simdReg2, regNumber addrReg, emitter* emitter)
: intReg1(intReg1), simdReg1(simdReg1), simdReg2(simdReg2), addrReg(addrReg), emitter(emitter)
{
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
assert((regSizeBytes == 8) || (regSizeBytes == 16));
emitter->emitIns_R_R_R_I(INS_ldp, EA_SIZE(regSizeBytes), simdReg1, simdReg2, addrReg, offset);
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
assert((regSizeBytes == 8) || (regSizeBytes == 16));
emitter->emitIns_R_R_R_I(INS_stp, EA_SIZE(regSizeBytes), simdReg1, simdReg2, addrReg, offset);
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_ldr;
// Note that 'intReg1' can be unavailable.
// If that is the case, then use SIMD instruction ldr and
// 'simdReg1' as a temporary register.
regNumber tempReg;
if ((regSizeBytes == 16) || (intReg1 == REG_NA))
{
tempReg = simdReg1;
}
else
{
tempReg = intReg1;
if (regSizeBytes == 1)
{
ins = INS_ldrb;
}
else if (regSizeBytes == 2)
{
ins = INS_ldrh;
}
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), tempReg, addrReg, offset);
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_str;
// Note that 'intReg1' can be unavailable.
// If that is the case, then use SIMD instruction str and
// 'simdReg1' as a temporary register.
regNumber tempReg;
if ((regSizeBytes == 16) || (intReg1 == REG_NA))
{
tempReg = simdReg1;
}
else
{
tempReg = intReg1;
if (regSizeBytes == 1)
{
ins = INS_strb;
}
else if (regSizeBytes == 2)
{
ins = INS_strh;
}
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), tempReg, addrReg, offset);
}
private:
const regNumber intReg1;
const regNumber simdReg1;
const regNumber simdReg2;
const regNumber addrReg;
emitter* const emitter;
};
class BlockUnrollHelper
{
public:
// The following function returns a size in bytes that
// 1) is greater than or equal to 'byteCount' and
// 2) can be read or written by a single instruction on Arm64.
// For example, the Arm64 ISA has ldrb/strb and ldrh/strh that
// load/store 1 or 2 bytes, respectively.
// However, there are no instructions that can load/store 3 bytes and
// the next "smallest" instruction is ldr/str, which operates on 4-byte granularity.
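// For example: GetRegSizeAtLeastBytes(3) == 4, GetRegSizeAtLeastBytes(5) == 8,
// and GetRegSizeAtLeastBytes(9) == 16.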
static unsigned GetRegSizeAtLeastBytes(unsigned byteCount)
{
assert(byteCount != 0);
assert(byteCount < 16);
unsigned regSizeBytes = byteCount;
if (byteCount > 8)
{
regSizeBytes = 16;
}
else if (byteCount > 4)
{
regSizeBytes = 8;
}
else if (byteCount > 2)
{
regSizeBytes = 4;
}
return regSizeBytes;
}
};
class InitBlockUnrollHelper
{
public:
InitBlockUnrollHelper(int dstOffset, unsigned byteCount) : dstStartOffset(dstOffset), byteCount(byteCount)
{
}
int GetDstOffset() const
{
return dstStartOffset;
}
void SetDstOffset(int dstOffset)
{
dstStartOffset = dstOffset;
}
bool CanEncodeAllOffsets(int regSizeBytes) const
{
VerifyingStream instrStream;
UnrollInitBlock(instrStream, regSizeBytes);
return instrStream.CanEncodeAllStores();
}
unsigned InstructionCount(int regSizeBytes) const
{
CountingStream instrStream;
UnrollInitBlock(instrStream, regSizeBytes);
return instrStream.InstructionCount();
}
void Unroll(regNumber intReg, regNumber simdReg, regNumber addrReg, emitter* emitter) const
{
ProducingStream instrStream(intReg, simdReg, simdReg, addrReg, emitter);
UnrollInitBlock(instrStream, FP_REGSIZE_BYTES);
}
void UnrollBaseInstrs(regNumber intReg, regNumber addrReg, emitter* emitter) const
{
ProducingStreamBaseInstrs instrStream(intReg, intReg, addrReg, emitter);
UnrollInitBlock(instrStream, REGSIZE_BYTES);
}
private:
template <class InstructionStream>
void UnrollInitBlock(InstructionStream& instrStream, int initialRegSizeBytes) const
{
assert((initialRegSizeBytes == 8) || (initialRegSizeBytes == 16));
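// A sketch of the output for an 8-byte register size, offset 0 and byteCount 40
// (assuming the zero register as the source, as in the zero-init case):
//   stp xzr, xzr, [dst]        ; bytes [0, 16)
//   stp xzr, xzr, [dst, #16]   ; bytes [16, 32)
//   str xzr,      [dst, #32]   ; bytes [32, 40)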
int offset = dstStartOffset;
const int endOffset = offset + byteCount;
const int storePairRegsAlignment = initialRegSizeBytes;
const int storePairRegsWritesBytes = 2 * initialRegSizeBytes;
const int offsetAligned = AlignUp((UINT)offset, storePairRegsAlignment);
const int storePairRegsInstrCount = (endOffset - offsetAligned) / storePairRegsWritesBytes;
if (storePairRegsInstrCount > 0)
{
if (offset != offsetAligned)
{
const int firstRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(offsetAligned - offset);
instrStream.StoreReg(offset, firstRegSizeBytes);
offset = offsetAligned;
}
while (endOffset - offset >= storePairRegsWritesBytes)
{
instrStream.StorePairRegs(offset, initialRegSizeBytes);
offset += storePairRegsWritesBytes;
}
if (endOffset - offset >= initialRegSizeBytes)
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
}
if (offset != endOffset)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endOffset - offset);
instrStream.StoreReg(endOffset - lastRegSizeBytes, lastRegSizeBytes);
}
}
else
{
bool isSafeToWriteBehind = false;
while (endOffset - offset >= initialRegSizeBytes)
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
assert(endOffset - offset < initialRegSizeBytes);
while (offset != endOffset)
{
if (isSafeToWriteBehind)
{
assert(endOffset - offset < initialRegSizeBytes);
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endOffset - offset);
instrStream.StoreReg(endOffset - lastRegSizeBytes, lastRegSizeBytes);
break;
}
if (offset + initialRegSizeBytes > endOffset)
{
initialRegSizeBytes = initialRegSizeBytes / 2;
}
else
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
}
}
}
int dstStartOffset;
const unsigned byteCount;
};
class CopyBlockUnrollHelper
{
public:
CopyBlockUnrollHelper(int srcOffset, int dstOffset, unsigned byteCount)
: srcStartOffset(srcOffset), dstStartOffset(dstOffset), byteCount(byteCount)
{
}
int GetSrcOffset() const
{
return srcStartOffset;
}
int GetDstOffset() const
{
return dstStartOffset;
}
void SetSrcOffset(int srcOffset)
{
srcStartOffset = srcOffset;
}
void SetDstOffset(int dstOffset)
{
dstStartOffset = dstOffset;
}
unsigned InstructionCount(int regSizeBytes) const
{
CountingStream instrStream;
UnrollCopyBlock(instrStream, instrStream, regSizeBytes);
return instrStream.InstructionCount();
}
bool CanEncodeAllOffsets(int regSizeBytes) const
{
bool canEncodeAllLoads = true;
bool canEncodeAllStores = true;
TryEncodeAllOffsets(regSizeBytes, &canEncodeAllLoads, &canEncodeAllStores);
return canEncodeAllLoads && canEncodeAllStores;
}
void TryEncodeAllOffsets(int regSizeBytes, bool* pCanEncodeAllLoads, bool* pCanEncodeAllStores) const
{
assert(pCanEncodeAllLoads != nullptr);
assert(pCanEncodeAllStores != nullptr);
VerifyingStream instrStream;
UnrollCopyBlock(instrStream, instrStream, regSizeBytes);
*pCanEncodeAllLoads = instrStream.CanEncodeAllLoads();
*pCanEncodeAllStores = instrStream.CanEncodeAllStores();
}
void Unroll(unsigned initialRegSizeBytes,
regNumber intReg,
regNumber simdReg1,
regNumber simdReg2,
regNumber srcAddrReg,
regNumber dstAddrReg,
emitter* emitter) const
{
ProducingStream loadStream(intReg, simdReg1, simdReg2, srcAddrReg, emitter);
ProducingStream storeStream(intReg, simdReg1, simdReg2, dstAddrReg, emitter);
UnrollCopyBlock(loadStream, storeStream, initialRegSizeBytes);
}
void UnrollBaseInstrs(
regNumber intReg1, regNumber intReg2, regNumber srcAddrReg, regNumber dstAddrReg, emitter* emitter) const
{
ProducingStreamBaseInstrs loadStream(intReg1, intReg2, srcAddrReg, emitter);
ProducingStreamBaseInstrs storeStream(intReg1, intReg2, dstAddrReg, emitter);
UnrollCopyBlock(loadStream, storeStream, REGSIZE_BYTES);
}
private:
template <class InstructionStream>
void UnrollCopyBlock(InstructionStream& loadStream, InstructionStream& storeStream, int initialRegSizeBytes) const
{
assert((initialRegSizeBytes == 8) || (initialRegSizeBytes == 16));
int srcOffset = srcStartOffset;
int dstOffset = dstStartOffset;
const int endSrcOffset = srcOffset + byteCount;
const int endDstOffset = dstOffset + byteCount;
const int storePairRegsAlignment = initialRegSizeBytes;
const int storePairRegsWritesBytes = 2 * initialRegSizeBytes;
const int dstOffsetAligned = AlignUp((UINT)dstOffset, storePairRegsAlignment);
if (byteCount >= (unsigned)storePairRegsWritesBytes)
{
const int dstBytesToAlign = dstOffsetAligned - dstOffset;
if (dstBytesToAlign != 0)
{
const int firstRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(dstBytesToAlign);
loadStream.LoadReg(srcOffset, firstRegSizeBytes);
storeStream.StoreReg(dstOffset, firstRegSizeBytes);
srcOffset = srcOffset + dstBytesToAlign;
dstOffset = dstOffsetAligned;
}
while (endDstOffset - dstOffset >= storePairRegsWritesBytes)
{
loadStream.LoadPairRegs(srcOffset, initialRegSizeBytes);
storeStream.StorePairRegs(dstOffset, initialRegSizeBytes);
srcOffset += storePairRegsWritesBytes;
dstOffset += storePairRegsWritesBytes;
}
if (endDstOffset - dstOffset >= initialRegSizeBytes)
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
}
if (dstOffset != endDstOffset)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endDstOffset - dstOffset);
loadStream.LoadReg(endSrcOffset - lastRegSizeBytes, lastRegSizeBytes);
storeStream.StoreReg(endDstOffset - lastRegSizeBytes, lastRegSizeBytes);
}
}
else
{
bool isSafeToWriteBehind = false;
while (endDstOffset - dstOffset >= initialRegSizeBytes)
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
assert(endSrcOffset - srcOffset < initialRegSizeBytes);
while (dstOffset != endDstOffset)
{
if (isSafeToWriteBehind)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endDstOffset - dstOffset);
loadStream.LoadReg(endSrcOffset - lastRegSizeBytes, lastRegSizeBytes);
storeStream.StoreReg(endDstOffset - lastRegSizeBytes, lastRegSizeBytes);
break;
}
if (dstOffset + initialRegSizeBytes > endDstOffset)
{
initialRegSizeBytes = initialRegSizeBytes / 2;
}
else
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
}
}
}
int srcStartOffset;
int dstStartOffset;
const unsigned byteCount;
};
#endif // TARGET_ARM64
//----------------------------------------------------------------------------------
// genCodeForInitBlkUnroll: Generate unrolled block initialization code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
assert(!dstAddr->AsAddrMode()->HasIndex());
dstAddrBaseReg = genConsumeReg(dstAddr->AsAddrMode()->Base());
dstOffset = dstAddr->AsAddrMode()->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
GenTree* src = node->Data();
if (src->OperIs(GT_INIT_VAL))
{
assert(src->isContained());
src = src->gtGetOp1();
}
if (node->IsVolatile())
{
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(dstOffset < INT32_MAX - static_cast<int>(size));
#ifdef TARGET_ARM64
InitBlockUnrollHelper helper(dstOffset, size);
regNumber srcReg;
if (!src->isContained())
{
srcReg = genConsumeReg(src);
}
else
{
assert(src->IsIntegralConst(0));
srcReg = REG_ZR;
}
regNumber dstReg = dstAddrBaseReg;
int dstRegAddrAlignment = 0;
bool isDstRegAddrAlignmentKnown = false;
if (dstLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(dstLclNum, &fpBased);
dstReg = fpBased ? REG_FPBASE : REG_SPBASE;
dstRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isDstRegAddrAlignmentKnown = true;
helper.SetDstOffset(baseAddr + dstOffset);
}
if (!helper.CanEncodeAllOffsets(REGSIZE_BYTES))
{
// If dstRegAddrAlignment is known and non-zero, the following ensures that the adjusted value of dstReg is at
// a 16-byte aligned boundary.
// This is done to potentially allow more cases where the JIT can use 16-byte stores.
const int dstOffsetAdjustment = helper.GetDstOffset() - dstRegAddrAlignment;
dstRegAddrAlignment = 0;
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, dstReg, dstOffsetAdjustment, tempReg);
dstReg = tempReg;
helper.SetDstOffset(helper.GetDstOffset() - dstOffsetAdjustment);
}
bool shouldUse16ByteWideInstrs = false;
// Store operations that cross a 16-byte boundary can reduce bandwidth or incur additional latency.
// The following condition prevents using 16-byte stores when dstRegAddrAlignment is:
// 1) unknown (i.e. dstReg is neither FP nor SP) or
// 2) non-zero (i.e. dstRegAddr is not 16-byte aligned).
const bool hasAvailableSimdReg = isDstRegAddrAlignmentKnown && (size > FP_REGSIZE_BYTES);
const bool canUse16ByteWideInstrs =
hasAvailableSimdReg && (dstRegAddrAlignment == 0) && helper.CanEncodeAllOffsets(FP_REGSIZE_BYTES);
if (canUse16ByteWideInstrs)
{
// The JIT would need to initialize a SIMD register with "movi simdReg.16B, #initValue".
const unsigned instrCount16ByteWide = helper.InstructionCount(FP_REGSIZE_BYTES) + 1;
shouldUse16ByteWideInstrs = instrCount16ByteWide < helper.InstructionCount(REGSIZE_BYTES);
}
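// For example (a sketch): zero-initializing 64 bytes at a known 16-byte aligned destination costs
//   movi v_tmp.16b, #0
//   stp  q_tmp, q_tmp, [dst]
//   stp  q_tmp, q_tmp, [dst, #32]
// i.e. 3 instructions versus 4 "stp xzr, xzr" stores in the 8-byte wide form, so the SIMD variant
// wins the comparison above.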
if (shouldUse16ByteWideInstrs)
{
const regNumber simdReg = node->GetSingleTempReg(RBM_ALLFLOAT);
const int initValue = (src->AsIntCon()->IconValue() & 0xFF);
emit->emitIns_R_I(INS_movi, EA_16BYTE, simdReg, initValue, INS_OPTS_16B);
helper.Unroll(srcReg, simdReg, dstReg, GetEmitter());
}
else
{
helper.UnrollBaseInstrs(srcReg, dstReg, GetEmitter());
}
#endif // TARGET_ARM64
#ifdef TARGET_ARM
const regNumber srcReg = genConsumeReg(src);
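// For example (a sketch): a 7-byte init emits str (4 bytes), then strh (2 bytes), then strb (1 byte),
// halving regSize as the remaining size shrinks.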
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
instruction storeIns;
emitAttr attr;
switch (regSize)
{
case 1:
storeIns = INS_strb;
attr = EA_4BYTE;
break;
case 2:
storeIns = INS_strh;
attr = EA_4BYTE;
break;
case 4:
storeIns = INS_str;
attr = EA_ATTR(regSize);
break;
default:
unreached();
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(storeIns, attr, srcReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_R_R_I(storeIns, attr, srcReg, dstAddrBaseReg, dstOffset);
}
}
#endif // TARGET_ARM
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkUnroll: Generate unrolled block copy code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
assert(!dstAddr->AsAddrMode()->HasIndex());
dstAddrBaseReg = genConsumeReg(dstAddr->AsAddrMode()->Base());
dstOffset = dstAddr->AsAddrMode()->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
unsigned srcLclNum = BAD_VAR_NUM;
regNumber srcAddrBaseReg = REG_NA;
int srcOffset = 0;
GenTree* src = node->Data();
assert(src->isContained());
if (src->OperIs(GT_LCL_VAR, GT_LCL_FLD))
{
srcLclNum = src->AsLclVarCommon()->GetLclNum();
srcOffset = src->AsLclVarCommon()->GetLclOffs();
}
else
{
assert(src->OperIs(GT_IND));
GenTree* srcAddr = src->AsIndir()->Addr();
if (!srcAddr->isContained())
{
srcAddrBaseReg = genConsumeReg(srcAddr);
}
else if (srcAddr->OperIsAddrMode())
{
srcAddrBaseReg = genConsumeReg(srcAddr->AsAddrMode()->Base());
srcOffset = srcAddr->AsAddrMode()->Offset();
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
}
if (node->IsVolatile())
{
// issue a full memory barrier before a volatile CpBlk operation
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(srcOffset < INT32_MAX - static_cast<int>(size));
assert(dstOffset < INT32_MAX - static_cast<int>(size));
#ifdef TARGET_ARM64
CopyBlockUnrollHelper helper(srcOffset, dstOffset, size);
regNumber srcReg = srcAddrBaseReg;
int srcRegAddrAlignment = 0;
bool isSrcRegAddrAlignmentKnown = false;
if (srcLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(srcLclNum, &fpBased);
srcReg = fpBased ? REG_FPBASE : REG_SPBASE;
srcRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isSrcRegAddrAlignmentKnown = true;
helper.SetSrcOffset(baseAddr + srcOffset);
}
regNumber dstReg = dstAddrBaseReg;
int dstRegAddrAlignment = 0;
bool isDstRegAddrAlignmentKnown = false;
if (dstLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(dstLclNum, &fpBased);
dstReg = fpBased ? REG_FPBASE : REG_SPBASE;
dstRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isDstRegAddrAlignmentKnown = true;
helper.SetDstOffset(baseAddr + dstOffset);
}
bool canEncodeAllLoads = true;
bool canEncodeAllStores = true;
helper.TryEncodeAllOffsets(REGSIZE_BYTES, &canEncodeAllLoads, &canEncodeAllStores);
srcOffset = helper.GetSrcOffset();
dstOffset = helper.GetDstOffset();
int srcOffsetAdjustment = 0;
int dstOffsetAdjustment = 0;
if (!canEncodeAllLoads && !canEncodeAllStores)
{
srcOffsetAdjustment = srcOffset;
dstOffsetAdjustment = dstOffset;
}
else if (!canEncodeAllLoads)
{
srcOffsetAdjustment = srcOffset - dstOffset;
}
else if (!canEncodeAllStores)
{
dstOffsetAdjustment = dstOffset - srcOffset;
}
helper.SetSrcOffset(srcOffset - srcOffsetAdjustment);
helper.SetDstOffset(dstOffset - dstOffsetAdjustment);
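// For example (a sketch): if only the loads were not encodable, rebasing the source address by
// (srcOffset - dstOffset) above makes both streams use the same dstOffset-relative immediates,
// which are known to be encodable, at the cost of a single extra add emitted further below.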
// Quad-word load operations that are not 16-byte aligned, and store operations that cross a 16-byte boundary
// can reduce bandwidth or incur additional latency.
// Therefore, the JIT would attempt to use 16-byte variants of such instructions when both conditions are met:
// 1) the base address stored in dstReg has known alignment (modulo 16 bytes) and
// 2) the base address stored in srcReg has the same alignment as the address in dstReg.
//
// When both addresses are 16-byte aligned the CopyBlock instruction sequence looks like
//
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset]
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #dstOffset+32]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+32]
// ...
//
// When both addresses are not 16-byte aligned the CopyBlock instruction sequence starts with padding
// str instruction. For example, when both addresses are 8-byte aligned the instruction sequence looks like
//
// ldr X_intReg1, [srcReg, #srcOffset]
// str X_intReg1, [dstReg, #dstOffset]
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset+8]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+8]
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset+40]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+40]
// ...
// LSRA allocates a pair of SIMD registers when alignments of both source and destination base addresses are
// known and the block size is larger than a single SIMD register size (i.e. when using SIMD instructions can
// be profitable).
const bool canUse16ByteWideInstrs = isSrcRegAddrAlignmentKnown && isDstRegAddrAlignmentKnown &&
(size >= 2 * FP_REGSIZE_BYTES) && (srcRegAddrAlignment == dstRegAddrAlignment);
bool shouldUse16ByteWideInstrs = false;
if (canUse16ByteWideInstrs)
{
bool canEncodeAll16ByteWideLoads = false;
bool canEncodeAll16ByteWideStores = false;
helper.TryEncodeAllOffsets(FP_REGSIZE_BYTES, &canEncodeAll16ByteWideLoads, &canEncodeAll16ByteWideStores);
if (canEncodeAll16ByteWideLoads && canEncodeAll16ByteWideStores)
{
// No further adjustments for srcOffset and dstOffset are needed.
// The JIT should use 16-byte loads and stores when the resulting sequence has fewer instructions.
shouldUse16ByteWideInstrs =
(helper.InstructionCount(FP_REGSIZE_BYTES) < helper.InstructionCount(REGSIZE_BYTES));
}
else if (canEncodeAllLoads && canEncodeAllStores &&
(canEncodeAll16ByteWideLoads || canEncodeAll16ByteWideStores))
{
// In order to use 16-byte instructions the JIT needs to adjust either srcOffset or dstOffset.
// The JIT should use 16-byte loads and stores when the resulting sequence (incl. an additional add
// instruction) has fewer instructions.
if (helper.InstructionCount(FP_REGSIZE_BYTES) + 1 < helper.InstructionCount(REGSIZE_BYTES))
{
shouldUse16ByteWideInstrs = true;
if (!canEncodeAll16ByteWideLoads)
{
srcOffsetAdjustment = srcOffset - dstOffset;
}
else
{
dstOffsetAdjustment = dstOffset - srcOffset;
}
helper.SetSrcOffset(srcOffset - srcOffsetAdjustment);
helper.SetDstOffset(dstOffset - dstOffsetAdjustment);
}
}
}
#ifdef DEBUG
if (shouldUse16ByteWideInstrs)
{
assert(helper.CanEncodeAllOffsets(FP_REGSIZE_BYTES));
}
else
{
assert(helper.CanEncodeAllOffsets(REGSIZE_BYTES));
}
#endif
if ((srcOffsetAdjustment != 0) && (dstOffsetAdjustment != 0))
{
const regNumber tempReg1 = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg1, srcReg, srcOffsetAdjustment, tempReg1);
srcReg = tempReg1;
const regNumber tempReg2 = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg2, dstReg, dstOffsetAdjustment, tempReg2);
dstReg = tempReg2;
}
else if (srcOffsetAdjustment != 0)
{
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, srcReg, srcOffsetAdjustment, tempReg);
srcReg = tempReg;
}
else if (dstOffsetAdjustment != 0)
{
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, dstReg, dstOffsetAdjustment, tempReg);
dstReg = tempReg;
}
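// Pick the temporary integer registers used by the unrolled copy: use the ones LSRA allocated when available,
// and fall back to the reserved register (rsGetRsvdReg) when fewer than two were allocated (intReg2 may stay
// REG_NA in that case).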
regNumber intReg1 = REG_NA;
regNumber intReg2 = REG_NA;
const unsigned intRegCount = node->AvailableTempRegCount(RBM_ALLINT);
if (intRegCount >= 2)
{
intReg1 = node->ExtractTempReg(RBM_ALLINT);
intReg2 = node->ExtractTempReg(RBM_ALLINT);
}
else if (intRegCount == 1)
{
intReg1 = node->GetSingleTempReg(RBM_ALLINT);
intReg2 = rsGetRsvdReg();
}
else
{
intReg1 = rsGetRsvdReg();
}
if (shouldUse16ByteWideInstrs)
{
const regNumber simdReg1 = node->ExtractTempReg(RBM_ALLFLOAT);
const regNumber simdReg2 = node->GetSingleTempReg(RBM_ALLFLOAT);
helper.Unroll(FP_REGSIZE_BYTES, intReg1, simdReg1, simdReg2, srcReg, dstReg, GetEmitter());
}
else
{
helper.UnrollBaseInstrs(intReg1, intReg2, srcReg, dstReg, GetEmitter());
}
#endif // TARGET_ARM64
#ifdef TARGET_ARM
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
instruction loadIns;
instruction storeIns;
emitAttr attr;
switch (regSize)
{
case 1:
loadIns = INS_ldrb;
storeIns = INS_strb;
attr = EA_4BYTE;
break;
case 2:
loadIns = INS_ldrh;
storeIns = INS_strh;
attr = EA_4BYTE;
break;
case 4:
loadIns = INS_ldr;
storeIns = INS_str;
attr = EA_ATTR(regSize);
break;
default:
unreached();
}
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(loadIns, attr, tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_R_I(loadIns, attr, tempReg, srcAddrBaseReg, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(storeIns, attr, tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_R_R_I(storeIns, attr, tempReg, dstAddrBaseReg, dstOffset);
}
}
#endif // TARGET_ARM
if (node->IsVolatile())
{
// issue a load barrier after a volatile CpBlk operation
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
}
//------------------------------------------------------------------------
// genCodeForInitBlkHelper - Generate code for an InitBlk node by means of the VM memset helper call
//
// Arguments:
// initBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForInitBlkHelper(GenTreeBlk* initBlkNode)
{
// Size goes in arg2, the fill value goes in arg1, and the destination address goes in arg0.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
if (initBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before a volatile initBlock operation
instGen_MemoryBarrier();
}
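// The emitted code is essentially a single helper call - roughly 'bl CORINFO_HELP_MEMSET' - preceded by a full
// memory barrier (e.g. 'dmb ish') when the volatile case above applies.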
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
}
//------------------------------------------------------------------------
// genCall: Produce code for a GT_CALL node
//
void CodeGen::genCall(GenTreeCall* call)
{
// Consume all the arg regs
for (GenTreeCall::Use& use : call->LateArgs())
{
GenTree* argNode = use.GetNode();
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, argNode);
assert(curArgTabEntry);
// GT_RELOAD/GT_COPY use the child node
argNode = argNode->gtSkipReloadOrCopy();
if (curArgTabEntry->GetRegNum() == REG_STK)
continue;
// Deal with struct args passed in multiple registers.
if (argNode->OperGet() == GT_FIELD_LIST)
{
regNumber argReg = curArgTabEntry->GetRegNum();
for (GenTreeFieldList::Use& use : argNode->AsFieldList()->Uses())
{
GenTree* putArgRegNode = use.GetNode();
assert(putArgRegNode->gtOper == GT_PUTARG_REG);
genConsumeReg(putArgRegNode);
inst_Mov_Extend(putArgRegNode->TypeGet(), /* srcInReg */ true, argReg, putArgRegNode->GetRegNum(),
/* canSkip */ true, emitActualTypeSize(TYP_I_IMPL));
argReg = genRegArgNext(argReg);
#if defined(TARGET_ARM)
// A double register is modelled as an even-numbered single one
if (putArgRegNode->TypeGet() == TYP_DOUBLE)
{
argReg = genRegArgNext(argReg);
}
#endif // TARGET_ARM
}
}
else if (curArgTabEntry->IsSplit())
{
assert(compFeatureArgSplit());
assert(curArgTabEntry->numRegs >= 1);
genConsumeArgSplitStruct(argNode->AsPutArgSplit());
for (unsigned idx = 0; idx < curArgTabEntry->numRegs; idx++)
{
regNumber argReg = (regNumber)((unsigned)curArgTabEntry->GetRegNum() + idx);
regNumber allocReg = argNode->AsPutArgSplit()->GetRegNumByIdx(idx);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ true, argReg, allocReg, /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
}
else
{
regNumber argReg = curArgTabEntry->GetRegNum();
genConsumeReg(argNode);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ true, argReg, argNode->GetRegNum(), /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
}
// Insert a null check on "this" pointer if asked.
if (call->NeedsNullCheck())
{
const regNumber regThis = genGetThisArgReg(call);
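// The null check is just a load from [regThis] performed for its side effect: e.g. 'ldr rTmp, [rThis]' on arm,
// or 'ldr wzr, [xThis]' on arm64 (loading into the zero register avoids the need for a temporary).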
#if defined(TARGET_ARM)
const regNumber tmpReg = call->ExtractTempReg();
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, regThis, 0);
#elif defined(TARGET_ARM64)
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, regThis, 0);
#endif // TARGET*
}
// If fast tail call, then we are done here, we just have to load the call
// target into the right registers. We ensure in RA that target is loaded
// into a volatile register that won't be restored by epilog sequence.
if (call->IsFastTailCall())
{
GenTree* target = getCallTarget(call, nullptr);
if (target != nullptr)
{
// Indirect fast tail calls materialize call target either in gtControlExpr or in gtCallAddr.
genConsumeReg(target);
}
#ifdef FEATURE_READYTORUN
else if (call->IsR2ROrVirtualStubRelativeIndir())
{
assert(((call->IsR2RRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
((call->IsVirtualStubRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_VALUE)));
assert(call->gtControlExpr == nullptr);
regNumber tmpReg = call->GetSingleTempReg();
// The register we save the call address in must not be overwritten by the epilog.
assert((tmpReg & (RBM_INT_CALLEE_TRASH & ~RBM_LR)) == tmpReg);
regNumber callAddrReg =
call->IsVirtualStubRelativeIndir() ? compiler->virtualStubParamInfo->GetReg() : REG_R2R_INDIRECT_PARAM;
GetEmitter()->emitIns_R_R(ins_Load(TYP_I_IMPL), emitActualTypeSize(TYP_I_IMPL), tmpReg, callAddrReg);
// We will use this again when emitting the jump in genCallInstruction in the epilog
call->gtRsvdRegs |= genRegMask(tmpReg);
}
#endif
return;
}
// For a pinvoke to unmanaged code we emit a label to clear
// the GC pointer state before the callsite.
// We can't utilize the typical lazy killing of GC pointers
// at (or inside) the callsite.
if (compiler->killGCRefs(call))
{
genDefineTempLabel(genCreateTempLabel());
}
genCallInstruction(call);
// for pinvoke/intrinsic/tailcalls we may have needed to get the address of
// a label. In case it is indirect with CFG enabled make sure we do not get
// the address after the validation but only after the actual call that
// comes after.
if (genPendingCallLabel && !call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
genDefineInlineTempLabel(genPendingCallLabel);
genPendingCallLabel = nullptr;
}
#ifdef DEBUG
// We should not have GC pointers in killed registers live around the call.
// GC info for arg registers were cleared when consuming arg nodes above
// and LSRA should ensure it for other trashed registers.
regMaskTP killMask = RBM_CALLEE_TRASH;
if (call->IsHelperCall())
{
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
killMask = compiler->compHelperCallKillSet(helpFunc);
}
assert((gcInfo.gcRegGCrefSetCur & killMask) == 0);
assert((gcInfo.gcRegByrefSetCur & killMask) == 0);
#endif
var_types returnType = call->TypeGet();
if (returnType != TYP_VOID)
{
regNumber returnReg;
if (call->HasMultiRegRetVal())
{
const ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
assert(pRetTypeDesc != nullptr);
unsigned regCount = pRetTypeDesc->GetReturnRegCount();
// If regs allocated to call node are different from ABI return
// regs in which the call has returned its result, move the result
// to regs allocated to call node.
for (unsigned i = 0; i < regCount; ++i)
{
var_types regType = pRetTypeDesc->GetReturnRegType(i);
returnReg = pRetTypeDesc->GetABIReturnReg(i);
regNumber allocatedReg = call->GetRegNumByIdx(i);
inst_Mov(regType, allocatedReg, returnReg, /* canSkip */ true);
}
}
else
{
#ifdef TARGET_ARM
if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
// The CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
// TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers.
returnReg = REG_PINVOKE_TCB;
}
else if (compiler->opts.compUseSoftFP)
{
returnReg = REG_INTRET;
}
else
#endif // TARGET_ARM
if (varTypeUsesFloatArgReg(returnType))
{
returnReg = REG_FLOATRET;
}
else
{
returnReg = REG_INTRET;
}
if (call->GetRegNum() != returnReg)
{
#ifdef TARGET_ARM
if (compiler->opts.compUseSoftFP && returnType == TYP_DOUBLE)
{
inst_RV_RV_RV(INS_vmov_i2d, call->GetRegNum(), returnReg, genRegArgNext(returnReg), EA_8BYTE);
}
else if (compiler->opts.compUseSoftFP && returnType == TYP_FLOAT)
{
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ false);
}
else
#endif
{
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ false);
}
}
}
genProduceReg(call);
}
// If there is nothing next, that means the result is thrown away, so this value is not live.
// However, for minopts or debuggable code, we keep it live to support managed return value debugging.
if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
{
gcInfo.gcMarkRegSetNpt(RBM_INTRET);
}
}
//------------------------------------------------------------------------
// genCallInstruction - Generate instructions necessary to transfer control to the call.
//
// Arguments:
// call - the GT_CALL node
//
// Remarks:
// For tailcalls this function will generate a jump.
//
void CodeGen::genCallInstruction(GenTreeCall* call)
{
// Determine return value size(s).
const ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
emitAttr retSize = EA_PTRSIZE;
emitAttr secondRetSize = EA_UNKNOWN;
if (call->HasMultiRegRetVal())
{
retSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(0));
secondRetSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(1));
}
else
{
assert(call->gtType != TYP_STRUCT);
if (call->gtType == TYP_REF)
{
retSize = EA_GCREF;
}
else if (call->gtType == TYP_BYREF)
{
retSize = EA_BYREF;
}
}
DebugInfo di;
// We need to propagate the debug information to the call instruction, so we can emit
// an IL to native mapping record for the call, to support managed return value debugging.
// We don't want tail call helper calls that were converted from normal calls to get a record,
// so we skip this hash table lookup logic in that case.
if (compiler->opts.compDbgInfo && compiler->genCallSite2DebugInfoMap != nullptr && !call->IsTailCall())
{
(void)compiler->genCallSite2DebugInfoMap->Lookup(call, &di);
}
CORINFO_SIG_INFO* sigInfo = nullptr;
#ifdef DEBUG
// Pass the call signature information down into the emitter so the emitter can associate
// native call sites with the signatures they were generated from.
if (call->gtCallType != CT_HELPER)
{
sigInfo = call->callSig;
}
if (call->IsFastTailCall())
{
regMaskTP trashedByEpilog = RBM_CALLEE_SAVED;
// The epilog may use and trash REG_GSCOOKIE_TMP_0/1. Make sure we have no
// non-standard args that may be trashed if this is a tailcall.
if (compiler->getNeedsGSSecurityCookie())
{
trashedByEpilog |= genRegMask(REG_GSCOOKIE_TMP_0);
trashedByEpilog |= genRegMask(REG_GSCOOKIE_TMP_1);
}
for (unsigned i = 0; i < call->fgArgInfo->ArgCount(); i++)
{
fgArgTabEntry* entry = call->fgArgInfo->GetArgEntry(i);
for (unsigned j = 0; j < entry->numRegs; j++)
{
regNumber reg = entry->GetRegNum(j);
if ((trashedByEpilog & genRegMask(reg)) != 0)
{
JITDUMP("Tail call node:\n");
DISPTREE(call);
JITDUMP("Register used: %s\n", getRegName(reg));
assert(!"Argument to tailcall may be trashed by epilog");
}
}
}
}
#endif // DEBUG
CORINFO_METHOD_HANDLE methHnd;
GenTree* target = getCallTarget(call, &methHnd);
if (target != nullptr)
{
// A call target cannot be a contained indirection
assert(!target->isContainedIndir());
// For fast tailcall we have already consumed the target. We ensure in
// RA that the target was allocated into a volatile register that will
// not be messed up by epilog sequence.
if (!call->IsFastTailCall())
{
genConsumeReg(target);
}
// We have already generated code for gtControlExpr evaluating it into a register.
// We just need to emit "call reg" in this case.
//
assert(genIsValidIntReg(target->GetRegNum()));
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr, // addr
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
target->GetRegNum(),
call->IsFastTailCall());
// clang-format on
}
else
{
// If we have no target and this is a call with indirection cell then
// we do an optimization where we load the call address directly from
// the indirection cell instead of duplicating the tree. In BuildCall
// we ensure that we get an extra register for this purpose. Note that for
// CFG the call might have changed to
// CORINFO_HELP_DISPATCH_INDIRECT_CALL in which case we still have the
// indirection cell but we should not try to optimize.
regNumber callThroughIndirReg = REG_NA;
if (!call->IsHelperCall(compiler, CORINFO_HELP_DISPATCH_INDIRECT_CALL))
{
callThroughIndirReg = getCallIndirectionCellReg(call);
}
if (callThroughIndirReg != REG_NA)
{
assert(call->IsR2ROrVirtualStubRelativeIndir());
regNumber targetAddrReg = call->GetSingleTempReg();
// For fast tailcalls we have already loaded the call target when processing the call node.
if (!call->IsFastTailCall())
{
GetEmitter()->emitIns_R_R(ins_Load(TYP_I_IMPL), emitActualTypeSize(TYP_I_IMPL), targetAddrReg,
callThroughIndirReg);
}
else
{
// The register we save the call address in must not be overwritten by the epilog.
assert((targetAddrReg & (RBM_INT_CALLEE_TRASH & ~RBM_LR)) == targetAddrReg);
}
// We have now generated code loading the target address from the indirection cell into `targetAddrReg`.
// We just need to emit "bl targetAddrReg" in this case.
//
assert(genIsValidIntReg(targetAddrReg));
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr, // addr
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
targetAddrReg,
call->IsFastTailCall());
// clang-format on
}
else
{
// Generate a direct call to a non-virtual user defined or helper method
assert(call->gtCallType == CT_HELPER || call->gtCallType == CT_USER_FUNC);
void* addr = nullptr;
#ifdef FEATURE_READYTORUN
if (call->gtEntryPoint.addr != NULL)
{
assert(call->gtEntryPoint.accessType == IAT_VALUE);
addr = call->gtEntryPoint.addr;
}
else
#endif // FEATURE_READYTORUN
if (call->gtCallType == CT_HELPER)
{
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
void* pAddr = nullptr;
addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
assert(pAddr == nullptr);
}
else
{
// Direct call to a non-virtual user function.
addr = call->gtDirectCallAddress;
}
assert(addr != nullptr);
// Non-virtual direct call to known addresses
#ifdef TARGET_ARM
if (!validImmForBL((ssize_t)addr))
{
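// The target address cannot be encoded directly in a BL instruction; materialize it into a register
// (typically a movw/movt pair with a relocation) and call through the register instead.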
regNumber tmpReg = call->GetSingleTempReg();
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, tmpReg, (ssize_t)addr);
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
NULL,
retSize,
di,
tmpReg,
call->IsFastTailCall());
// clang-format on
}
else
#endif // TARGET_ARM
{
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
addr,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
}
}
}
// Produce code for a GT_JMP node.
// The arguments of the caller need to be transferred to the callee before exiting the caller.
// The actual jump to callee is generated as part of caller epilog sequence.
// Therefore the codegen of GT_JMP is to ensure that the callee arguments are correctly setup.
void CodeGen::genJmpMethod(GenTree* jmp)
{
assert(jmp->OperGet() == GT_JMP);
assert(compiler->compJmpOpUsed);
// If no arguments, nothing to do
if (compiler->info.compArgsCount == 0)
{
return;
}
// Make sure register arguments are in their initial registers
// and stack arguments are put back as well.
unsigned varNum;
LclVarDsc* varDsc;
// First, move any enregistered stack arguments back to the stack.
// At the same time, any reg arg not in the correct register is moved back to its stack location.
//
// We are not strictly required to spill reg args that are not in the desired reg for a jmp call.
// But that would require us to deal with circularity while moving values around. Spilling
// to the stack keeps the implementation simple, which is not a bad trade-off given that jmp calls
// are not frequent.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
if (varDsc->lvIsRegArg && (varDsc->GetRegNum() != REG_STK))
{
// Skip reg args that are already in the right register for the jmp call.
// If not, we will spill such args to their stack locations.
//
// If we need to generate a tail call profiler hook, then spill all
// arg regs to free them up for the callback.
if (!compiler->compIsProfilerHookNeeded() && (varDsc->GetRegNum() == varDsc->GetArgReg()))
continue;
}
else if (varDsc->GetRegNum() == REG_STK)
{
// Skip args that currently live on the stack.
continue;
}
// If we came here it means either a reg argument not in the right register or
// a stack argument currently living in a register. In either case the following
// assert should hold.
assert(varDsc->GetRegNum() != REG_STK);
assert(varDsc->IsEnregisterableLcl());
var_types storeType = varDsc->GetActualRegisterType();
emitAttr storeSize = emitActualTypeSize(storeType);
#ifdef TARGET_ARM
if (varDsc->TypeGet() == TYP_LONG)
{
// long - at least the low half must be enregistered
GetEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->GetRegNum(), varNum, 0);
// Is the upper half also enregistered?
if (varDsc->GetOtherReg() != REG_STK)
{
GetEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->GetOtherReg(), varNum, sizeof(int));
}
}
else
#endif // TARGET_ARM
{
GetEmitter()->emitIns_S_R(ins_Store(storeType), storeSize, varDsc->GetRegNum(), varNum, 0);
}
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be expecting it.
// Therefore manually update life of varDsc->GetRegNum().
regMaskTP tempMask = genRegMask(varDsc->GetRegNum());
regSet.RemoveMaskVars(tempMask);
gcInfo.gcMarkRegSetNpt(tempMask);
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varNum);
}
}
#ifdef PROFILING_SUPPORTED
// At this point all arg regs are free.
// Emit tail call profiler callback.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif
// Next, move any register arguments that are not currently enregistered back into their argument registers.
regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
// Skip if arg not passed in a register.
if (!varDsc->lvIsRegArg)
continue;
// Register argument
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
// Is register argument already in the right register?
// If not load it from its stack location.
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
regNumber argRegNext = REG_NA;
#ifdef TARGET_ARM64
if (varDsc->GetRegNum() != argReg)
{
var_types loadType = TYP_UNDEF;
if (varDsc->lvIsHfaRegArg())
{
// Note that for HFA, the argument is currently marked address exposed so lvRegNum will always be
// REG_STK. We home the incoming HFA argument registers in the prolog. Then we'll load them back
// here, whether they are already in the correct registers or not. This is such a corner case that
// it is not worth optimizing it.
assert(!compiler->info.compIsVarArgs);
loadType = varDsc->GetHfaType();
regNumber fieldReg = argReg;
emitAttr loadSize = emitActualTypeSize(loadType);
unsigned cSlots = varDsc->lvHfaSlots();
for (unsigned ofs = 0, cSlot = 0; cSlot < cSlots; cSlot++, ofs += (unsigned)loadSize)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
assert(genIsValidFloatReg(fieldReg)); // No GC register tracking for floating point registers.
fieldReg = regNextOfType(fieldReg, loadType);
}
}
else
{
if (varTypeIsStruct(varDsc))
{
// Must be <= 16 bytes or else it wouldn't be passed in registers, except for HFA,
// which can be bigger (and is handled above).
noway_assert(EA_SIZE_IN_BYTES(varDsc->lvSize()) <= 16);
loadType = varDsc->GetLayout()->GetGCPtrType(0);
}
else
{
loadType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
}
emitAttr loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argReg, varNum, 0);
// Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be
// expecting it. Therefore manually update life of argReg. Note that GT_JMP marks the end of
// the basic block and after which reg life and gc info will be recomputed for the new block
// in genCodeForBBList().
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
{
// Restore the second register.
argRegNext = genRegArgNext(argReg);
loadType = varDsc->GetLayout()->GetGCPtrType(1);
loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argRegNext, varNum, TARGET_POINTER_SIZE);
regSet.AddMaskVars(genRegMask(argRegNext));
gcInfo.gcMarkRegPtrVal(argRegNext, loadType);
}
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
}
if (compiler->info.compIsVarArgs)
{
// In case of a jmp call to a vararg method ensure only integer registers are passed.
assert((genRegMask(argReg) & (RBM_ARG_REGS | RBM_ARG_RET_BUFF)) != RBM_NONE);
assert(!varDsc->lvIsHfaRegArg());
fixedIntArgMask |= genRegMask(argReg);
if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
{
assert(argRegNext != REG_NA);
fixedIntArgMask |= genRegMask(argRegNext);
}
if (argReg == REG_ARG_0)
{
assert(firstArgVarNum == BAD_VAR_NUM);
firstArgVarNum = varNum;
}
}
#else // !TARGET_ARM64
bool twoParts = false;
var_types loadType = TYP_UNDEF;
if (varDsc->TypeGet() == TYP_LONG)
{
twoParts = true;
}
else if (varDsc->TypeGet() == TYP_DOUBLE)
{
if (compiler->info.compIsVarArgs || compiler->opts.compUseSoftFP)
{
twoParts = true;
}
}
if (twoParts)
{
argRegNext = genRegArgNext(argReg);
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, varNum, 0);
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argRegNext, varNum, REGSIZE_BYTES);
}
if (compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(argReg);
fixedIntArgMask |= genRegMask(argRegNext);
}
}
else if (varDsc->lvIsHfaRegArg())
{
loadType = varDsc->GetHfaType();
regNumber fieldReg = argReg;
emitAttr loadSize = emitActualTypeSize(loadType);
unsigned maxSize = min(varDsc->lvSize(), (LAST_FP_ARGREG + 1 - argReg) * REGSIZE_BYTES);
for (unsigned ofs = 0; ofs < maxSize; ofs += (unsigned)loadSize)
{
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
}
assert(genIsValidFloatReg(fieldReg)); // we don't use register tracking for FP
fieldReg = regNextOfType(fieldReg, loadType);
}
}
else if (varTypeIsStruct(varDsc))
{
regNumber slotReg = argReg;
unsigned maxSize = min(varDsc->lvSize(), (REG_ARG_LAST + 1 - argReg) * REGSIZE_BYTES);
for (unsigned ofs = 0; ofs < maxSize; ofs += REGSIZE_BYTES)
{
unsigned idx = ofs / REGSIZE_BYTES;
loadType = varDsc->GetLayout()->GetGCPtrType(idx);
if (varDsc->GetRegNum() != argReg)
{
emitAttr loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, slotReg, varNum, ofs);
}
regSet.AddMaskVars(genRegMask(slotReg));
gcInfo.gcMarkRegPtrVal(slotReg, loadType);
if (genIsValidIntReg(slotReg) && compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(slotReg);
}
slotReg = genRegArgNext(slotReg);
}
}
else
{
loadType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
}
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (genIsValidIntReg(argReg) && compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(argReg);
}
}
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
#endif // !TARGET_ARM64
}
// Jmp call to a vararg method - if the method has fewer fixed arguments than there are integer argument
// registers, load the remaining integer arg registers from the corresponding
// shadow stack slots. This is because we don't know the number and type
// of the non-fixed params passed by the caller, so we have to assume the worst case:
// that the caller passed values in all of the integer arg registers.
//
// The caller could have passed gc-ref/byref type var args. Since these are var args,
// the callee has no way of knowing their gc-ness. Therefore, mark the region that loads the
// remaining arg registers from the shadow stack slots as non-gc interruptible.
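// For example, if only the first two integer argument registers held fixed args, the loop below reloads each of
// the remaining integer argument registers from its REGSIZE_BYTES-sized slot in the incoming argument area,
// with GC interruption disabled around the loads.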
if (fixedIntArgMask != RBM_NONE)
{
assert(compiler->info.compIsVarArgs);
assert(firstArgVarNum != BAD_VAR_NUM);
regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
if (remainingIntArgMask != RBM_NONE)
{
GetEmitter()->emitDisableGC();
for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
regNumber argReg = intArgRegs[argNum];
regMaskTP argRegMask = genRegMask(argReg);
if ((remainingIntArgMask & argRegMask) != 0)
{
remainingIntArgMask &= ~argRegMask;
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, firstArgVarNum, argOffset);
}
argOffset += REGSIZE_BYTES;
}
GetEmitter()->emitEnableGC();
}
}
}
//------------------------------------------------------------------------
// genIntCastOverflowCheck: Generate overflow checking code for an integer cast.
//
// Arguments:
// cast - The GT_CAST node
// desc - The cast description
// reg - The register containing the value to check
//
void CodeGen::genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg)
{
switch (desc.CheckKind())
{
case GenIntCastDesc::CHECK_POSITIVE:
GetEmitter()->emitIns_R_I(INS_cmp, EA_ATTR(desc.CheckSrcSize()), reg, 0);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::CHECK_UINT_RANGE:
// We need to check if the value is not greater than 0xFFFFFFFF but this value
// cannot be encoded in the immediate operand of CMP. Use TST instead to check
// if the upper 32 bits are zero.
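// That is, roughly: 'tst xReg, #0xFFFFFFFF00000000' followed by a 'b.ne' to the overflow throw block.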
GetEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF00000000LL);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_POSITIVE_INT_RANGE:
// We need to check if the value is not greater than 0x7FFFFFFF but this value
// cannot be encoded in the immediate operand of CMP. Use TST instead to check
// if the upper 33 bits are zero.
GetEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF80000000LL);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_INT_RANGE:
{
const regNumber tempReg = cast->GetSingleTempReg();
assert(tempReg != reg);
instGen_Set_Reg_To_Imm(EA_8BYTE, tempReg, INT32_MAX);
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
genJumpToThrowHlpBlk(EJ_gt, SCK_OVERFLOW);
instGen_Set_Reg_To_Imm(EA_8BYTE, tempReg, INT32_MIN);
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
}
break;
#endif
default:
{
assert(desc.CheckKind() == GenIntCastDesc::CHECK_SMALL_INT_RANGE);
const int castMaxValue = desc.CheckSmallIntMax();
const int castMinValue = desc.CheckSmallIntMin();
// Values greater than 255 cannot be encoded in the immediate operand of CMP.
// Replace (x > max) with (x >= max + 1) where max + 1 (a power of 2) can be
// encoded. We could do this for all max values but on ARM32 "cmp r0, 255"
// is better than "cmp r0, 256" because it has a shorter encoding.
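// For example, an int-to-ushort overflow check becomes roughly 'cmp reg, #0x10000' followed by a 'b.hs'
// (unsigned >=) branch to the overflow throw block.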
if (castMaxValue > 255)
{
assert((castMaxValue == 32767) || (castMaxValue == 65535));
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue + 1);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_hs : EJ_ge, SCK_OVERFLOW);
}
else
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_hi : EJ_gt, SCK_OVERFLOW);
}
if (castMinValue != 0)
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
}
}
break;
}
}
//------------------------------------------------------------------------
// genIntToIntCast: Generate code for an integer cast, with or without overflow check.
//
// Arguments:
// cast - The GT_CAST node
//
// Assumptions:
// The cast node is not a contained node and must have an assigned register.
// Neither the source nor target type can be a floating point type.
//
// TODO-ARM64-CQ: Allow castOp to be a contained node without an assigned register.
//
void CodeGen::genIntToIntCast(GenTreeCast* cast)
{
genConsumeRegs(cast->gtGetOp1());
const regNumber srcReg = cast->gtGetOp1()->GetRegNum();
const regNumber dstReg = cast->GetRegNum();
assert(genIsValidIntReg(srcReg));
assert(genIsValidIntReg(dstReg));
GenIntCastDesc desc(cast);
if (desc.CheckKind() != GenIntCastDesc::CHECK_NONE)
{
genIntCastOverflowCheck(cast, desc, srcReg);
}
if ((desc.ExtendKind() != GenIntCastDesc::COPY) || (srcReg != dstReg))
{
instruction ins;
unsigned insSize;
switch (desc.ExtendKind())
{
case GenIntCastDesc::ZERO_EXTEND_SMALL_INT:
ins = (desc.ExtendSrcSize() == 1) ? INS_uxtb : INS_uxth;
insSize = 4;
break;
case GenIntCastDesc::SIGN_EXTEND_SMALL_INT:
ins = (desc.ExtendSrcSize() == 1) ? INS_sxtb : INS_sxth;
insSize = 4;
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::ZERO_EXTEND_INT:
ins = INS_mov;
insSize = 4;
break;
case GenIntCastDesc::SIGN_EXTEND_INT:
ins = INS_sxtw;
insSize = 8;
break;
#endif
default:
assert(desc.ExtendKind() == GenIntCastDesc::COPY);
ins = INS_mov;
insSize = desc.ExtendSrcSize();
break;
}
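// Note: on arm64 a 4-byte 'mov' (used for the ZERO_EXTEND_INT case) implicitly zeroes the upper 32 bits of
// the destination register, so no explicit zero-extension instruction is needed.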
GetEmitter()->emitIns_Mov(ins, EA_ATTR(insSize), dstReg, srcReg, /* canSkip */ false);
}
genProduceReg(cast);
}
//------------------------------------------------------------------------
// genFloatToFloatCast: Generate code for a cast between float and double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// The cast is between float and double.
//
void CodeGen::genFloatToFloatCast(GenTree* treeNode)
{
// float <--> double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidFloatReg(op1->GetRegNum())); // Must be a valid float reg.
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
genConsumeOperands(treeNode->AsOp());
// treeNode must be a reg
assert(!treeNode->isContained());
#if defined(TARGET_ARM)
if (srcType != dstType)
{
instruction insVcvt = (srcType == TYP_FLOAT) ? INS_vcvt_f2d // convert Float to Double
: INS_vcvt_d2f; // convert Double to Float
GetEmitter()->emitIns_R_R(insVcvt, emitTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum());
}
else
{
GetEmitter()->emitIns_Mov(INS_vmov, emitTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
/* canSkip */ true);
}
#elif defined(TARGET_ARM64)
if (srcType != dstType)
{
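// e.g. 'fcvt d0, s0' for a float-to-double conversion, or 'fcvt s0, d0' for double-to-float.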
insOpts cvtOption = (srcType == TYP_FLOAT) ? INS_OPTS_S_TO_D // convert Single to Double
: INS_OPTS_D_TO_S; // convert Double to Single
GetEmitter()->emitIns_R_R(INS_fcvt, emitActualTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
cvtOption);
}
else
{
// If double to double cast or float to float cast. Emit a move instruction.
GetEmitter()->emitIns_Mov(INS_mov, emitActualTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
/* canSkip */ true);
}
#endif // TARGET*
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCreateAndStoreGCInfo: Create and record GC Info for the function.
//
void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr))
{
IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC());
GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder != nullptr);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
// We keep the call count for the second call to gcMakeRegPtrTable() below.
unsigned callCnt = 0;
// First we figure out the encoder ID's for the stack slots and registers.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
// Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
gcInfoEncoder->FinalizeSlotIds();
// Now we can actually use those slot ID's to declare live ranges.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
#ifdef TARGET_ARM64
if (compiler->opts.compDbgEnC)
{
// what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
// which is:
// -return address
// -saved off RBP
// -saved 'this' pointer and bool for synchronized methods
// 4 slots for RBP + return address + RSI + RDI (register names follow the x64 description in eetwain.cpp)
int preservedAreaSize = 4 * REGSIZE_BYTES;
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
preservedAreaSize += REGSIZE_BYTES;
preservedAreaSize += 1; // bool for synchronized methods
}
// Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
// frame
gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
}
#endif // TARGET_ARM64
if (compiler->opts.IsReversePInvoke())
{
unsigned reversePInvokeFrameVarNumber = compiler->lvaReversePInvokeFrameVar;
assert(reversePInvokeFrameVarNumber != BAD_VAR_NUM);
const LclVarDsc* reversePInvokeFrameVar = compiler->lvaGetDesc(reversePInvokeFrameVarNumber);
gcInfoEncoder->SetReversePInvokeFrameSlot(reversePInvokeFrameVar->GetStackOffset());
}
gcInfoEncoder->Build();
// GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
// let's save the values anyway for debugging purposes
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
// clang-format off
const CodeGen::GenConditionDesc CodeGen::GenConditionDesc::map[32]
{
{ }, // NONE
{ }, // 1
{ EJ_lt }, // SLT
{ EJ_le }, // SLE
{ EJ_ge }, // SGE
{ EJ_gt }, // SGT
{ EJ_mi }, // S
{ EJ_pl }, // NS
{ EJ_eq }, // EQ
{ EJ_ne }, // NE
{ EJ_lo }, // ULT
{ EJ_ls }, // ULE
{ EJ_hs }, // UGE
{ EJ_hi }, // UGT
{ EJ_hs }, // C
{ EJ_lo }, // NC
{ EJ_eq }, // FEQ
{ EJ_gt, GT_AND, EJ_lo }, // FNE
{ EJ_lo }, // FLT
{ EJ_ls }, // FLE
{ EJ_ge }, // FGE
{ EJ_gt }, // FGT
{ EJ_vs }, // O
{ EJ_vc }, // NO
{ EJ_eq, GT_OR, EJ_vs }, // FEQU
{ EJ_ne }, // FNEU
{ EJ_lt }, // FLTU
{ EJ_le }, // FLEU
{ EJ_hs }, // FGEU
{ EJ_hi }, // FGTU
{ }, // P
{ }, // NP
};
// clang-format on
//------------------------------------------------------------------------
// inst_SETCC: Generate code to set a register to 0 or 1 based on a condition.
//
// Arguments:
// condition - The condition
// type - The type of the value to be produced
// dstReg - The destination register to be set to 1 or 0
//
void CodeGen::inst_SETCC(GenCondition condition, var_types type, regNumber dstReg)
{
assert(varTypeIsIntegral(type));
assert(genIsValidIntReg(dstReg));
#ifdef TARGET_ARM64
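// On arm64 the condition is materialized with 'cset' (e.g. 'cset x0, gt'); compound floating-point conditions
// emit a second 'cset' guarded by a conditional branch around it.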
const GenConditionDesc& desc = GenConditionDesc::Get(condition);
inst_SET(desc.jumpKind1, dstReg);
if (desc.oper != GT_NONE)
{
BasicBlock* labelNext = genCreateTempLabel();
inst_JMP((desc.oper == GT_OR) ? desc.jumpKind1 : emitter::emitReverseJumpKind(desc.jumpKind1), labelNext);
inst_SET(desc.jumpKind2, dstReg);
genDefineTempLabel(labelNext);
}
#else
// Emit code like this:
// ...
// bgt True
// movs rD, #0
// b Next
// True:
// movs rD, #1
// Next:
// ...
BasicBlock* labelTrue = genCreateTempLabel();
inst_JCC(condition, labelTrue);
GetEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 0);
BasicBlock* labelNext = genCreateTempLabel();
GetEmitter()->emitIns_J(INS_b, labelNext);
genDefineTempLabel(labelTrue);
GetEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 1);
genDefineTempLabel(labelNext);
#endif
}
//------------------------------------------------------------------------
// genCodeForStoreBlk: Produce code for a GT_STORE_OBJ/GT_STORE_DYN_BLK/GT_STORE_BLK node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForStoreBlk(GenTreeBlk* blkOp)
{
assert(blkOp->OperIs(GT_STORE_OBJ, GT_STORE_DYN_BLK, GT_STORE_BLK));
if (blkOp->OperIs(GT_STORE_OBJ))
{
assert(!blkOp->gtBlkOpGcUnsafe);
assert(blkOp->OperIsCopyBlkOp());
assert(blkOp->AsObj()->GetLayout()->HasGCPtr());
genCodeForCpObj(blkOp->AsObj());
return;
}
bool isCopyBlk = blkOp->OperIsCopyBlkOp();
switch (blkOp->gtBlkOpKind)
{
case GenTreeBlk::BlkOpKindHelper:
assert(!blkOp->gtBlkOpGcUnsafe);
if (isCopyBlk)
{
genCodeForCpBlkHelper(blkOp);
}
else
{
genCodeForInitBlkHelper(blkOp);
}
break;
case GenTreeBlk::BlkOpKindUnroll:
if (isCopyBlk)
{
if (blkOp->gtBlkOpGcUnsafe)
{
GetEmitter()->emitDisableGC();
}
genCodeForCpBlkUnroll(blkOp);
if (blkOp->gtBlkOpGcUnsafe)
{
GetEmitter()->emitEnableGC();
}
}
else
{
assert(!blkOp->gtBlkOpGcUnsafe);
genCodeForInitBlkUnroll(blkOp);
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genScaledAdd: A helper for genLeaInstruction.
//
void CodeGen::genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale)
{
emitter* emit = GetEmitter();
if (scale == 0)
{
// target = base + index
GetEmitter()->emitIns_R_R_R(INS_add, attr, targetReg, baseReg, indexReg);
}
else
{
// target = base + index<<scale
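// e.g. 'add dst, base, index, LSL #scale' on both arm and arm64.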
#if defined(TARGET_ARM)
emit->emitIns_R_R_R_I(INS_add, attr, targetReg, baseReg, indexReg, scale, INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
#elif defined(TARGET_ARM64)
emit->emitIns_R_R_R_I(INS_add, attr, targetReg, baseReg, indexReg, scale, INS_OPTS_LSL);
#endif
}
}
//------------------------------------------------------------------------
// genCodeForMulLong: Generates code for int*int->long multiplication.
//
// Arguments:
// mul - the GT_MUL_LONG node
//
// Return Value:
// None.
//
void CodeGen::genCodeForMulLong(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL_LONG));
genConsumeOperands(mul);
regNumber srcReg1 = mul->gtGetOp1()->GetRegNum();
regNumber srcReg2 = mul->gtGetOp2()->GetRegNum();
instruction ins = mul->IsUnsigned() ? INS_umull : INS_smull;
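// e.g. arm: 'smull rLo, rHi, rSrc1, rSrc2' (the two 32-bit halves of the result go to separate registers);
// arm64: 'smull xDst, wSrc1, wSrc2' (the full 64-bit result goes to a single register).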
#ifdef TARGET_ARM
GetEmitter()->emitIns_R_R_R_R(ins, EA_4BYTE, mul->GetRegNum(), mul->AsMultiRegOp()->gtOtherReg, srcReg1, srcReg2);
#else
GetEmitter()->emitIns_R_R_R(ins, EA_4BYTE, mul->GetRegNum(), srcReg1, srcReg2);
#endif
genProduceReg(mul);
}
//------------------------------------------------------------------------
// genLeaInstruction: Produce code for a GT_LEA node.
//
// Arguments:
// lea - the node
//
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
genConsumeOperands(lea);
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(lea);
int offset = lea->Offset();
// In ARM we can only load addresses of the form:
//
// [Base + index*scale]
// [Base + Offset]
// [Literal] (PC-Relative)
//
// So for the case of a LEA node of the form [Base + Index*Scale + Offset] we will generate:
// destReg = baseReg + indexReg * scale;
// destReg = destReg + offset;
//
// TODO-ARM64-CQ: The purpose of the GT_LEA node is to directly reflect a single target architecture
// addressing mode instruction. Currently we're 'cheating' by producing one or more
// instructions to generate the addressing mode so we need to modify lowering to
// produce LEAs that have a 1:1 relationship with the ARM64 addressing modes.
if (lea->Base() && lea->Index())
{
GenTree* memBase = lea->Base();
GenTree* index = lea->Index();
DWORD scale;
assert(isPow2(lea->gtScale));
BitScanForward(&scale, lea->gtScale);
assert(scale <= 4);
if (offset != 0)
{
regNumber tmpReg = lea->GetSingleTempReg();
// When generating fully interruptible code we have to use the "large offset" sequence
// when calculating an EA_BYREF, as we can't report a byref that points outside of the object
//
bool useLargeOffsetSeq = compiler->GetInterruptible() && (size == EA_BYREF);
if (!useLargeOffsetSeq && emitter::emitIns_valid_imm_for_add(offset))
{
// Generate code to set tmpReg = base + index*scale
genScaledAdd(size, tmpReg, memBase->GetRegNum(), index->GetRegNum(), scale);
// Then compute target reg from [tmpReg + offset]
emit->emitIns_R_R_I(INS_add, size, lea->GetRegNum(), tmpReg, offset);
}
else // large offset sequence
{
noway_assert(tmpReg != index->GetRegNum());
noway_assert(tmpReg != memBase->GetRegNum());
// First load/store tmpReg with the offset constant
// rTmp = imm
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then add the scaled index register
// rTmp = rTmp + index*scale
genScaledAdd(EA_PTRSIZE, tmpReg, tmpReg, index->GetRegNum(), scale);
// Then compute target reg from [base + tmpReg ]
// rDst = base + rTmp
emit->emitIns_R_R_R(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), tmpReg);
}
}
else
{
// Then compute target reg from [base + index*scale]
genScaledAdd(size, lea->GetRegNum(), memBase->GetRegNum(), index->GetRegNum(), scale);
}
}
else if (lea->Base())
{
GenTree* memBase = lea->Base();
if (emitter::emitIns_valid_imm_for_add(offset))
{
if (offset != 0)
{
// Then compute target reg from [memBase + offset]
emit->emitIns_R_R_I(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), offset);
}
else // offset is zero
{
emit->emitIns_Mov(INS_mov, size, lea->GetRegNum(), memBase->GetRegNum(), /* canSkip */ true);
}
}
else
{
// We require a tmpReg to hold the offset
regNumber tmpReg = lea->GetSingleTempReg();
// First load tmpReg with the large offset constant
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then compute target reg from [memBase + tmpReg]
emit->emitIns_R_R_R(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), tmpReg);
}
}
else if (lea->Index())
{
// If we encounter a GT_LEA node without a base it means it came out
// when attempting to optimize an arbitrary arithmetic expression during lowering.
// This is currently disabled in ARM64 since we need to adjust lower to account
// for the simpler instructions ARM64 supports.
// TODO-ARM64-CQ: Fix this and let LEA optimize arithmetic trees too.
assert(!"We shouldn't see a baseless address computation during CodeGen for ARM64");
}
genProduceReg(lea);
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// genSIMDSplitReturn: Generates code for returning a fixed-size SIMD type that lives
// in a single register, but is returned in multiple registers.
//
// Arguments:
// src - The source of the return
// retTypeDesc - The return type descriptor.
//
void CodeGen::genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc)
{
assert(varTypeIsSIMD(src));
assert(src->isUsedFromReg());
regNumber srcReg = src->GetRegNum();
// Treat the src register as a homogeneous vector with element size equal to the reg size
// Insert pieces in order
unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc->GetReturnRegType(i);
regNumber reg = retTypeDesc->GetABIReturnReg(i);
if (varTypeIsFloating(type))
{
// If the register piece is to be passed in a floating point register
// Use a vector mov element instruction
// reg is not a vector, so it is in the first element reg[0]
// mov reg[0], src[i]
// This effectively moves from `src[i]` to `reg[0]`, upper bits of reg remain unchanged
// For the case where src == reg, since we are only writing reg[0], as long as we iterate
// so that src[0] is consumed before writing reg[0], we do not need a temporary.
GetEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), reg, srcReg, 0, i);
}
else
{
// If the register piece is to be passed in an integer register
// Use a vector mov to general purpose register instruction
// mov reg, src[i]
// This effectively moves from `src[i]` to `reg`
GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), reg, srcReg, i);
}
}
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genPushCalleeSavedRegisters: Push any callee-saved registers we have used.
//
// Arguments (arm64):
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
#if defined(TARGET_ARM64)
void CodeGen::genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed)
#else
void CodeGen::genPushCalleeSavedRegisters()
#endif
{
assert(compiler->compGeneratingProlog);
#ifdef TARGET_ARM64
// Probe large frames now, if necessary, since genPushCalleeSavedRegisters() will allocate the frame. Note that
// for arm64, genAllocLclFrame only probes the frame; it does not actually allocate it (it does not change SP).
// For arm64, we are probing the frame before the callee-saved registers are saved. The 'initReg' might have
// been calculated to be one of the callee-saved registers (say, if all the integer argument registers are
// in use, and perhaps with other conditions being satisfied). This is ok in other cases, after the callee-saved
// registers have been saved. So instead of letting genAllocLclFrame use initReg as a temporary register,
// always use REG_SCRATCH. We don't care if it trashes it, so ignore the initRegZeroed output argument.
bool ignoreInitRegZeroed = false;
genAllocLclFrame(compiler->compLclFrameSize, REG_SCRATCH, &ignoreInitRegZeroed,
intRegState.rsCalleeRegArgMaskLiveIn);
#endif
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// On ARM we push the FP (frame-pointer) here along with all other callee saved registers
if (isFramePointerUsed())
rsPushRegs |= RBM_FPBASE;
//
// It may be possible to skip pushing/popping lr for leaf methods. However, such optimization would require
// changes in GC suspension architecture.
//
// We would need to guarantee that a tight loop calling a virtual leaf method can be suspended for GC. Today, we
// generate partially interruptible code for both the method that contains the tight loop with the call and the leaf
// method. GC suspension depends on return address hijacking in this case. Return address hijacking depends
// on the return address being saved on the stack. If we skipped pushing/popping lr, the return address would never
// be saved on the stack and the GC suspension would time out.
//
// So if we wanted to skip pushing/popping lr for leaf frames, we would also need to do one of
// the following to make GC suspension work in the above scenario:
// - Make return address hijacking work even when lr is not saved on the stack.
// - Generate fully interruptible code for loops that contains calls
// - Generate fully interruptible code for leaf methods
//
// Given the limited benefit from this optimization (<10k for CoreLib NGen image), the extra complexity
// is not worth it.
//
rsPushRegs |= RBM_LR; // We must save the return address (in the LR register)
regSet.rsMaskCalleeSaved = rsPushRegs;
#ifdef DEBUG
if (compiler->compCalleeRegsPushed != genCountBits(rsPushRegs))
{
printf("Error: unexpected number of callee-saved registers to push. Expected: %d. Got: %d ",
compiler->compCalleeRegsPushed, genCountBits(rsPushRegs));
dspRegMask(rsPushRegs);
printf("\n");
assert(compiler->compCalleeRegsPushed == genCountBits(rsPushRegs));
}
#endif // DEBUG
#if defined(TARGET_ARM)
regMaskTP maskPushRegsFloat = rsPushRegs & RBM_ALLFLOAT;
regMaskTP maskPushRegsInt = rsPushRegs & ~maskPushRegsFloat;
maskPushRegsInt |= genStackAllocRegisterMask(compiler->compLclFrameSize, maskPushRegsFloat);
assert(FitsIn<int>(maskPushRegsInt));
inst_IV(INS_push, (int)maskPushRegsInt);
compiler->unwindPushMaskInt(maskPushRegsInt);
if (maskPushRegsFloat != 0)
{
genPushFltRegs(maskPushRegsFloat);
compiler->unwindPushMaskFloat(maskPushRegsFloat);
}
#elif defined(TARGET_ARM64)
// See the document "ARM64 JIT Frame Layout" and/or "ARM64 Exception Data" for more details or requirements and
// options. Case numbers in comments here refer to this document. See also Compiler::lvaAssignFrameOffsets()
// for pictures of the general frame layouts, and CodeGen::genFuncletProlog() implementations (per architecture)
// for pictures of the funclet frame layouts.
//
// For most frames, generate, e.g.:
// stp fp, lr, [sp,-0x80]! // predecrement SP with full frame size, and store FP/LR pair.
// stp r19, r20, [sp, 0x60] // store at positive offset from SP established above, into callee-saved area
// // at top of frame (highest addresses).
// stp r21, r22, [sp, 0x70]
//
// Notes:
// 1. We don't always need to save FP. If FP isn't saved, then LR is saved with the other callee-saved registers
// at the top of the frame.
// 2. If we save FP, then the first store is FP, LR.
// 3. General-purpose registers are 8 bytes, floating-point registers are 16 bytes, but FP/SIMD registers only
// preserve their lower 8 bytes, by calling convention.
// 4. For frames with varargs, we spill the integer register arguments to the stack, so all the arguments are
// consecutive, and at the top of the frame.
// 5. We allocate the frame here; no further changes to SP are allowed (except in the body, for localloc).
//
// For functions with GS and localloc, we change the frame so the frame pointer and LR are saved at the top
// of the frame, just under the varargs registers (if any). Note that the funclet frames must follow the same
// rule, and both main frame and funclet frames (if any) must put PSPSym in the same offset from Caller-SP.
// Since this frame type is relatively rare, we force using it via stress modes, for additional coverage.
//
// The frames look like the following (simplified to only include components that matter for establishing the
// frames). See also Compiler::lvaAssignFrameOffsets().
//
// Frames with FP, LR saved at bottom of frame (above outgoing argument space):
//
// | |
// |-----------------------|
// | incoming arguments |
// +=======================+ <---- Caller's SP
// | Varargs regs space | // Only for varargs functions; 64 bytes
// |-----------------------|
// |Callee saved registers | // not including FP/LR; multiple of 8 bytes
// |-----------------------|
// | PSP slot | // 8 bytes (omitted in CoreRT ABI)
// |-----------------------|
// | locals, temps, etc. |
// |-----------------------|
// | possible GS cookie |
// |-----------------------|
// | Saved LR | // 8 bytes
// |-----------------------|
// | Saved FP | // 8 bytes
// |-----------------------|
// | Outgoing arg space | // multiple of 8 bytes; if required (i.e., #outsz != 0)
// |-----------------------| <---- Ambient SP
// | | |
// ~ | Stack grows ~
// | | downward |
// V
//
// Frames with FP, LR saved at top of frame (below saved varargs incoming arguments):
//
// | |
// |-----------------------|
// | incoming arguments |
// +=======================+ <---- Caller's SP
// | Varargs regs space | // Only for varargs functions; 64 bytes
// |-----------------------|
// | Saved LR | // 8 bytes
// |-----------------------|
// | Saved FP | // 8 bytes
// |-----------------------|
// |Callee saved registers | // not including FP/LR; multiple of 8 bytes
// |-----------------------|
// | PSP slot | // 8 bytes (omitted in CoreRT ABI)
// |-----------------------|
// | locals, temps, etc. |
// |-----------------------|
// | possible GS cookie |
// |-----------------------|
// | Outgoing arg space | // multiple of 8 bytes; if required (i.e., #outsz != 0)
// |-----------------------| <---- Ambient SP
// | | |
// ~ | Stack grows ~
// | | downward |
// V
//
int totalFrameSize = genTotalFrameSize();
int offset; // This will be the starting place for saving the callee-saved registers, in increasing order.
regMaskTP maskSaveRegsFloat = rsPushRegs & RBM_ALLFLOAT;
regMaskTP maskSaveRegsInt = rsPushRegs & ~maskSaveRegsFloat;
#ifdef DEBUG
if (verbose)
{
printf("Save float regs: ");
dspRegMask(maskSaveRegsFloat);
printf("\n");
printf("Save int regs: ");
dspRegMask(maskSaveRegsInt);
printf("\n");
}
#endif // DEBUG
// The frameType number is arbitrary, is defined below, and corresponds to one of the frame styles we
// generate based on various sizes.
int frameType = 0;
// The amount to subtract from SP before starting to store the callee-saved registers. It might be folded into the
// first save instruction as a "predecrement" amount, if possible.
int calleeSaveSpDelta = 0;
if (isFramePointerUsed())
{
// We need to save both FP and LR.
assert((maskSaveRegsInt & RBM_FP) != 0);
assert((maskSaveRegsInt & RBM_LR) != 0);
// If we need to generate a GS cookie, we need to make sure the saved frame pointer and return address
// (FP and LR) are protected from buffer overrun by the GS cookie. If FP/LR are at the lowest addresses,
// then they are safe, since they are lower than any unsafe buffers. And the GS cookie we add will
// protect our caller's frame. If we have a localloc, however, that is dynamically placed lower than our
// saved FP/LR. In that case, we save FP/LR along with the rest of the callee-saved registers, above
// the GS cookie.
//
// After the frame is allocated, the frame pointer is established, pointing at the saved frame pointer to
// create a frame pointer chain.
//
// Do we need another frame pointer register to get good code quality in the case of having the frame pointer
// point high in the frame, so we can take advantage of arm64's preference for positive offsets? C++ native
// code dedicates callee-saved x19 to this, so generates:
// mov x19, sp
// in the prolog, then uses x19 for local var accesses. Given that this case is so rare, we currently do
// not do this. That means that negative offsets from FP might need to use the reserved register to form
// the local variable offset for an addressing mode.
if (((compiler->lvaOutgoingArgSpaceSize == 0) && (totalFrameSize <= 504)) &&
!genSaveFpLrWithAllCalleeSavedRegisters)
{
// Case #1.
//
// Generate:
// stp fp,lr,[sp,#-framesz]!
//
// The (totalFrameSize <= 504) condition ensures that both the pre-index STP instruction
// used in the prolog, and the post-index LDP instruction used in the epilog, can be generated.
// Note that STP and the unwind codes can handle -512, but LDP with a positive post-index value
// can only handle up to 504, and we want our prolog and epilog to match.
//
// After saving callee-saved registers, we establish the frame pointer with:
// mov fp,sp
// We do this *after* saving callee-saved registers, so the prolog/epilog unwind codes mostly match.
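        // A rough illustrative example (assuming callee-saved x19/x20 in addition to FP/LR and 16 bytes of
        // locals, so #framesz == 48); the exact emitted sequence may differ:
        //      stp fp,lr,[sp,#-48]!
        //      stp x19,x20,[sp,#32]
        //      mov fp,sp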
JITDUMP("Frame type 1. #outsz=0; #framesz=%d; LclFrameSize=%d\n", totalFrameSize,
compiler->compLclFrameSize);
frameType = 1;
assert(totalFrameSize <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -totalFrameSize,
INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, -totalFrameSize);
maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
offset = (int)compiler->compLclFrameSize + 2 * REGSIZE_BYTES; // 2 for FP/LR
}
else if (totalFrameSize <= 512)
{
// Case #2.
//
// The (totalFrameSize <= 512) condition ensures the callee-saved registers can all be saved using STP
// with signed offset encoding. The maximum positive STP offset is 504, but when storing a pair of
// 8 byte registers, the largest actual offset we use would be 512 - 8 * 2 = 496. And STR with positive
// offset has a range 0 to 32760.
//
// After saving callee-saved registers, we establish the frame pointer with:
// add fp,sp,#outsz
// We do this *after* saving callee-saved registers, so the prolog/epilog unwind codes mostly match.
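        // As a rough illustration of the "FP/LR at bottom" shape (not necessarily the exact emitted code),
        // with #framesz=96, #outsz=32 and callee-saved x19..x22, the prolog would look approximately like:
        //      sub sp,sp,#96
        //      stp fp,lr,[sp,#32]
        //      stp x19,x20,[sp,#64]
        //      stp x21,x22,[sp,#80]
        //      add fp,sp,#32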
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
JITDUMP("Frame type 4 (save FP/LR at top). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 4;
// The frame will be allocated below, when the callee-saved registers are saved. This might mean a
// separate SUB instruction or the SP adjustment might be folded in to the first STP if there is
// no outgoing argument space AND no local frame space, that is, if the only thing the frame does
// is save callee-saved registers (and possibly varargs argument registers).
calleeSaveSpDelta = totalFrameSize;
offset = (int)compiler->compLclFrameSize;
}
else
{
JITDUMP("Frame type 2 (save FP/LR at bottom). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 2;
// Generate:
// sub sp,sp,#framesz
// stp fp,lr,[sp,#outsz] // note that by necessity, #outsz <= #framesz - 16, so #outsz <= 496.
assert(totalFrameSize - compiler->lvaOutgoingArgSpaceSize <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
compiler->unwindAllocStack(totalFrameSize);
assert(compiler->lvaOutgoingArgSpaceSize + 2 * REGSIZE_BYTES <= (unsigned)totalFrameSize);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize);
maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
offset = (int)compiler->compLclFrameSize + 2 * REGSIZE_BYTES; // 2 for FP/LR
}
}
else
{
// Case 5 or 6.
//
// First, the callee-saved registers will be saved, and the callee-saved register code must use
// pre-index to subtract from SP as the first instruction. It must also leave space for varargs
// registers to be stored. For example:
// stp r19,r20,[sp,#-96]!
// stp d8,d9,[sp,#16]
// ... save varargs incoming integer registers ...
// Note that all SP alterations must be 16-byte aligned. We have already calculated any alignment to be
// lower on the stack than the callee-saved registers (see lvaAlignFrame() for how we calculate
// alignment). So, if there is an odd number of callee-saved registers, we use (for example, with just
// one saved register):
// sub sp,sp,#16
// str r19,[sp,#8]
// This is one additional instruction, but it centralizes the aligned space. Otherwise, it might be
// possible to have two 8-byte alignment padding words, one below the callee-saved registers, and one
// above them. If that is preferable, we could implement it.
//
// Note that any varargs saved space will always be 16-byte aligned, since there are 8 argument
// registers.
//
// Then, define #remainingFrameSz = #framesz - (callee-saved size + varargs space + possible alignment
        // padding from above). Note that #remainingFrameSz must not be zero, since we still need to save FP,LR.
//
// Generate:
// sub sp,sp,#remainingFrameSz
// or, for large frames:
// mov rX, #remainingFrameSz // maybe multiple instructions
// sub sp,sp,rX
//
// followed by:
// stp fp,lr,[sp,#outsz]
// add fp,sp,#outsz
//
// However, we need to handle the case where #outsz is larger than the constant signed offset encoding
// can handle. And, once again, we might need to deal with #outsz that is not aligned to 16-bytes (i.e.,
// STACK_ALIGN). So, in the case of large #outsz we will have an additional SP adjustment, using one of
// the following sequences:
//
// Define #remainingFrameSz2 = #remainingFrameSz - #outsz.
//
// sub sp,sp,#remainingFrameSz2 // if #remainingFrameSz2 is 16-byte aligned
// stp fp,lr,[sp]
// mov fp,sp
// sub sp,sp,#outsz // in this case, #outsz must also be 16-byte aligned
//
// Or:
//
// sub sp,sp,roundUp(#remainingFrameSz2,16) // if #remainingFrameSz2 is not 16-byte aligned (it is
// // always guaranteed to be 8 byte aligned).
// stp fp,lr,[sp,#8] // it will always be #8 in the unaligned case
// add fp,sp,#8
// sub sp,sp,#outsz - #8
//
// (As usual, for a large constant "#outsz - #8", we might need multiple instructions:
// mov rX, #outsz - #8 // maybe multiple instructions
// sub sp,sp,rX
// )
//
// Note that even if we align the SP alterations, that does not imply that we are creating empty alignment
// slots. In fact, we are not; any empty alignment slots were calculated in
// Compiler::lvaAssignFrameOffsets() and its callees.
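        // As a rough numeric illustration of this case (frame type 3, no varargs, no alignment padding):
        // with #framesz=1600, #outsz=32 and callee-saved x19..x22 plus FP/LR, the callee-saved area
        // (excluding FP/LR) is 32 bytes and #remainingFrameSz is 1568, giving approximately:
        //      stp x19,x20,[sp,#-32]!
        //      stp x21,x22,[sp,#16]
        //      sub sp,sp,#1568
        //      stp fp,lr,[sp,#32]
        //      add fp,sp,#32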
int calleeSaveSpDeltaUnaligned = totalFrameSize - compiler->compLclFrameSize;
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
JITDUMP("Frame type 5 (save FP/LR at top). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
// This case is much simpler, because we allocate space for the callee-saved register area, including
// FP/LR. Note the SP adjustment might be SUB or be folded into the first store as a predecrement.
// Then, we use a single SUB to establish the rest of the frame. We need to be careful about where
// to establish the frame pointer, as there is a limit of 2040 bytes offset from SP to FP in the
// unwind codes when FP is established.
frameType = 5;
}
else
{
JITDUMP("Frame type 3 (save FP/LR at bottom). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 3;
calleeSaveSpDeltaUnaligned -= 2 * REGSIZE_BYTES; // 2 for FP, LR which we'll save later.
// We'll take care of these later, but callee-saved regs code shouldn't see them.
maskSaveRegsInt &= ~(RBM_FP | RBM_LR);
}
assert(calleeSaveSpDeltaUnaligned >= 0);
        assert((calleeSaveSpDeltaUnaligned % 8) == 0); // It had better be at least 8-byte aligned.
calleeSaveSpDelta = AlignUp((UINT)calleeSaveSpDeltaUnaligned, STACK_ALIGN);
offset = calleeSaveSpDelta - calleeSaveSpDeltaUnaligned;
JITDUMP(" calleeSaveSpDelta=%d, offset=%d\n", calleeSaveSpDelta, offset);
// At most one alignment slot between SP and where we store the callee-saved registers.
assert((offset == 0) || (offset == REGSIZE_BYTES));
}
}
else
{
// No frame pointer (no chaining).
assert((maskSaveRegsInt & RBM_FP) == 0);
assert((maskSaveRegsInt & RBM_LR) != 0);
// Note that there is no pre-indexed save_lrpair unwind code variant, so we can't allocate the frame using
// 'stp' if we only have one callee-saved register plus LR to save.
NYI("Frame without frame pointer");
offset = 0;
}
assert(frameType != 0);
const int calleeSaveSpOffset = offset;
JITDUMP(" offset=%d, calleeSaveSpDelta=%d\n", offset, calleeSaveSpDelta);
genSaveCalleeSavedRegistersHelp(maskSaveRegsInt | maskSaveRegsFloat, offset, -calleeSaveSpDelta);
offset += genCountBits(maskSaveRegsInt | maskSaveRegsFloat) * REGSIZE_BYTES;
// For varargs, home the incoming arg registers last. Note that there is nothing to unwind here,
// so we just report "NOP" unwind codes. If there's no more frame setup after this, we don't
// need to add codes at all.
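    // For example (illustrative only), if 'offset' were 64 at this point, the loop below would home the
    // eight incoming integer argument registers as:
    //      stp x0,x1,[sp,#64]
    //      stp x2,x3,[sp,#80]
    //      stp x4,x5,[sp,#96]
    //      stp x6,x7,[sp,#112]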
if (compiler->info.compIsVarArgs)
{
JITDUMP(" compIsVarArgs=true\n");
// There are 8 general-purpose registers to home, thus 'offset' must be 16-byte aligned here.
assert((offset % 16) == 0);
for (regNumber reg1 = REG_ARG_FIRST; reg1 < REG_ARG_LAST; reg1 = REG_NEXT(REG_NEXT(reg1)))
{
regNumber reg2 = REG_NEXT(reg1);
// stp REG, REG + 1, [SP, #offset]
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, offset);
compiler->unwindNop();
offset += 2 * REGSIZE_BYTES;
}
}
// By default, we'll establish the frame pointer chain. (Note that currently frames without FP are NYI.)
bool establishFramePointer = true;
// If we do establish the frame pointer, what is the amount we add to SP to do so?
unsigned offsetSpToSavedFp = 0;
if (frameType == 1)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
assert(offsetSpToSavedFp == 0);
}
else if (frameType == 2)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = compiler->lvaOutgoingArgSpaceSize;
}
else if (frameType == 3)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
int remainingFrameSz = totalFrameSize - calleeSaveSpDelta;
assert(remainingFrameSz > 0);
assert((remainingFrameSz % 16) == 0); // this is guaranteed to be 16-byte aligned because each component --
// totalFrameSize and calleeSaveSpDelta -- is 16-byte aligned.
if (compiler->lvaOutgoingArgSpaceSize > 504)
{
// We can't do "stp fp,lr,[sp,#outsz]" because #outsz is too big.
// If compiler->lvaOutgoingArgSpaceSize is not aligned, we need to align the SP adjustment.
assert(remainingFrameSz > (int)compiler->lvaOutgoingArgSpaceSize);
int spAdjustment2Unaligned = remainingFrameSz - compiler->lvaOutgoingArgSpaceSize;
int spAdjustment2 = (int)roundUp((unsigned)spAdjustment2Unaligned, STACK_ALIGN);
int alignmentAdjustment2 = spAdjustment2 - spAdjustment2Unaligned;
assert((alignmentAdjustment2 == 0) || (alignmentAdjustment2 == 8));
JITDUMP(" spAdjustment2=%d\n", spAdjustment2);
genPrologSaveRegPair(REG_FP, REG_LR, alignmentAdjustment2, -spAdjustment2, false, initReg, pInitRegZeroed);
offset += spAdjustment2;
// Now subtract off the #outsz (or the rest of the #outsz if it was unaligned, and the above "sub"
// included some of it)
int spAdjustment3 = compiler->lvaOutgoingArgSpaceSize - alignmentAdjustment2;
assert(spAdjustment3 > 0);
assert((spAdjustment3 % 16) == 0);
JITDUMP(" alignmentAdjustment2=%d\n", alignmentAdjustment2);
genEstablishFramePointer(alignmentAdjustment2, /* reportUnwindData */ true);
// We just established the frame pointer chain; don't do it again.
establishFramePointer = false;
JITDUMP(" spAdjustment3=%d\n", spAdjustment3);
// We've already established the frame pointer, so no need to report the stack pointer change to unwind
// info.
genStackPointerAdjustment(-spAdjustment3, initReg, pInitRegZeroed, /* reportUnwindData */ false);
offset += spAdjustment3;
}
else
{
genPrologSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize, -remainingFrameSz, false, initReg,
pInitRegZeroed);
offset += remainingFrameSz;
offsetSpToSavedFp = compiler->lvaOutgoingArgSpaceSize;
}
}
else if (frameType == 4)
{
assert(genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = calleeSaveSpDelta - (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) -
2 * REGSIZE_BYTES; // -2 for FP, LR
}
else if (frameType == 5)
{
assert(genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = calleeSaveSpDelta - (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) -
2 * REGSIZE_BYTES; // -2 for FP, LR
JITDUMP(" offsetSpToSavedFp=%d\n", offsetSpToSavedFp);
genEstablishFramePointer(offsetSpToSavedFp, /* reportUnwindData */ true);
// We just established the frame pointer chain; don't do it again.
establishFramePointer = false;
int remainingFrameSz = totalFrameSize - calleeSaveSpDelta;
assert(remainingFrameSz > 0);
assert((remainingFrameSz % 16) == 0); // this is guaranteed to be 16-byte aligned because each component --
// totalFrameSize and calleeSaveSpDelta -- is 16-byte aligned.
JITDUMP(" remainingFrameSz=%d\n", remainingFrameSz);
// We've already established the frame pointer, so no need to report the stack pointer change to unwind info.
genStackPointerAdjustment(-remainingFrameSz, initReg, pInitRegZeroed, /* reportUnwindData */ false);
offset += remainingFrameSz;
}
else
{
unreached();
}
if (establishFramePointer)
{
JITDUMP(" offsetSpToSavedFp=%d\n", offsetSpToSavedFp);
genEstablishFramePointer(offsetSpToSavedFp, /* reportUnwindData */ true);
}
assert(offset == totalFrameSize);
// Save off information about the frame for later use
//
compiler->compFrameInfo.frameType = frameType;
compiler->compFrameInfo.calleeSaveSpOffset = calleeSaveSpOffset;
compiler->compFrameInfo.calleeSaveSpDelta = calleeSaveSpDelta;
compiler->compFrameInfo.offsetSpToSavedFp = offsetSpToSavedFp;
#endif // TARGET_ARM64
}
/*****************************************************************************
*
* Generates code for a function epilog.
*
* Please consult the "debugger team notification" comment in genFnProlog().
*/
void CodeGen::genFnEpilog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFnEpilog()\n");
#endif // DEBUG
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, GetEmitter()->emitInitGCrefVars);
gcInfo.gcRegGCrefSetCur = GetEmitter()->emitInitGCrefRegs;
gcInfo.gcRegByrefSetCur = GetEmitter()->emitInitByrefRegs;
#ifdef DEBUG
if (compiler->opts.dspCode)
printf("\n__epilog:\n");
if (verbose)
{
printf("gcVarPtrSetCur=%s ", VarSetOps::ToString(compiler, gcInfo.gcVarPtrSetCur));
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
printf(", gcRegGCrefSetCur=");
printRegMaskInt(gcInfo.gcRegGCrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
printf(", gcRegByrefSetCur=");
printRegMaskInt(gcInfo.gcRegByrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
printf("\n");
}
#endif // DEBUG
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
GenTree* lastNode = block->lastNode();
// Method handle and address info used in case of jump epilog
CORINFO_METHOD_HANDLE methHnd = nullptr;
CORINFO_CONST_LOOKUP addrInfo;
addrInfo.addr = nullptr;
addrInfo.accessType = IAT_VALUE;
if (jmpEpilog && lastNode->gtOper == GT_JMP)
{
methHnd = (CORINFO_METHOD_HANDLE)lastNode->AsVal()->gtVal1;
compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo);
}
#ifdef TARGET_ARM
// We delay starting the unwind codes until we have an instruction which we know
// needs an unwind code. In particular, for large stack frames in methods without
// localloc, the sequence might look something like this:
// movw r3, 0x38e0
// add sp, r3
// pop {r4,r5,r6,r10,r11,pc}
// In this case, the "movw" should not be part of the unwind codes, since it will
// be a NOP, and it is a waste to start with a NOP. Note that calling unwindBegEpilog()
// also sets the current location as the beginning offset of the epilog, so every
// instruction afterwards needs an unwind code. In the case above, if you call
// unwindBegEpilog() before the "movw", then you must generate a NOP for the "movw".
bool unwindStarted = false;
// Tear down the stack frame
if (compiler->compLocallocUsed)
{
if (!unwindStarted)
{
compiler->unwindBegEpilog();
unwindStarted = true;
}
// mov R9 into SP
inst_Mov(TYP_I_IMPL, REG_SP, REG_SAVED_LOCALLOC_SP, /* canSkip */ false);
compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0);
}
if (jmpEpilog ||
genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED) ==
RBM_NONE)
{
genFreeLclFrame(compiler->compLclFrameSize, &unwindStarted);
}
if (!unwindStarted)
{
// If we haven't generated anything yet, we're certainly going to generate a "pop" next.
compiler->unwindBegEpilog();
unwindStarted = true;
}
if (jmpEpilog && lastNode->gtOper == GT_JMP && addrInfo.accessType == IAT_RELPVALUE)
{
        // IAT_RELPVALUE jump at the end is done using relative indirection, so an
        // additional helper register is required.
// We use LR just before it is going to be restored from stack, i.e.
//
// movw r12, laddr
// movt r12, haddr
// mov lr, r12
// ldr r12, [r12]
// add r12, r12, lr
// pop {lr}
// ...
// bx r12
regNumber indCallReg = REG_R12;
regNumber vptrReg1 = REG_LR;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, vptrReg1, indCallReg, /* canSkip */ false);
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, indCallReg, vptrReg1);
}
genPopCalleeSavedRegisters(jmpEpilog);
if (regSet.rsMaskPreSpillRegs(true) != RBM_NONE)
{
        // We had better not have used a pop PC to return, otherwise this will be unreachable code
noway_assert(!genUsedPopToReturn);
int preSpillRegArgSize = genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
inst_RV_IV(INS_add, REG_SPBASE, preSpillRegArgSize, EA_PTRSIZE);
compiler->unwindAllocStack(preSpillRegArgSize);
}
if (jmpEpilog)
{
        // We had better not have used a pop PC to return, otherwise this will be unreachable code
noway_assert(!genUsedPopToReturn);
}
#else // TARGET_ARM64
compiler->unwindBegEpilog();
genPopCalleeSavedRegistersAndFreeLclFrame(jmpEpilog);
#endif // TARGET_ARM64
if (jmpEpilog)
{
SetHasTailCalls(true);
noway_assert(block->bbJumpKind == BBJ_RETURN);
noway_assert(block->GetFirstLIRNode() != nullptr);
/* figure out what jump we have */
GenTree* jmpNode = lastNode;
#if !FEATURE_FASTTAILCALL
noway_assert(jmpNode->gtOper == GT_JMP);
#else // FEATURE_FASTTAILCALL
// armarch
// If jmpNode is GT_JMP then gtNext must be null.
// If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts.
noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr));
// Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp
noway_assert((jmpNode->gtOper == GT_JMP) ||
((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
// The next block is associated with this "if" stmt
if (jmpNode->gtOper == GT_JMP)
#endif // FEATURE_FASTTAILCALL
{
// Simply emit a jump to the methodHnd. This is similar to a call so we can use
// the same descriptor with some minor adjustments.
assert(methHnd != nullptr);
assert(addrInfo.addr != nullptr);
#ifdef TARGET_ARMARCH
emitter::EmitCallType callType;
void* addr;
regNumber indCallReg;
switch (addrInfo.accessType)
{
case IAT_VALUE:
if (validImmForBL((ssize_t)addrInfo.addr))
{
// Simple direct call
callType = emitter::EC_FUNC_TOKEN;
addr = addrInfo.addr;
indCallReg = REG_NA;
break;
}
// otherwise the target address doesn't fit in an immediate
// so we have to burn a register...
FALLTHROUGH;
case IAT_PVALUE:
// Load the address into a register, load indirect and call through a register
// We have to use R12 since we assume the argument registers are in use
callType = emitter::EC_INDIR_R;
indCallReg = REG_INDIRECT_CALL_TARGET_REG;
addr = NULL;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
if (addrInfo.accessType == IAT_PVALUE)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
regSet.verifyRegUsed(indCallReg);
}
break;
case IAT_RELPVALUE:
{
// Load the address into a register, load relative indirect and call through a register
// We have to use R12 since we assume the argument registers are in use
                // LR is used as a helper register right before it is restored from the stack; thus,
// all relative address calculations are performed before LR is restored.
callType = emitter::EC_INDIR_R;
indCallReg = REG_R12;
addr = NULL;
regSet.verifyRegUsed(indCallReg);
break;
}
case IAT_PPVALUE:
default:
NO_WAY("Unsupported JMP indirection");
}
/* Simply emit a jump to the methodHnd. This is similar to a call so we can use
* the same descriptor with some minor adjustments.
*/
// clang-format off
GetEmitter()->emitIns_Call(callType,
methHnd,
INDEBUG_LDISASM_COMMA(nullptr)
addr,
0, // argSize
EA_UNKNOWN, // retSize
#if defined(TARGET_ARM64)
EA_UNKNOWN, // secondRetSize
#endif
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
indCallReg, // ireg
REG_NA, // xreg
0, // xmul
0, // disp
true); // isJump
// clang-format on
CLANG_FORMAT_COMMENT_ANCHOR;
#endif // TARGET_ARMARCH
}
#if FEATURE_FASTTAILCALL
else
{
genCallInstruction(jmpNode->AsCall());
}
#endif // FEATURE_FASTTAILCALL
}
else
{
#ifdef TARGET_ARM
if (!genUsedPopToReturn)
{
// If we did not use a pop to return, then we did a "pop {..., lr}" instead of "pop {..., pc}",
// so we need a "bx lr" instruction to return from the function.
inst_RV(INS_bx, REG_LR, TYP_I_IMPL);
compiler->unwindBranch16();
}
#else // TARGET_ARM64
inst_RV(INS_ret, REG_LR, TYP_I_IMPL);
compiler->unwindReturn(REG_LR);
#endif // TARGET_ARM64
}
compiler->unwindEndEpilog();
}
// return size
// alignmentWB is out param
unsigned CodeGenInterface::InferOpSizeAlign(GenTree* op, unsigned* alignmentWB)
{
unsigned alignment = 0;
unsigned opSize = 0;
if (op->gtType == TYP_STRUCT || op->OperIsCopyBlkOp())
{
opSize = InferStructOpSizeAlign(op, &alignment);
}
else
{
alignment = genTypeAlignments[op->TypeGet()];
opSize = genTypeSizes[op->TypeGet()];
}
assert(opSize != 0);
assert(alignment != 0);
(*alignmentWB) = alignment;
return opSize;
}
// return size
// alignmentWB is out param
unsigned CodeGenInterface::InferStructOpSizeAlign(GenTree* op, unsigned* alignmentWB)
{
unsigned alignment = 0;
unsigned opSize = 0;
while (op->gtOper == GT_COMMA)
{
op = op->AsOp()->gtOp2;
}
if (op->gtOper == GT_OBJ)
{
CORINFO_CLASS_HANDLE clsHnd = op->AsObj()->GetLayout()->GetClassHandle();
opSize = op->AsObj()->GetLayout()->GetSize();
alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
else if (op->gtOper == GT_LCL_VAR)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(op->AsLclVarCommon());
assert(varDsc->lvType == TYP_STRUCT);
opSize = varDsc->lvSize();
#ifndef TARGET_64BIT
if (varDsc->lvStructDoubleAlign)
{
alignment = TARGET_POINTER_SIZE * 2;
}
else
#endif // !TARGET_64BIT
{
alignment = TARGET_POINTER_SIZE;
}
}
else if (op->gtOper == GT_MKREFANY)
{
opSize = TARGET_POINTER_SIZE * 2;
alignment = TARGET_POINTER_SIZE;
}
else if (op->IsArgPlaceHolderNode())
{
CORINFO_CLASS_HANDLE clsHnd = op->AsArgPlace()->gtArgPlaceClsHnd;
assert(clsHnd != 0);
opSize = roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
else
{
assert(!"Unhandled gtOper");
opSize = TARGET_POINTER_SIZE;
alignment = TARGET_POINTER_SIZE;
}
assert(opSize != 0);
assert(alignment != 0);
(*alignmentWB) = alignment;
return opSize;
}
#endif // TARGET_ARMARCH
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/codegencommon.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Code Generator Common: XX
XX Methods common to all architectures and register allocation strategies XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// TODO-Cleanup: There are additional methods in CodeGen*.cpp that are almost
// identical, and which should probably be moved here.
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "codegen.h"
#include "gcinfo.h"
#include "emit.h"
#ifndef JIT32_GCENCODER
#include "gcinfoencoder.h"
#endif
#include "patchpointinfo.h"
/*****************************************************************************/
void CodeGenInterface::setFramePointerRequiredEH(bool value)
{
m_cgFramePointerRequired = value;
#ifndef JIT32_GCENCODER
if (value)
{
// EnumGcRefs will only enumerate slots in aborted frames
// if they are fully-interruptible. So if we have a catch
// or finally that will keep frame-vars alive, we need to
// force fully-interruptible.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
{
printf("Method has EH, marking method as fully interruptible\n");
}
#endif
m_cgInterruptible = true;
}
#endif // JIT32_GCENCODER
}
/*****************************************************************************/
CodeGenInterface* getCodeGenerator(Compiler* comp)
{
return new (comp, CMK_Codegen) CodeGen(comp);
}
// CodeGen constructor
CodeGenInterface::CodeGenInterface(Compiler* theCompiler)
: gcInfo(theCompiler), regSet(theCompiler, gcInfo), compiler(theCompiler), treeLifeUpdater(nullptr)
{
}
/*****************************************************************************/
CodeGen::CodeGen(Compiler* theCompiler) : CodeGenInterface(theCompiler)
{
#if defined(TARGET_XARCH)
negBitmaskFlt = nullptr;
negBitmaskDbl = nullptr;
absBitmaskFlt = nullptr;
absBitmaskDbl = nullptr;
u8ToDblBitmask = nullptr;
#endif // defined(TARGET_XARCH)
#if defined(FEATURE_PUT_STRUCT_ARG_STK) && !defined(TARGET_X86)
m_stkArgVarNum = BAD_VAR_NUM;
#endif
#if defined(UNIX_X86_ABI)
curNestedAlignment = 0;
maxNestedAlignment = 0;
#endif
gcInfo.regSet = ®Set;
m_cgEmitter = new (compiler->getAllocator()) emitter();
m_cgEmitter->codeGen = this;
m_cgEmitter->gcInfo = &gcInfo;
#ifdef DEBUG
setVerbose(compiler->verbose);
#endif // DEBUG
regSet.tmpInit();
#ifdef LATE_DISASM
getDisAssembler().disInit(compiler);
#endif
#ifdef DEBUG
genTempLiveChg = true;
genTrnslLocalVarCount = 0;
// Shouldn't be used before it is set in genFnProlog()
compiler->compCalleeRegsPushed = UninitializedWord<unsigned>(compiler);
#if defined(TARGET_XARCH)
// Shouldn't be used before it is set in genFnProlog()
compiler->compCalleeFPRegsSavedMask = (regMaskTP)-1;
#endif // defined(TARGET_XARCH)
#endif // DEBUG
#ifdef TARGET_AMD64
// This will be set before final frame layout.
compiler->compVSQuirkStackPaddingNeeded = 0;
#endif // TARGET_AMD64
compiler->genCallSite2DebugInfoMap = nullptr;
/* Assume that we not fully interruptible */
SetInterruptible(false);
#ifdef TARGET_ARMARCH
SetHasTailCalls(false);
#endif // TARGET_ARMARCH
#ifdef DEBUG
genInterruptibleUsed = false;
genCurDispOffset = (unsigned)-1;
#endif
#ifdef TARGET_ARM64
genSaveFpLrWithAllCalleeSavedRegisters = false;
#endif // TARGET_ARM64
}
void CodeGenInterface::genMarkTreeInReg(GenTree* tree, regNumber reg)
{
tree->SetRegNum(reg);
}
#if defined(TARGET_X86) || defined(TARGET_ARM)
//---------------------------------------------------------------------
// genTotalFrameSize - return the "total" size of the stack frame, including local size
// and callee-saved register size. There are a few things "missing" depending on the
// platform. The function genCallerSPtoInitialSPdelta() includes those things.
//
// For ARM, this doesn't include the prespilled registers.
//
// For x86, this doesn't include the frame pointer if codeGen->isFramePointerUsed() is true.
// It also doesn't include the pushed return address.
//
// Return value:
// Frame size
int CodeGenInterface::genTotalFrameSize() const
{
assert(!IsUninitialized(compiler->compCalleeRegsPushed));
int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
assert(totalFrameSize >= 0);
return totalFrameSize;
}
//---------------------------------------------------------------------
// genSPtoFPdelta - return the offset from SP to the frame pointer.
// This number is going to be positive, since SP must be at the lowest
// address.
//
// There must be a frame pointer to call this function!
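// For example (x86, illustrative): with one callee-saved register pushed and a 16-byte local frame,
// genTotalFrameSize() == 20, genCallerSPtoInitialSPdelta() == -28 (the frame plus the pushed return
// address and the pushed EBP), and genCallerSPtoFPdelta() == -8, so genSPtoFPdelta() == 28 - 8 == 20.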
int CodeGenInterface::genSPtoFPdelta() const
{
assert(isFramePointerUsed());
int delta;
delta = -genCallerSPtoInitialSPdelta() + genCallerSPtoFPdelta();
assert(delta >= 0);
return delta;
}
//---------------------------------------------------------------------
// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
// This number is going to be negative, since the Caller-SP is at a higher
// address than the frame pointer.
//
// There must be a frame pointer to call this function!
int CodeGenInterface::genCallerSPtoFPdelta() const
{
assert(isFramePointerUsed());
int callerSPtoFPdelta = 0;
#if defined(TARGET_ARM)
// On ARM, we first push the prespill registers, then store LR, then R11 (FP), and point R11 at the saved R11.
callerSPtoFPdelta -= genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
callerSPtoFPdelta -= 2 * REGSIZE_BYTES;
#elif defined(TARGET_X86)
// Thanks to ebp chaining, the difference between ebp-based addresses
// and caller-SP-relative addresses is just the 2 pointers:
// return address
// pushed ebp
callerSPtoFPdelta -= 2 * REGSIZE_BYTES;
#else
#error "Unknown TARGET"
#endif // TARGET*
assert(callerSPtoFPdelta <= 0);
return callerSPtoFPdelta;
}
//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
// This number will be negative.
int CodeGenInterface::genCallerSPtoInitialSPdelta() const
{
int callerSPtoSPdelta = 0;
#if defined(TARGET_ARM)
callerSPtoSPdelta -= genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
callerSPtoSPdelta -= genTotalFrameSize();
#elif defined(TARGET_X86)
callerSPtoSPdelta -= genTotalFrameSize();
callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
// compCalleeRegsPushed does not account for the frame pointer
// TODO-Cleanup: shouldn't this be part of genTotalFrameSize?
if (isFramePointerUsed())
{
callerSPtoSPdelta -= REGSIZE_BYTES;
}
#else
#error "Unknown TARGET"
#endif // TARGET*
assert(callerSPtoSPdelta <= 0);
return callerSPtoSPdelta;
}
#endif // defined(TARGET_X86) || defined(TARGET_ARM)
/*****************************************************************************
 *  Should we round simple operations (assignments, arithmetic operations, etc.)?
*/
// inline
// static
bool CodeGen::genShouldRoundFP()
{
RoundLevel roundLevel = getRoundFloatLevel();
switch (roundLevel)
{
case ROUND_NEVER:
case ROUND_CMP_CONST:
case ROUND_CMP:
return false;
default:
assert(roundLevel == ROUND_ALWAYS);
return true;
}
}
/*****************************************************************************
*
* Initialize some global variables.
*/
void CodeGen::genPrepForCompiler()
{
treeLifeUpdater = new (compiler, CMK_bitset) TreeLifeUpdater<true>(compiler);
/* Figure out which non-register variables hold pointers */
VarSetOps::AssignNoCopy(compiler, gcInfo.gcTrkStkPtrLcls, VarSetOps::MakeEmpty(compiler));
// Also, initialize gcTrkStkPtrLcls to include all tracked variables that do not fully live
// in a register (i.e. they live on the stack for all or part of their lifetime).
// Note that lvRegister indicates that a lclVar is in a register for its entire lifetime.
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
if (varDsc->lvTracked || varDsc->lvIsRegCandidate())
{
if (!varDsc->lvRegister && compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::AddElemD(compiler, gcInfo.gcTrkStkPtrLcls, varDsc->lvVarIndex);
}
}
}
VarSetOps::AssignNoCopy(compiler, genLastLiveSet, VarSetOps::MakeEmpty(compiler));
genLastLiveMask = RBM_NONE;
#ifdef DEBUG
compiler->fgBBcountAtCodegen = compiler->fgBBcount;
#endif
}
//------------------------------------------------------------------------
// genMarkLabelsForCodegen: Mark labels required for codegen.
//
// Mark all blocks that require a label with BBF_HAS_LABEL. These are either blocks that are:
// 1. the target of jumps (fall-through flow doesn't require a label),
// 2. referenced labels such as for "switch" codegen,
// 3. needed to denote the range of EH regions to the VM.
// 4. needed to denote the range of code for alignment processing.
//
// No labels will be in the IR before now, but future codegen might annotate additional blocks
// with this flag, such as "switch" codegen, or codegen-created blocks from genCreateTempLabel().
// Also, the alignment processing code marks BBJ_COND fall-through labels elsewhere.
//
// To report exception handling information to the VM, we need the size of the exception
// handling regions. To compute that, we need to emit labels for the beginning block of
// an EH region, and the block that immediately follows a region. Go through the EH
// table and mark all these blocks with BBF_HAS_LABEL to make this happen.
//
// This code is closely coupled with genReportEH() in the sense that any block that this
// procedure determines needs a label has to be selected using the same logic both here
// and in genReportEH(); so, basically, any time there is a change in the way we handle
// EH reporting, we have to keep the logic of these two methods 'in sync'.
//
// No blocks should be added or removed after this.
//
void CodeGen::genMarkLabelsForCodegen()
{
assert(!compiler->fgSafeBasicBlockCreation);
JITDUMP("Mark labels for codegen\n");
#ifdef DEBUG
// No label flags should be set before this.
for (BasicBlock* const block : compiler->Blocks())
{
assert((block->bbFlags & BBF_HAS_LABEL) == 0);
}
#endif // DEBUG
// The first block is special; it always needs a label. This is to properly set up GC info.
JITDUMP(" " FMT_BB " : first block\n", compiler->fgFirstBB->bbNum);
compiler->fgFirstBB->bbFlags |= BBF_HAS_LABEL;
// The current implementation of switch tables requires the first block to have a label so it
// can generate offsets to the switch label targets.
// (This is duplicative with the fact we always set the first block with a label above.)
// TODO-CQ: remove this when switches have been re-implemented to not use this.
if (compiler->fgHasSwitch)
{
JITDUMP(" " FMT_BB " : function has switch; mark first block\n", compiler->fgFirstBB->bbNum);
compiler->fgFirstBB->bbFlags |= BBF_HAS_LABEL;
}
for (BasicBlock* const block : compiler->Blocks())
{
switch (block->bbJumpKind)
{
case BBJ_ALWAYS: // This will also handle the BBJ_ALWAYS of a BBJ_CALLFINALLY/BBJ_ALWAYS pair.
case BBJ_COND:
case BBJ_EHCATCHRET:
JITDUMP(" " FMT_BB " : branch target\n", block->bbJumpDest->bbNum);
block->bbJumpDest->bbFlags |= BBF_HAS_LABEL;
break;
case BBJ_SWITCH:
for (BasicBlock* const bTarget : block->SwitchTargets())
{
JITDUMP(" " FMT_BB " : branch target\n", bTarget->bbNum);
bTarget->bbFlags |= BBF_HAS_LABEL;
}
break;
case BBJ_CALLFINALLY:
// The finally target itself will get marked by walking the EH table, below, and marking
// all handler begins.
CLANG_FORMAT_COMMENT_ANCHOR;
#if FEATURE_EH_CALLFINALLY_THUNKS
{
// For callfinally thunks, we need to mark the block following the callfinally/always pair,
// as that's needed for identifying the range of the "duplicate finally" region in EH data.
BasicBlock* bbToLabel = block->bbNext;
if (block->isBBCallAlwaysPair())
{
bbToLabel = bbToLabel->bbNext; // skip the BBJ_ALWAYS
}
if (bbToLabel != nullptr)
{
JITDUMP(" " FMT_BB " : callfinally thunk region end\n", bbToLabel->bbNum);
bbToLabel->bbFlags |= BBF_HAS_LABEL;
}
}
#endif // FEATURE_EH_CALLFINALLY_THUNKS
break;
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
case BBJ_RETURN:
case BBJ_THROW:
case BBJ_NONE:
break;
default:
noway_assert(!"Unexpected bbJumpKind");
break;
}
}
// Walk all the exceptional code blocks and mark them, since they don't appear in the normal flow graph.
for (Compiler::AddCodeDsc* add = compiler->fgAddCodeList; add; add = add->acdNext)
{
JITDUMP(" " FMT_BB " : throw helper block\n", add->acdDstBlk->bbNum);
add->acdDstBlk->bbFlags |= BBF_HAS_LABEL;
}
for (EHblkDsc* const HBtab : EHClauses(compiler))
{
HBtab->ebdTryBeg->bbFlags |= BBF_HAS_LABEL;
HBtab->ebdHndBeg->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : try begin\n", HBtab->ebdTryBeg->bbNum);
JITDUMP(" " FMT_BB " : hnd begin\n", HBtab->ebdHndBeg->bbNum);
if (HBtab->ebdTryLast->bbNext != nullptr)
{
HBtab->ebdTryLast->bbNext->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->bbNext->bbNum);
}
if (HBtab->ebdHndLast->bbNext != nullptr)
{
HBtab->ebdHndLast->bbNext->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->bbNext->bbNum);
}
if (HBtab->HasFilter())
{
HBtab->ebdFilter->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : filter begin\n", HBtab->ebdFilter->bbNum);
}
}
#ifdef DEBUG
if (compiler->verbose)
{
printf("*************** After genMarkLabelsForCodegen()\n");
compiler->fgDispBasicBlocks();
}
#endif // DEBUG
}
void CodeGenInterface::genUpdateLife(GenTree* tree)
{
treeLifeUpdater->UpdateLife(tree);
}
void CodeGenInterface::genUpdateLife(VARSET_VALARG_TP newLife)
{
compiler->compUpdateLife</*ForCodeGen*/ true>(newLife);
}
// Return the register mask for the given register variable
// inline
regMaskTP CodeGenInterface::genGetRegMask(const LclVarDsc* varDsc)
{
regMaskTP regMask = RBM_NONE;
assert(varDsc->lvIsInReg());
regNumber reg = varDsc->GetRegNum();
if (genIsValidFloatReg(reg))
{
regMask = genRegMaskFloat(reg, varDsc->GetRegisterType());
}
else
{
regMask = genRegMask(reg);
}
return regMask;
}
// Return the register mask for the given lclVar or regVar tree node
// inline
regMaskTP CodeGenInterface::genGetRegMask(GenTree* tree)
{
assert(tree->gtOper == GT_LCL_VAR);
regMaskTP regMask = RBM_NONE;
const LclVarDsc* varDsc = compiler->lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->lvPromoted)
{
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
const LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(i);
noway_assert(fieldVarDsc->lvIsStructField);
if (fieldVarDsc->lvIsInReg())
{
regMask |= genGetRegMask(fieldVarDsc);
}
}
}
else if (varDsc->lvIsInReg())
{
regMask = genGetRegMask(varDsc);
}
return regMask;
}
// The given lclVar is either going live (being born) or dying.
// It might be both going live and dying (that is, it is a dead store) under MinOpts.
// Update regSet.GetMaskVars() accordingly.
// inline
void CodeGenInterface::genUpdateRegLife(const LclVarDsc* varDsc, bool isBorn, bool isDying DEBUGARG(GenTree* tree))
{
regMaskTP regMask = genGetRegMask(varDsc);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tV%02u in reg ", compiler->lvaGetLclNum(varDsc));
varDsc->PrintVarReg();
printf(" is becoming %s ", (isDying) ? "dead" : "live");
Compiler::printTreeID(tree);
printf("\n");
}
#endif // DEBUG
if (isDying)
{
// We'd like to be able to assert the following, however if we are walking
// through a qmark/colon tree, we may encounter multiple last-use nodes.
// assert((regSet.GetMaskVars() & regMask) == regMask);
regSet.RemoveMaskVars(regMask);
}
else
{
// If this is going live, the register must not have a variable in it, except
// in the case of an exception or "spill at single-def" variable, which may be already treated
// as live in the register.
assert(varDsc->IsAlwaysAliveInMemory() || ((regSet.GetMaskVars() & regMask) == 0));
regSet.AddMaskVars(regMask);
}
}
//----------------------------------------------------------------------
// compHelperCallKillSet: Gets a register mask that represents the kill set for a helper call.
// Not all JIT Helper calls follow the standard ABI on the target architecture.
//
// TODO-CQ: Currently this list is incomplete (not all helpers calls are
// enumerated) and not 100% accurate (some killsets are bigger than
// what they really are).
// There's some work to be done in several places in the JIT to
// accurately track the registers that are getting killed by
// helper calls:
// a) LSRA needs several changes to accommodate more precise killsets
// for every helper call it sees (both explicitly [easy] and
// implicitly [hard])
// b) Currently for AMD64, when we generate code for a helper call
// we're independently over-pessimizing the killsets of the call
// (independently from LSRA) and this needs changes
// both in CodeGenAmd64.cpp and emitx86.cpp.
//
// The best solution for this problem would be to try to centralize
// the killset information in a single place but then make the
// corresponding changes so every code generation phase is in sync
// about this.
//
// The interim solution is to only add known helper calls that don't
// follow the AMD64 ABI and actually trash registers that are supposed to be non-volatile.
//
// Arguments:
// helper - The helper being inquired about
//
// Return Value:
// Mask of register kills -- registers whose values are no longer guaranteed to be the same.
//
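// For example (hypothetical usage; actual call sites differ), a caller needing the registers trashed
// by the byref write barrier helper could query:
//      regMaskTP killMask = compHelperCallKillSet(CORINFO_HELP_ASSIGN_BYREF);
//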
regMaskTP Compiler::compHelperCallKillSet(CorInfoHelpFunc helper)
{
switch (helper)
{
case CORINFO_HELP_ASSIGN_BYREF:
#if defined(TARGET_AMD64)
return RBM_RSI | RBM_RDI | RBM_CALLEE_TRASH_NOGC;
#elif defined(TARGET_ARMARCH)
return RBM_CALLEE_TRASH_WRITEBARRIER_BYREF;
#elif defined(TARGET_X86)
return RBM_ESI | RBM_EDI | RBM_ECX;
#else
NYI("Model kill set for CORINFO_HELP_ASSIGN_BYREF on target arch");
return RBM_CALLEE_TRASH;
#endif
#if defined(TARGET_ARMARCH)
case CORINFO_HELP_ASSIGN_REF:
case CORINFO_HELP_CHECKED_ASSIGN_REF:
return RBM_CALLEE_TRASH_WRITEBARRIER;
#endif
case CORINFO_HELP_PROF_FCN_ENTER:
#ifdef RBM_PROFILER_ENTER_TRASH
return RBM_PROFILER_ENTER_TRASH;
#else
NYI("Model kill set for CORINFO_HELP_PROF_FCN_ENTER on target arch");
#endif
case CORINFO_HELP_PROF_FCN_LEAVE:
#ifdef RBM_PROFILER_LEAVE_TRASH
return RBM_PROFILER_LEAVE_TRASH;
#else
NYI("Model kill set for CORINFO_HELP_PROF_FCN_LEAVE on target arch");
#endif
case CORINFO_HELP_PROF_FCN_TAILCALL:
#ifdef RBM_PROFILER_TAILCALL_TRASH
return RBM_PROFILER_TAILCALL_TRASH;
#else
NYI("Model kill set for CORINFO_HELP_PROF_FCN_TAILCALL on target arch");
#endif
#ifdef TARGET_X86
case CORINFO_HELP_ASSIGN_REF_EAX:
case CORINFO_HELP_ASSIGN_REF_ECX:
case CORINFO_HELP_ASSIGN_REF_EBX:
case CORINFO_HELP_ASSIGN_REF_EBP:
case CORINFO_HELP_ASSIGN_REF_ESI:
case CORINFO_HELP_ASSIGN_REF_EDI:
case CORINFO_HELP_CHECKED_ASSIGN_REF_EAX:
case CORINFO_HELP_CHECKED_ASSIGN_REF_ECX:
case CORINFO_HELP_CHECKED_ASSIGN_REF_EBX:
case CORINFO_HELP_CHECKED_ASSIGN_REF_EBP:
case CORINFO_HELP_CHECKED_ASSIGN_REF_ESI:
case CORINFO_HELP_CHECKED_ASSIGN_REF_EDI:
return RBM_EDX;
#ifdef FEATURE_USE_ASM_GC_WRITE_BARRIERS
case CORINFO_HELP_ASSIGN_REF:
case CORINFO_HELP_CHECKED_ASSIGN_REF:
return RBM_EAX | RBM_EDX;
#endif // FEATURE_USE_ASM_GC_WRITE_BARRIERS
#endif
case CORINFO_HELP_STOP_FOR_GC:
return RBM_STOP_FOR_GC_TRASH;
case CORINFO_HELP_INIT_PINVOKE_FRAME:
return RBM_INIT_PINVOKE_FRAME_TRASH;
case CORINFO_HELP_VALIDATE_INDIRECT_CALL:
return RBM_VALIDATE_INDIRECT_CALL_TRASH;
default:
return RBM_CALLEE_TRASH;
}
}
//------------------------------------------------------------------------
// compChangeLife: Compare the given "newLife" with the last set of live variables and update
// codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness.
//
// Arguments:
// newLife - the new set of variables that are alive.
//
// Assumptions:
// The set of live variables reflects the result of only emitted code, it should not be considering the becoming
// live/dead of instructions that has not been emitted yet. This is used to ensure [) "VariableLiveRange"
// intervals when calling "siStartVariableLiveRange" and "siEndVariableLiveRange".
//
// Notes:
// If "ForCodeGen" is false, only "compCurLife" set (and no mask) will be setted.
//
template <bool ForCodeGen>
void Compiler::compChangeLife(VARSET_VALARG_TP newLife)
{
#ifdef DEBUG
if (verbose)
{
printf("Change life %s ", VarSetOps::ToString(this, compCurLife));
dumpConvertedVarSet(this, compCurLife);
printf(" -> %s ", VarSetOps::ToString(this, newLife));
dumpConvertedVarSet(this, newLife);
printf("\n");
}
#endif // DEBUG
/* We should only be called when the live set has actually changed */
noway_assert(!VarSetOps::Equal(this, compCurLife, newLife));
if (!ForCodeGen)
{
VarSetOps::Assign(this, compCurLife, newLife);
return;
}
/* Figure out which variables are becoming live/dead at this point */
// deadSet = compCurLife - newLife
VARSET_TP deadSet(VarSetOps::Diff(this, compCurLife, newLife));
// bornSet = newLife - compCurLife
VARSET_TP bornSet(VarSetOps::Diff(this, newLife, compCurLife));
    /* A variable can't become live and dead at the same time */
// (deadSet UNION bornSet) != EMPTY
noway_assert(!VarSetOps::IsEmptyUnion(this, deadSet, bornSet));
// (deadSet INTERSECTION bornSet) == EMPTY
noway_assert(VarSetOps::IsEmptyIntersection(this, deadSet, bornSet));
VarSetOps::Assign(this, compCurLife, newLife);
// Handle the dying vars first, then the newly live vars.
// This is because, in the RyuJIT backend case, they may occupy registers that
// will be occupied by another var that is newly live.
VarSetOps::Iter deadIter(this, deadSet);
unsigned deadVarIndex = 0;
while (deadIter.NextElem(&deadVarIndex))
{
unsigned varNum = lvaTrackedIndexToLclNum(deadVarIndex);
LclVarDsc* varDsc = lvaGetDesc(varNum);
bool isGCRef = (varDsc->TypeGet() == TYP_REF);
bool isByRef = (varDsc->TypeGet() == TYP_BYREF);
bool isInReg = varDsc->lvIsInReg();
bool isInMemory = !isInReg || varDsc->IsAlwaysAliveInMemory();
if (isInReg)
{
// TODO-Cleanup: Move the code from compUpdateLifeVar to genUpdateRegLife that updates the
// gc sets
regMaskTP regMask = varDsc->lvRegMask();
if (isGCRef)
{
codeGen->gcInfo.gcRegGCrefSetCur &= ~regMask;
}
else if (isByRef)
{
codeGen->gcInfo.gcRegByrefSetCur &= ~regMask;
}
codeGen->genUpdateRegLife(varDsc, false /*isBorn*/, true /*isDying*/ DEBUGARG(nullptr));
}
// Update the gcVarPtrSetCur if it is in memory.
if (isInMemory && (isGCRef || isByRef))
{
VarSetOps::RemoveElemD(this, codeGen->gcInfo.gcVarPtrSetCur, deadVarIndex);
JITDUMP("\t\t\t\t\t\t\tV%02u becoming dead\n", varNum);
}
#ifdef USING_VARIABLE_LIVE_RANGE
codeGen->getVariableLiveKeeper()->siEndVariableLiveRange(varNum);
#endif // USING_VARIABLE_LIVE_RANGE
}
VarSetOps::Iter bornIter(this, bornSet);
unsigned bornVarIndex = 0;
while (bornIter.NextElem(&bornVarIndex))
{
unsigned varNum = lvaTrackedIndexToLclNum(bornVarIndex);
LclVarDsc* varDsc = lvaGetDesc(varNum);
bool isGCRef = (varDsc->TypeGet() == TYP_REF);
bool isByRef = (varDsc->TypeGet() == TYP_BYREF);
if (varDsc->lvIsInReg())
{
// If this variable is going live in a register, it is no longer live on the stack,
// unless it is an EH/"spill at single-def" var, which always remains live on the stack.
if (!varDsc->IsAlwaysAliveInMemory())
{
#ifdef DEBUG
if (VarSetOps::IsMember(this, codeGen->gcInfo.gcVarPtrSetCur, bornVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tRemoving V%02u from gcVarPtrSetCur\n", varNum);
}
#endif // DEBUG
VarSetOps::RemoveElemD(this, codeGen->gcInfo.gcVarPtrSetCur, bornVarIndex);
}
codeGen->genUpdateRegLife(varDsc, true /*isBorn*/, false /*isDying*/ DEBUGARG(nullptr));
regMaskTP regMask = varDsc->lvRegMask();
if (isGCRef)
{
codeGen->gcInfo.gcRegGCrefSetCur |= regMask;
}
else if (isByRef)
{
codeGen->gcInfo.gcRegByrefSetCur |= regMask;
}
}
else if (lvaIsGCTracked(varDsc))
{
// This isn't in a register, so update the gcVarPtrSetCur to show that it's live on the stack.
VarSetOps::AddElemD(this, codeGen->gcInfo.gcVarPtrSetCur, bornVarIndex);
JITDUMP("\t\t\t\t\t\t\tV%02u becoming live\n", varNum);
}
#ifdef USING_VARIABLE_LIVE_RANGE
codeGen->getVariableLiveKeeper()->siStartVariableLiveRange(varDsc, varNum);
#endif // USING_VARIABLE_LIVE_RANGE
}
#ifdef USING_SCOPE_INFO
codeGen->siUpdate();
#endif // USING_SCOPE_INFO
}
// Need an explicit instantiation.
template void Compiler::compChangeLife<true>(VARSET_VALARG_TP newLife);
/*****************************************************************************
*
* Generate a spill.
*/
void CodeGenInterface::spillReg(var_types type, TempDsc* tmp, regNumber reg)
{
GetEmitter()->emitIns_S_R(ins_Store(type), emitActualTypeSize(type), reg, tmp->tdTempNum(), 0);
}
/*****************************************************************************
*
* Generate a reload.
*/
void CodeGenInterface::reloadReg(var_types type, TempDsc* tmp, regNumber reg)
{
GetEmitter()->emitIns_R_S(ins_Load(type), emitActualTypeSize(type), reg, tmp->tdTempNum(), 0);
}
// inline
regNumber CodeGenInterface::genGetThisArgReg(GenTreeCall* call) const
{
return REG_ARG_0;
}
//----------------------------------------------------------------------
// getSpillTempDsc: get the TempDsc corresponding to a spilled tree.
//
// Arguments:
// tree - spilled GenTree node
//
// Return Value:
// TempDsc corresponding to tree
TempDsc* CodeGenInterface::getSpillTempDsc(GenTree* tree)
{
// tree must be in spilled state.
assert((tree->gtFlags & GTF_SPILLED) != 0);
// Get the tree's SpillDsc.
RegSet::SpillDsc* prevDsc;
RegSet::SpillDsc* spillDsc = regSet.rsGetSpillInfo(tree, tree->GetRegNum(), &prevDsc);
assert(spillDsc != nullptr);
// Get the temp desc.
TempDsc* temp = regSet.rsGetSpillTempWord(tree->GetRegNum(), spillDsc, prevDsc);
return temp;
}
/*****************************************************************************
*
* The following can be used to create basic blocks that serve as labels for
* the emitter. Use with caution - these are not real basic blocks!
*
*/
// inline
BasicBlock* CodeGen::genCreateTempLabel()
{
#ifdef DEBUG
// These blocks don't affect FP
compiler->fgSafeBasicBlockCreation = true;
#endif
BasicBlock* block = compiler->bbNewBasicBlock(BBJ_NONE);
#ifdef DEBUG
compiler->fgSafeBasicBlockCreation = false;
#endif
JITDUMP("Mark " FMT_BB " as label: codegen temp block\n", block->bbNum);
block->bbFlags |= BBF_HAS_LABEL;
// Use coldness of current block, as this label will
// be contained in it.
block->bbFlags |= (compiler->compCurBB->bbFlags & BBF_COLD);
#ifdef DEBUG
#ifdef UNIX_X86_ABI
block->bbTgtStkDepth = (genStackLevel - curNestedAlignment) / sizeof(int);
#else
block->bbTgtStkDepth = genStackLevel / sizeof(int);
#endif
#endif
return block;
}
void CodeGen::genLogLabel(BasicBlock* bb)
{
#ifdef DEBUG
if (compiler->opts.dspCode)
{
printf("\n L_M%03u_" FMT_BB ":\n", compiler->compMethodID, bb->bbNum);
}
#endif
}
// genDefineTempLabel: Define a label based on the current GC info tracked by
// the code generator.
//
// Arguments:
// label - A label represented as a basic block. These are created with
// genCreateTempLabel and are not normal basic blocks.
//
// Notes:
// The label will be defined with the current GC info tracked by the code
// generator. When the emitter sees this label it will thus remove any temporary
// GC refs it is tracking in registers. For example, a call might produce a ref
// in RAX which the emitter would track but which would not be tracked in
// codegen's GC info since codegen would immediately copy it from RAX into its
// home.
//
void CodeGen::genDefineTempLabel(BasicBlock* label)
{
genLogLabel(label);
label->bbEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, false DEBUG_ARG(label));
}
// genDefineInlineTempLabel: Define an inline label that does not affect the GC
// info.
//
// Arguments:
// label - A label represented as a basic block. These are created with
// genCreateTempLabel and are not normal basic blocks.
//
// Notes:
// The emitter will continue to track GC info as if there was no label.
//
void CodeGen::genDefineInlineTempLabel(BasicBlock* label)
{
genLogLabel(label);
label->bbEmitCookie = GetEmitter()->emitAddInlineLabel();
}
//------------------------------------------------------------------------
// genAdjustStackLevel: Adjust the stack level, if required, for a throw helper block
//
// Arguments:
// block - The BasicBlock for which we are about to generate code.
//
// Assumptions:
// Must be called just prior to generating code for 'block'.
//
// Notes:
// This only makes an adjustment if !FEATURE_FIXED_OUT_ARGS, if there is no frame pointer,
// and if 'block' is a throw helper block with a non-zero stack level.
void CodeGen::genAdjustStackLevel(BasicBlock* block)
{
#if !FEATURE_FIXED_OUT_ARGS
// Check for inserted throw blocks and adjust genStackLevel.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(UNIX_X86_ABI)
if (isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block))
{
// x86/Linux requires stack frames to be 16-byte aligned, but SP may be unaligned
        // at this point if a jump to this block is made in the middle of pushing arguments.
//
// Here we restore SP to prevent potential stack alignment issues.
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -genSPtoFPdelta());
}
#endif
if (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block))
{
noway_assert(block->bbFlags & BBF_HAS_LABEL);
SetStackLevel(compiler->fgThrowHlpBlkStkLevel(block) * sizeof(int));
if (genStackLevel != 0)
{
#ifdef TARGET_X86
GetEmitter()->emitMarkStackLvl(genStackLevel);
inst_RV_IV(INS_add, REG_SPBASE, genStackLevel, EA_PTRSIZE);
SetStackLevel(0);
#else // TARGET_X86
NYI("Need emitMarkStackLvl()");
#endif // TARGET_X86
}
}
#endif // !FEATURE_FIXED_OUT_ARGS
}
/*****************************************************************************
*
* Take an address expression and try to find the best set of components to
* form an address mode; returns non-zero if this is successful.
*
* TODO-Cleanup: The RyuJIT backend never uses this to actually generate code.
* Refactor this code so that the underlying analysis can be used in
* the RyuJIT Backend to do lowering, instead of having to call this method with the
* option to not generate the code.
*
* 'fold' specifies if it is OK to fold the array index which hangs off
* a GT_NOP node.
*
* If successful, the parameters will be set to the following values:
*
* *rv1Ptr ... base operand
* *rv2Ptr ... optional operand
* *revPtr ... true if rv2 is before rv1 in the evaluation order
* *mulPtr ... optional multiplier (2/4/8) for rv2
* Note that for [reg1 + reg2] and [reg1 + reg2 + icon], *mulPtr == 0.
* *cnsPtr ... integer constant [optional]
*
* IMPORTANT NOTE: This routine doesn't generate any code, it merely
* identifies the components that might be used to
* form an address mode later on.
*/
bool CodeGen::genCreateAddrMode(
GenTree* addr, bool fold, bool* revPtr, GenTree** rv1Ptr, GenTree** rv2Ptr, unsigned* mulPtr, ssize_t* cnsPtr)
{
/*
The following indirections are valid address modes on x86/x64:
[ icon] * not handled here
[reg ]
[reg + icon]
[reg1 + reg2 ]
[reg1 + reg2 + icon]
[reg1 + 2 * reg2 ]
[reg1 + 4 * reg2 ]
[reg1 + 8 * reg2 ]
[ 2 * reg2 + icon]
[ 4 * reg2 + icon]
[ 8 * reg2 + icon]
[reg1 + 2 * reg2 + icon]
[reg1 + 4 * reg2 + icon]
[reg1 + 8 * reg2 + icon]
The following indirections are valid address modes on arm64:
[reg]
[reg + icon]
[reg1 + reg2]
[reg1 + reg2 * natural-scale]
*/
/* All indirect address modes require the address to be an addition */
if (addr->gtOper != GT_ADD)
{
return false;
}
// Can't use indirect addressing mode as we need to check for overflow.
// Also, can't use 'lea' as it doesn't set the flags.
if (addr->gtOverflow())
{
return false;
}
GenTree* rv1 = nullptr;
GenTree* rv2 = nullptr;
GenTree* op1;
GenTree* op2;
ssize_t cns;
unsigned mul;
GenTree* tmp;
/* What order are the sub-operands to be evaluated */
if (addr->gtFlags & GTF_REVERSE_OPS)
{
op1 = addr->AsOp()->gtOp2;
op2 = addr->AsOp()->gtOp1;
}
else
{
op1 = addr->AsOp()->gtOp1;
op2 = addr->AsOp()->gtOp2;
}
bool rev = false; // Is op2 first in the evaluation order?
/*
A complex address mode can combine the following operands:
op1 ... base address
op2 ... optional scaled index
mul ... optional multiplier (2/4/8) for op2
cns ... optional displacement
Here we try to find such a set of operands and arrange for these
to sit in registers.
*/
cns = 0;
mul = 0;
AGAIN:
/* We come back to 'AGAIN' if we have an add of a constant, and we are folding that
constant, or we have gone through a GT_NOP or GT_COMMA node. We never come back
here if we find a scaled index.
*/
CLANG_FORMAT_COMMENT_ANCHOR;
assert(mul == 0);
/* Special case: keep constants as 'op2', but don't do this for constant handles
because they don't fit in an I32, which is what we're going to check for below anyway. */
if (op1->IsCnsIntOrI() && !op1->IsIconHandle())
{
// Presumably op2 is not also a constant (that shouldn't happen if we've done constant folding).
tmp = op1;
op1 = op2;
op2 = tmp;
}
/* Check for an addition of a constant */
if (op2->IsIntCnsFitsInI32() && (op2->gtType != TYP_REF) && FitsIn<INT32>(cns + op2->AsIntConCommon()->IconValue()))
{
// We should not be building address modes out of non-foldable constants
assert(op2->AsIntConCommon()->ImmedValCanBeFolded(compiler, addr->OperGet()));
/* We're adding a constant */
cns += op2->AsIntConCommon()->IconValue();
#if defined(TARGET_ARMARCH)
if (cns == 0)
#endif
{
/* Inspect the operand the constant is being added to */
switch (op1->gtOper)
{
case GT_ADD:
if (op1->gtOverflow())
{
break;
}
op2 = op1->AsOp()->gtOp2;
op1 = op1->AsOp()->gtOp1;
goto AGAIN;
#if !defined(TARGET_ARMARCH)
// TODO-ARM64-CQ, TODO-ARM-CQ: For now we don't try to create a scaled index.
case GT_MUL:
if (op1->gtOverflow())
{
return false; // Need overflow check
}
FALLTHROUGH;
case GT_LSH:
mul = op1->GetScaledIndex();
if (mul)
{
/* We can use "[mul*rv2 + icon]" */
rv1 = nullptr;
rv2 = op1->AsOp()->gtOp1;
goto FOUND_AM;
}
break;
#endif // !defined(TARGET_ARMARCH)
default:
break;
}
}
/* The best we can do is "[rv1 + icon]" */
rv1 = op1;
rv2 = nullptr;
goto FOUND_AM;
}
// op2 is not a constant. So keep on trying.
/* Neither op1 nor op2 are sitting in a register right now */
switch (op1->gtOper)
{
#if !defined(TARGET_ARMARCH)
// TODO-ARM64-CQ, TODO-ARM-CQ: For now we don't try to create a scaled index.
case GT_ADD:
if (op1->gtOverflow())
{
break;
}
if (op1->AsOp()->gtOp2->IsIntCnsFitsInI32() &&
FitsIn<INT32>(cns + op1->AsOp()->gtOp2->AsIntCon()->gtIconVal))
{
cns += op1->AsOp()->gtOp2->AsIntCon()->gtIconVal;
op1 = op1->AsOp()->gtOp1;
goto AGAIN;
}
break;
case GT_MUL:
if (op1->gtOverflow())
{
break;
}
FALLTHROUGH;
case GT_LSH:
mul = op1->GetScaledIndex();
if (mul)
{
/* 'op1' is a scaled value */
rv1 = op2;
rv2 = op1->AsOp()->gtOp1;
int argScale;
while ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (argScale = rv2->GetScaledIndex()) != 0)
{
if (jitIsScaleIndexMul(argScale * mul))
{
mul = mul * argScale;
rv2 = rv2->AsOp()->gtOp1;
}
else
{
break;
}
}
noway_assert(rev == false);
rev = true;
goto FOUND_AM;
}
break;
#endif // !TARGET_ARMARCH
case GT_NOP:
op1 = op1->AsOp()->gtOp1;
goto AGAIN;
case GT_COMMA:
op1 = op1->AsOp()->gtOp2;
goto AGAIN;
default:
break;
}
noway_assert(op2);
switch (op2->gtOper)
{
#if !defined(TARGET_ARMARCH)
// TODO-ARM64-CQ, TODO-ARM-CQ: For now we only handle MUL and LSH because
// arm doesn't support both a scale and an offset at the same time. The offset is
// handled at the emitter as a peephole optimization.
case GT_ADD:
if (op2->gtOverflow())
{
break;
}
if (op2->AsOp()->gtOp2->IsIntCnsFitsInI32() &&
FitsIn<INT32>(cns + op2->AsOp()->gtOp2->AsIntCon()->gtIconVal))
{
cns += op2->AsOp()->gtOp2->AsIntCon()->gtIconVal;
op2 = op2->AsOp()->gtOp1;
goto AGAIN;
}
break;
case GT_MUL:
if (op2->gtOverflow())
{
break;
}
FALLTHROUGH;
case GT_LSH:
mul = op2->GetScaledIndex();
if (mul)
{
// 'op2' is a scaled value... is its argument also scaled?
int argScale;
rv2 = op2->AsOp()->gtOp1;
while ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (argScale = rv2->GetScaledIndex()) != 0)
{
if (jitIsScaleIndexMul(argScale * mul))
{
mul = mul * argScale;
rv2 = rv2->AsOp()->gtOp1;
}
else
{
break;
}
}
rv1 = op1;
goto FOUND_AM;
}
break;
#endif // !TARGET_ARMARCH
case GT_NOP:
op2 = op2->AsOp()->gtOp1;
goto AGAIN;
case GT_COMMA:
op2 = op2->AsOp()->gtOp2;
goto AGAIN;
default:
break;
}
/* The best we can do "[rv1 + rv2]" or "[rv1 + rv2 + cns]" */
rv1 = op1;
rv2 = op2;
#ifdef TARGET_ARM64
assert(cns == 0);
#endif
FOUND_AM:
if (rv2)
{
/* Make sure a GC address doesn't end up in 'rv2' */
if (varTypeIsGC(rv2->TypeGet()))
{
noway_assert(rv1 && !varTypeIsGC(rv1->TypeGet()));
tmp = rv1;
rv1 = rv2;
rv2 = tmp;
rev = !rev;
}
/* Special case: constant array index (that is range-checked) */
if (fold)
{
ssize_t tmpMul;
GenTree* index;
if ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (rv2->AsOp()->gtOp2->IsCnsIntOrI()))
{
/* For valuetype arrays where we can't use the scaled address
mode, rv2 will point to the scaled index. So we have to do
more work */
tmpMul = compiler->optGetArrayRefScaleAndIndex(rv2, &index DEBUGARG(false));
if (mul)
{
tmpMul *= mul;
}
}
else
{
/* May be a simple array. rv2 will point to the actual index */
index = rv2;
tmpMul = mul;
}
/* Get hold of the array index and see if it's a constant */
if (index->IsIntCnsFitsInI32())
{
/* Get hold of the index value */
ssize_t ixv = index->AsIntConCommon()->IconValue();
/* Scale the index if necessary */
if (tmpMul)
{
ixv *= tmpMul;
}
if (FitsIn<INT32>(cns + ixv))
{
/* Add the scaled index to the offset value */
cns += ixv;
/* There is no scaled operand any more */
mul = 0;
rv2 = nullptr;
}
}
}
}
// We shouldn't have [rv2*1 + cns] - this is equivalent to [rv1 + cns]
noway_assert(rv1 || mul != 1);
noway_assert(FitsIn<INT32>(cns));
if (rv1 == nullptr && rv2 == nullptr)
{
return false;
}
/* Success - return the various components to the caller */
*revPtr = rev;
*rv1Ptr = rv1;
*rv2Ptr = rv2;
*mulPtr = mul;
*cnsPtr = cns;
return true;
}
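// As an illustrative example (x86/x64): an address tree shaped like
//     ADD(ADD(lclA, 8), LSH(lclB, 2))
// would typically decompose into rv1 = lclA, rv2 = lclB, mul = 4, cns = 8,
// i.e. the address mode [rv1 + 4*rv2 + 8]. The exact result depends on the tree
// shape, overflow flags and the 'fold' argument, as handled above.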
/*****************************************************************************
*
* Generate an exit sequence for a return from a method (note: when compiling
* for speed there might be multiple exit points).
*/
void CodeGen::genExitCode(BasicBlock* block)
{
/* Just wrote the first instruction of the epilog - inform the debugger.
Note that this may result in a duplicate IPmapping entry, and
that this is ok. */
// For non-optimized debuggable code, there is only one epilog.
genIPmappingAdd(IPmappingDscKind::Epilog, DebugInfo(), true);
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
if (compiler->getNeedsGSSecurityCookie())
{
genEmitGSCookieCheck(jmpEpilog);
if (jmpEpilog)
{
// Dev10 642944 -
// The GS cookie check created a temp label that has no live
// incoming GC registers, we need to fix that
unsigned varNum;
LclVarDsc* varDsc;
/* Figure out which register parameters hold pointers */
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount && varDsc->lvIsRegArg;
varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
gcInfo.gcMarkRegPtrVal(varDsc->GetArgReg(), varDsc->TypeGet());
}
GetEmitter()->emitThisGCrefRegs = GetEmitter()->emitInitGCrefRegs = gcInfo.gcRegGCrefSetCur;
GetEmitter()->emitThisByrefRegs = GetEmitter()->emitInitByrefRegs = gcInfo.gcRegByrefSetCur;
}
}
genReserveEpilog(block);
}
//------------------------------------------------------------------------
// genJumpToThrowHlpBlk: Generate code for an out-of-line exception.
//
// Notes:
// For code that uses throw helper blocks, we share the helper blocks created by fgAddCodeRef().
// Otherwise, we generate the 'throw' inline.
//
// Arguments:
// jumpKind - jump kind to generate;
// codeKind - the special throw-helper kind;
// failBlk - optional fail target block, if it is already known;
//
void CodeGen::genJumpToThrowHlpBlk(emitJumpKind jumpKind, SpecialCodeKind codeKind, BasicBlock* failBlk)
{
bool useThrowHlpBlk = compiler->fgUseThrowHelperBlocks();
#if defined(UNIX_X86_ABI) && defined(FEATURE_EH_FUNCLETS)
// Inline exception-throwing code in funclet to make it possible to unwind funclet frames.
useThrowHlpBlk = useThrowHlpBlk && (compiler->funCurrentFunc()->funKind == FUNC_ROOT);
#endif // UNIX_X86_ABI && FEATURE_EH_FUNCLETS
if (useThrowHlpBlk)
{
// For code with throw helper blocks, find and use the helper block for
// raising the exception. The block may be shared by other trees too.
BasicBlock* excpRaisingBlock;
if (failBlk != nullptr)
{
// We already know which block to jump to. Use that.
excpRaisingBlock = failBlk;
#ifdef DEBUG
Compiler::AddCodeDsc* add =
compiler->fgFindExcptnTarget(codeKind, compiler->bbThrowIndex(compiler->compCurBB));
assert(excpRaisingBlock == add->acdDstBlk);
#if !FEATURE_FIXED_OUT_ARGS
assert(add->acdStkLvlInit || isFramePointerUsed());
#endif // !FEATURE_FIXED_OUT_ARGS
#endif // DEBUG
}
else
{
// Find the helper-block which raises the exception.
Compiler::AddCodeDsc* add =
compiler->fgFindExcptnTarget(codeKind, compiler->bbThrowIndex(compiler->compCurBB));
PREFIX_ASSUME_MSG((add != nullptr), ("ERROR: failed to find exception throw block"));
excpRaisingBlock = add->acdDstBlk;
#if !FEATURE_FIXED_OUT_ARGS
assert(add->acdStkLvlInit || isFramePointerUsed());
#endif // !FEATURE_FIXED_OUT_ARGS
}
noway_assert(excpRaisingBlock != nullptr);
// Jump to the exception-throwing block on error.
inst_JMP(jumpKind, excpRaisingBlock);
}
else
{
// The code to throw the exception will be generated inline, and
// we will jump around it in the normal non-exception case.
BasicBlock* tgtBlk = nullptr;
emitJumpKind reverseJumpKind = emitter::emitReverseJumpKind(jumpKind);
if (reverseJumpKind != jumpKind)
{
tgtBlk = genCreateTempLabel();
inst_JMP(reverseJumpKind, tgtBlk);
}
genEmitHelperCall(compiler->acdHelper(codeKind), 0, EA_UNKNOWN);
// Define the spot for the normal non-exception case to jump to.
if (tgtBlk != nullptr)
{
assert(reverseJumpKind != jumpKind);
genDefineTempLabel(tgtBlk);
}
}
}
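// As an illustrative sketch of the no-helper-block path above, for a bounds check
// on x86-like targets the emitted shape is roughly (pseudo-assembly; the actual
// helper depends on 'codeKind'):
//
//         cmp  index, length
//         jb   L_ok                      ; reversed jump around the throw
//         call CORINFO_HELP_RNGCHKFAIL   ; acdHelper(SCK_RNGCHK_FAIL)
//     L_ok:
//         ...
//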
/*****************************************************************************
*
* The last operation done was generating code for "tree" and that would
* have set the flags. Check if the operation caused an overflow.
*/
// inline
void CodeGen::genCheckOverflow(GenTree* tree)
{
// Overflow-check should be asked for this tree
noway_assert(tree->gtOverflow());
const var_types type = tree->TypeGet();
// Overflow checks can only occur for the non-small types: (i.e. TYP_INT,TYP_LONG)
noway_assert(!varTypeIsSmall(type));
emitJumpKind jumpKind;
#ifdef TARGET_ARM64
if (tree->OperGet() == GT_MUL)
{
jumpKind = EJ_ne;
}
else
#endif
{
bool isUnsignedOverflow = ((tree->gtFlags & GTF_UNSIGNED) != 0);
#if defined(TARGET_XARCH)
jumpKind = isUnsignedOverflow ? EJ_jb : EJ_jo;
#elif defined(TARGET_ARMARCH)
jumpKind = isUnsignedOverflow ? EJ_lo : EJ_vs;
if (jumpKind == EJ_lo)
{
if (tree->OperGet() != GT_SUB)
{
jumpKind = EJ_hs;
}
}
#endif // defined(TARGET_ARMARCH)
}
// Jump to the block which will throw the exception.
genJumpToThrowHlpBlk(jumpKind, SCK_OVERFLOW);
}
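// For reference, the overflow jump kinds chosen above are (derived from the code,
// not from a separate spec):
//
//                      signed overflow   unsigned overflow
//     XARCH            EJ_jo             EJ_jb
//     ARM/ARM64        EJ_vs             EJ_lo (GT_SUB) / EJ_hs (otherwise)
//     ARM64 GT_MUL     EJ_ne for both
//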
#if defined(FEATURE_EH_FUNCLETS)
/*****************************************************************************
*
* Update the current funclet to the one that 'block' begins, when 'block' is
* marked BBF_FUNCLET_BEG. For all other blocks, assert that the currently
* recorded funclet is up-to-date.
*
*/
void CodeGen::genUpdateCurrentFunclet(BasicBlock* block)
{
if (block->bbFlags & BBF_FUNCLET_BEG)
{
compiler->funSetCurrentFunc(compiler->funGetFuncIdx(block));
if (compiler->funCurrentFunc()->funKind == FUNC_FILTER)
{
assert(compiler->ehGetDsc(compiler->funCurrentFunc()->funEHIndex)->ebdFilter == block);
}
else
{
// We shouldn't see FUNC_ROOT
assert(compiler->funCurrentFunc()->funKind == FUNC_HANDLER);
assert(compiler->ehGetDsc(compiler->funCurrentFunc()->funEHIndex)->ebdHndBeg == block);
}
}
else
{
assert(compiler->compCurrFuncIdx <= compiler->compFuncInfoCount);
if (compiler->funCurrentFunc()->funKind == FUNC_FILTER)
{
assert(compiler->ehGetDsc(compiler->funCurrentFunc()->funEHIndex)->InFilterRegionBBRange(block));
}
else if (compiler->funCurrentFunc()->funKind == FUNC_ROOT)
{
assert(!block->hasHndIndex());
}
else
{
assert(compiler->funCurrentFunc()->funKind == FUNC_HANDLER);
assert(compiler->ehGetDsc(compiler->funCurrentFunc()->funEHIndex)->InHndRegionBBRange(block));
}
}
}
#endif // FEATURE_EH_FUNCLETS
//----------------------------------------------------------------------
// genGenerateCode: Generate code for the function.
//
// Arguments:
// codePtr [OUT] - address of generated code
// nativeSizeOfCode [OUT] - length of generated code in bytes
//
void CodeGen::genGenerateCode(void** codePtr, uint32_t* nativeSizeOfCode)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genGenerateCode()\n");
compiler->fgDispBasicBlocks(compiler->verboseTrees);
}
#endif
this->codePtr = codePtr;
this->nativeSizeOfCode = nativeSizeOfCode;
DoPhase(this, PHASE_GENERATE_CODE, &CodeGen::genGenerateMachineCode);
DoPhase(this, PHASE_EMIT_CODE, &CodeGen::genEmitMachineCode);
DoPhase(this, PHASE_EMIT_GCEH, &CodeGen::genEmitUnwindDebugGCandEH);
}
//----------------------------------------------------------------------
// genGenerateMachineCode -- determine which machine instructions to emit
//
void CodeGen::genGenerateMachineCode()
{
#ifdef DEBUG
genInterruptibleUsed = true;
compiler->fgDebugCheckBBlist();
#endif // DEBUG
/* This is the real thing */
genPrepForCompiler();
/* Prepare the emitter */
GetEmitter()->Init();
#ifdef DEBUG
VarSetOps::AssignNoCopy(compiler, genTempOldLife, VarSetOps::MakeEmpty(compiler));
#endif
#ifdef DEBUG
if (compiler->opts.disAsmSpilled && regSet.rsNeededSpillReg)
{
compiler->opts.disAsm = true;
}
if (compiler->opts.disAsm)
{
printf("; Assembly listing for method %s\n", compiler->info.compFullName);
printf("; Emitting ");
if (compiler->compCodeOpt() == Compiler::SMALL_CODE)
{
printf("SMALL_CODE");
}
else if (compiler->compCodeOpt() == Compiler::FAST_CODE)
{
printf("FAST_CODE");
}
else
{
printf("BLENDED_CODE");
}
printf(" for ");
if (compiler->info.genCPU == CPU_X86)
{
printf("generic X86 CPU");
}
else if (compiler->info.genCPU == CPU_X86_PENTIUM_4)
{
printf("Pentium 4");
}
else if (compiler->info.genCPU == CPU_X64)
{
if (compiler->canUseVexEncoding())
{
printf("X64 CPU with AVX");
}
else
{
printf("X64 CPU with SSE2");
}
}
else if (compiler->info.genCPU == CPU_ARM)
{
printf("generic ARM CPU");
}
else if (compiler->info.genCPU == CPU_ARM64)
{
printf("generic ARM64 CPU");
}
else
{
printf("unknown architecture");
}
if (TargetOS::IsWindows)
{
printf(" - Windows");
}
else if (TargetOS::IsMacOS)
{
printf(" - MacOS");
}
else if (TargetOS::IsUnix)
{
printf(" - Unix");
}
printf("\n");
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0))
{
printf("; Tier-0 compilation\n");
}
else if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1))
{
printf("; Tier-1 compilation\n");
}
else if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN))
{
printf("; ReadyToRun compilation\n");
}
if (compiler->opts.IsOSR())
{
printf("; OSR variant for entry point 0x%x\n", compiler->info.compILEntry);
}
if ((compiler->opts.compFlags & CLFLG_MAXOPT) == CLFLG_MAXOPT)
{
printf("; optimized code\n");
}
else if (compiler->opts.compDbgCode)
{
printf("; debuggable code\n");
}
else if (compiler->opts.MinOpts())
{
printf("; MinOpts code\n");
}
else
{
printf("; unknown optimization flags\n");
}
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR))
{
printf("; instrumented for collecting profile data\n");
}
else if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) && compiler->fgHaveProfileData())
{
printf("; optimized using profile data\n");
}
#if DOUBLE_ALIGN
if (compiler->genDoubleAlign())
printf("; double-aligned frame\n");
else
#endif
printf("; %s based frame\n", isFramePointerUsed() ? STR_FPBASE : STR_SPBASE);
if (GetInterruptible())
{
printf("; fully interruptible\n");
}
else
{
printf("; partially interruptible\n");
}
if (compiler->fgHaveProfileData())
{
printf("; with PGO: edge weights are %s, and fgCalledCount is " FMT_WT "\n",
compiler->fgHaveValidEdgeWeights ? "valid" : "invalid", compiler->fgCalledCount);
}
if (compiler->fgPgoFailReason != nullptr)
{
printf("; %s\n", compiler->fgPgoFailReason);
}
if ((compiler->fgPgoInlineePgo + compiler->fgPgoInlineeNoPgo + compiler->fgPgoInlineeNoPgoSingleBlock) > 0)
{
printf("; %u inlinees with PGO data; %u single block inlinees; %u inlinees without PGO data\n",
compiler->fgPgoInlineePgo, compiler->fgPgoInlineeNoPgoSingleBlock, compiler->fgPgoInlineeNoPgo);
}
if (compiler->opts.IsCFGEnabled())
{
printf("; control-flow guard enabled\n");
}
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT))
{
printf("; invoked as altjit\n");
}
}
#endif // DEBUG
// We compute the final frame layout before code generation. This is because LSRA
// has already computed exactly the maximum concurrent number of spill temps of each type that are
// required during code generation. So, there is nothing left to estimate: we can be precise in the frame
// layout. This helps us generate smaller code, and allocate, after code generation, a smaller amount of
// memory from the VM.
genFinalizeFrame();
unsigned maxTmpSize = regSet.tmpGetTotalSize(); // This is precise after LSRA has pre-allocated the temps.
GetEmitter()->emitBegFN(isFramePointerUsed()
#if defined(DEBUG)
,
(compiler->compCodeOpt() != Compiler::SMALL_CODE) &&
!compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)
#endif
,
maxTmpSize);
/* Now generate code for the function */
genCodeForBBlist();
#ifdef DEBUG
// After code generation, dump the frame layout again. It should be the same as before code generation, if code
// generation hasn't touched it (it shouldn't!).
if (verbose)
{
compiler->lvaTableDump();
}
#endif // DEBUG
/* We can now generate the function prolog and epilog */
genGeneratePrologsAndEpilogs();
/* Bind jump distances */
GetEmitter()->emitJumpDistBind();
#if FEATURE_LOOP_ALIGN
/* Perform alignment adjustments */
GetEmitter()->emitLoopAlignAdjustments();
#endif
/* The code is now complete and final; it should not change after this. */
}
//----------------------------------------------------------------------
// genEmitMachineCode -- emit the actual machine instruction code
//
void CodeGen::genEmitMachineCode()
{
/* Compute the size of the code sections that we are going to ask the VM
to allocate. Note that this might not be precisely the size of the
code we emit, though it's fatal if we emit more code than the size we
compute here.
(Note: an example of a case where we emit less code would be useful.)
*/
GetEmitter()->emitComputeCodeSizes();
#ifdef DEBUG
unsigned instrCount;
// Code to test or stress our ability to run a fallback compile.
// We trigger the fallback here, before asking the VM for any memory,
// because if not, we will leak mem, as the current codebase can't free
// the mem after the emitter asks the VM for it. As this is only a stress
// mode, we only want the functionality, and don't care about the relative
// ugliness of having the failure here.
if (!compiler->jitFallbackCompile)
{
// Use COMPlus_JitNoForceFallback=1 to prevent NOWAY assert testing from happening,
// especially that caused by enabling JIT stress.
if (!JitConfig.JitNoForceFallback())
{
if (JitConfig.JitForceFallback() || compiler->compStressCompile(Compiler::STRESS_GENERIC_VARN, 5))
{
JITDUMP("\n\n*** forcing no-way fallback -- current jit request will be abandoned ***\n\n");
NO_WAY_NOASSERT("Stress failure");
}
}
}
#endif // DEBUG
/* We've finished collecting all the unwind information for the function. Now reserve
space for it from the VM.
*/
compiler->unwindReserve();
bool trackedStackPtrsContig; // are tracked stk-ptrs contiguous ?
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
trackedStackPtrsContig = false;
#elif defined(TARGET_ARM)
// On arm due to prespilling of arguments, tracked stk-ptrs may not be contiguous
trackedStackPtrsContig = !compiler->opts.compDbgEnC && !compiler->compIsProfilerHookNeeded();
#else
trackedStackPtrsContig = !compiler->opts.compDbgEnC;
#endif
codeSize = GetEmitter()->emitEndCodeGen(compiler, trackedStackPtrsContig, GetInterruptible(),
IsFullPtrRegMapRequired(), compiler->compHndBBtabCount, &prologSize,
&epilogSize, codePtr, &coldCodePtr, &consPtr DEBUGARG(&instrCount));
#ifdef DEBUG
assert(compiler->compCodeGenDone == false);
/* We're done generating code for this function */
compiler->compCodeGenDone = true;
#endif
#if defined(DEBUG) || defined(LATE_DISASM)
// Add code size information into the Perf Score
// All compPerfScore calculations must be performed using doubles
compiler->info.compPerfScore += ((double)compiler->info.compTotalHotCodeSize * (double)PERFSCORE_CODESIZE_COST_HOT);
compiler->info.compPerfScore +=
((double)compiler->info.compTotalColdCodeSize * (double)PERFSCORE_CODESIZE_COST_COLD);
#endif // DEBUG || LATE_DISASM
#ifdef DEBUG
if (compiler->opts.disAsm || verbose)
{
printf("\n; Total bytes of code %d, prolog size %d, PerfScore %.2f, instruction count %d, allocated bytes for "
"code %d",
codeSize, prologSize, compiler->info.compPerfScore, instrCount,
GetEmitter()->emitTotalHotCodeSize + GetEmitter()->emitTotalColdCodeSize);
#if TRACK_LSRA_STATS
if (JitConfig.DisplayLsraStats() == 3)
{
compiler->m_pLinearScan->dumpLsraStatsSummary(jitstdout);
}
#endif // TRACK_LSRA_STATS
printf(" (MethodHash=%08x) for method %s\n", compiler->info.compMethodHash(), compiler->info.compFullName);
printf("; ============================================================\n\n");
printf(""); // in our logic this causes a flush
}
if (verbose)
{
printf("*************** After end code gen, before unwindEmit()\n");
GetEmitter()->emitDispIGlist(true);
}
#endif
#if EMIT_TRACK_STACK_DEPTH && defined(DEBUG_ARG_SLOTS)
// Check our max stack level. Needed for fgAddCodeRef().
// We need to relax the assert as our estimation won't include code-gen
// stack changes (which we know don't affect fgAddCodeRef()).
// NOTE: after emitEndCodeGen (including here), emitMaxStackDepth is a
// count of DWORD-sized arguments, NOT argument size in bytes.
{
unsigned maxAllowedStackDepth = compiler->fgGetPtrArgCntMax() + // Max number of pointer-sized stack arguments.
compiler->compHndBBtabCount + // Return address for locally-called finallys
genTypeStSz(TYP_LONG) + // longs/doubles may be transferred via stack, etc
(compiler->compTailCallUsed ? 4 : 0); // CORINFO_HELP_TAILCALL args
#if defined(UNIX_X86_ABI)
// Convert maxNestedAlignment to DWORD count before adding to maxAllowedStackDepth.
assert(maxNestedAlignment % sizeof(int) == 0);
maxAllowedStackDepth += maxNestedAlignment / sizeof(int);
#endif
assert(GetEmitter()->emitMaxStackDepth <= maxAllowedStackDepth);
}
#endif // EMIT_TRACK_STACK_DEPTH && DEBUG_ARG_SLOTS
*nativeSizeOfCode = codeSize;
compiler->info.compNativeCodeSize = (UNATIVE_OFFSET)codeSize;
// printf("%6u bytes of code generated for %s.%s\n", codeSize, compiler->info.compFullName);
// Make sure that the x86 alignment and cache prefetch optimization rules
// were obeyed.
// Don't start a method in the last 7 bytes of a 16-byte alignment area
// unless we are generating SMALL_CODE
// noway_assert( (((unsigned)(*codePtr) % 16) <= 8) || (compiler->compCodeOpt() == SMALL_CODE));
}
//----------------------------------------------------------------------
// genEmitUnwindDebugGCandEH: emit unwind, debug, gc, and EH info
//
void CodeGen::genEmitUnwindDebugGCandEH()
{
/* Now that the code is issued, we can finalize and emit the unwind data */
compiler->unwindEmit(*codePtr, coldCodePtr);
/* Finalize the line # tracking logic after we know the exact block sizes/offsets */
genIPmappingGen();
INDEBUG(genDumpPreciseDebugInfo());
/* Finalize the Local Var info in terms of generated code */
genSetScopeInfo();
#if defined(USING_VARIABLE_LIVE_RANGE) && defined(DEBUG)
if (compiler->verbose)
{
varLiveKeeper->dumpLvaVariableLiveRanges();
}
#endif // defined(USING_VARIABLE_LIVE_RANGE) && defined(DEBUG)
#ifdef LATE_DISASM
unsigned finalHotCodeSize;
unsigned finalColdCodeSize;
if (compiler->fgFirstColdBlock != nullptr)
{
// We did some hot/cold splitting. The hot section is always padded out to the
// size we thought it would be, but the cold section is not.
assert(codeSize <= compiler->info.compTotalHotCodeSize + compiler->info.compTotalColdCodeSize);
assert(compiler->info.compTotalHotCodeSize > 0);
assert(compiler->info.compTotalColdCodeSize > 0);
finalHotCodeSize = compiler->info.compTotalHotCodeSize;
finalColdCodeSize = codeSize - finalHotCodeSize;
}
else
{
// No hot/cold splitting
assert(codeSize <= compiler->info.compTotalHotCodeSize);
assert(compiler->info.compTotalHotCodeSize > 0);
assert(compiler->info.compTotalColdCodeSize == 0);
finalHotCodeSize = codeSize;
finalColdCodeSize = 0;
}
getDisAssembler().disAsmCode((BYTE*)*codePtr, finalHotCodeSize, (BYTE*)coldCodePtr, finalColdCodeSize);
#endif // LATE_DISASM
/* Report any exception handlers to the VM */
genReportEH();
#ifdef JIT32_GCENCODER
#ifdef DEBUG
void* infoPtr =
#endif // DEBUG
#endif
// Create and store the GC info for this method.
genCreateAndStoreGCInfo(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
#ifdef DEBUG
FILE* dmpf = jitstdout;
compiler->opts.dmpHex = false;
if (!strcmp(compiler->info.compMethodName, "<name of method you want the hex dump for"))
{
FILE* codf;
errno_t ec = fopen_s(&codf, "C:\\JIT.COD", "at"); // NOTE: file append mode
if (ec == 0) // fopen_s returns zero on success
{
assert(codf);
dmpf = codf;
compiler->opts.dmpHex = true;
}
}
if (compiler->opts.dmpHex)
{
size_t consSize = GetEmitter()->emitDataSize();
fprintf(dmpf, "Generated code for %s:\n", compiler->info.compFullName);
fprintf(dmpf, "\n");
if (codeSize)
{
fprintf(dmpf, " Code at %p [%04X bytes]\n", dspPtr(*codePtr), codeSize);
}
if (consSize)
{
fprintf(dmpf, " Const at %p [%04X bytes]\n", dspPtr(consPtr), consSize);
}
#ifdef JIT32_GCENCODER
size_t infoSize = compiler->compInfoBlkSize;
if (infoSize)
fprintf(dmpf, " Info at %p [%04X bytes]\n", dspPtr(infoPtr), infoSize);
#endif // JIT32_GCENCODER
fprintf(dmpf, "\n");
if (codeSize)
{
hexDump(dmpf, "Code", (BYTE*)*codePtr, codeSize);
}
if (consSize)
{
hexDump(dmpf, "Const", (BYTE*)consPtr, consSize);
}
#ifdef JIT32_GCENCODER
if (infoSize)
hexDump(dmpf, "Info", (BYTE*)infoPtr, infoSize);
#endif // JIT32_GCENCODER
fflush(dmpf);
}
if (dmpf != jitstdout)
{
fclose(dmpf);
}
#endif // DEBUG
/* Tell the emitter that we're done with this function */
GetEmitter()->emitEndFN();
/* Shut down the spill logic */
regSet.rsSpillDone();
/* Shut down the temp logic */
regSet.tmpDone();
#if DISPLAY_SIZES
size_t dataSize = GetEmitter()->emitDataSize();
grossVMsize += compiler->info.compILCodeSize;
totalNCsize += codeSize + dataSize + compiler->compInfoBlkSize;
grossNCsize += codeSize + dataSize;
#endif // DISPLAY_SIZES
}
/*****************************************************************************
*
* Report EH clauses to the VM
*/
void CodeGen::genReportEH()
{
if (compiler->compHndBBtabCount == 0)
{
return;
}
#ifdef DEBUG
if (compiler->opts.dspEHTable)
{
printf("*************** EH table for %s\n", compiler->info.compFullName);
}
#endif // DEBUG
unsigned XTnum;
bool isCoreRTABI = compiler->IsTargetAbi(CORINFO_CORERT_ABI);
unsigned EHCount = compiler->compHndBBtabCount;
#if defined(FEATURE_EH_FUNCLETS)
// Count duplicated clauses. This uses the same logic as below, where we actually generate them for reporting to the
// VM.
unsigned duplicateClauseCount = 0;
unsigned enclosingTryIndex;
// Duplicate clauses are not used by CoreRT ABI
if (!isCoreRTABI)
{
for (XTnum = 0; XTnum < compiler->compHndBBtabCount; XTnum++)
{
for (enclosingTryIndex = compiler->ehTrueEnclosingTryIndexIL(XTnum); // find the true enclosing try index,
// ignoring 'mutual protect' trys
enclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX;
enclosingTryIndex = compiler->ehGetEnclosingTryIndex(enclosingTryIndex))
{
++duplicateClauseCount;
}
}
EHCount += duplicateClauseCount;
}
#if FEATURE_EH_CALLFINALLY_THUNKS
unsigned clonedFinallyCount = 0;
// Duplicate clauses are not used by CoreRT ABI
if (!isCoreRTABI)
{
// We don't keep track of how many cloned finallys there are. So, go through and count.
// We do a quick pass first through the EH table to see if there are any try/finally
// clauses. If there aren't, we don't need to look for BBJ_CALLFINALLY.
bool anyFinallys = false;
for (EHblkDsc* const HBtab : EHClauses(compiler))
{
if (HBtab->HasFinallyHandler())
{
anyFinallys = true;
break;
}
}
if (anyFinallys)
{
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
++clonedFinallyCount;
}
}
EHCount += clonedFinallyCount;
}
}
#endif // FEATURE_EH_CALLFINALLY_THUNKS
#endif // FEATURE_EH_FUNCLETS
#ifdef DEBUG
if (compiler->opts.dspEHTable)
{
#if defined(FEATURE_EH_FUNCLETS)
#if FEATURE_EH_CALLFINALLY_THUNKS
printf("%d EH table entries, %d duplicate clauses, %d cloned finallys, %d total EH entries reported to VM\n",
compiler->compHndBBtabCount, duplicateClauseCount, clonedFinallyCount, EHCount);
assert(compiler->compHndBBtabCount + duplicateClauseCount + clonedFinallyCount == EHCount);
#else // !FEATURE_EH_CALLFINALLY_THUNKS
printf("%d EH table entries, %d duplicate clauses, %d total EH entries reported to VM\n",
compiler->compHndBBtabCount, duplicateClauseCount, EHCount);
assert(compiler->compHndBBtabCount + duplicateClauseCount == EHCount);
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
#else // !FEATURE_EH_FUNCLETS
printf("%d EH table entries, %d total EH entries reported to VM\n", compiler->compHndBBtabCount, EHCount);
assert(compiler->compHndBBtabCount == EHCount);
#endif // !FEATURE_EH_FUNCLETS
}
#endif // DEBUG
// Tell the VM how many EH clauses to expect.
compiler->eeSetEHcount(EHCount);
XTnum = 0; // This is the index we pass to the VM
for (EHblkDsc* const HBtab : EHClauses(compiler))
{
UNATIVE_OFFSET tryBeg, tryEnd, hndBeg, hndEnd, hndTyp;
tryBeg = compiler->ehCodeOffset(HBtab->ebdTryBeg);
hndBeg = compiler->ehCodeOffset(HBtab->ebdHndBeg);
tryEnd = (HBtab->ebdTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(HBtab->ebdTryLast->bbNext);
hndEnd = (HBtab->ebdHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(HBtab->ebdHndLast->bbNext);
if (HBtab->HasFilter())
{
hndTyp = compiler->ehCodeOffset(HBtab->ebdFilter);
}
else
{
hndTyp = HBtab->ebdTyp;
}
CORINFO_EH_CLAUSE_FLAGS flags = ToCORINFO_EH_CLAUSE_FLAGS(HBtab->ebdHandlerType);
if (isCoreRTABI && (XTnum > 0))
{
// For CoreRT, the CORINFO_EH_CLAUSE_SAMETRY flag means that the current clause covers the same
// try block as the previous one. The runtime cannot reliably infer this information from
// native code offsets because different try blocks can have the same offsets. An alternative
// solution to this problem would be to insert extra nops to ensure that different try
// blocks have different offsets.
if (EHblkDsc::ebdIsSameTry(HBtab, HBtab - 1))
{
// The SAMETRY bit should only be set on catch clauses. This is ensured in IL, where only 'catch' is
// allowed to be mutually-protect. E.g., the C# "try {} catch {} catch {} finally {}" actually exists in
// IL as "try { try {} catch {} catch {} } finally {}".
assert(HBtab->HasCatchHandler());
flags = (CORINFO_EH_CLAUSE_FLAGS)(flags | CORINFO_EH_CLAUSE_SAMETRY);
}
}
// Note that we reuse the CORINFO_EH_CLAUSE type, even though the names of
// the fields aren't accurate.
CORINFO_EH_CLAUSE clause;
clause.ClassToken = hndTyp; /* filter offset is passed back here for filter-based exception handlers */
clause.Flags = flags;
clause.TryOffset = tryBeg;
clause.TryLength = tryEnd;
clause.HandlerOffset = hndBeg;
clause.HandlerLength = hndEnd;
assert(XTnum < EHCount);
// Tell the VM about this EH clause.
compiler->eeSetEHinfo(XTnum, &clause);
++XTnum;
}
#if defined(FEATURE_EH_FUNCLETS)
// Now output duplicated clauses.
//
// If a funclet has been created by moving a handler out of a try region that it was originally nested
// within, then we need to report a "duplicate" clause representing the fact that an exception in that
// handler can be caught by the 'try' it has been moved out of. This is because the original 'try' region
// descriptor can only specify a single, contiguous protected range, but the funclet we've moved out is
// no longer contiguous with the original 'try' region. The new EH descriptor will have the same handler
// region as the enclosing try region's handler region. This is the sense in which it is duplicated:
// there is now a "duplicate" clause with the same handler region as another, but a different 'try'
// region.
//
// For example, consider this (capital letters represent an unknown code sequence, numbers identify a
// try or handler region):
//
// A
// try (1) {
// B
// try (2) {
// C
// } catch (3) {
// D
// } catch (4) {
// E
// }
// F
// } catch (5) {
// G
// }
// H
//
// Here, we have try region (1) BCDEF protected by catch (5) G, and region (2) C protected
// by catch (3) D and catch (4) E. Note that catch (4) E does *NOT* protect the code "D".
// This is an example of 'mutually protect' regions. First, we move handlers (3) and (4)
// to the end of the code. However, (3) and (4) are nested inside, and protected by, try (1). Again
// note that (3) is not nested inside (4), despite ebdEnclosingTryIndex indicating that.
// The code "D" and "E" won't be contiguous with the protected region for try (1) (which
// will, after moving catch (3) AND (4), be BCF). Thus, we need to add a new EH descriptor
// representing try (1) protecting the new funclets catch (3) and (4).
// The code will be generated as follows:
//
// ABCFH // "main" code
// D // funclet
// E // funclet
// G // funclet
//
// The EH regions are:
//
// C -> D
// C -> E
// BCF -> G
// D -> G // "duplicate" clause
// E -> G // "duplicate" clause
//
// Note that we actually need to generate one of these additional "duplicate" clauses for every
// region the funclet is nested in. Take this example:
//
// A
// try (1) {
// B
// try (2,3) {
// C
// try (4) {
// D
// try (5,6) {
// E
// } catch {
// F
// } catch {
// G
// }
// H
// } catch {
// I
// }
// J
// } catch {
// K
// } catch {
// L
// }
// M
// } catch {
// N
// }
// O
//
// When we pull out funclets, we get the following generated code:
//
// ABCDEHJMO // "main" function
// F // funclet
// G // funclet
// I // funclet
// K // funclet
// L // funclet
// N // funclet
//
// And the EH regions we report to the VM are (in order; main clauses
// first in most-to-least nested order, funclets ("duplicated clauses")
// last, in most-to-least nested) are:
//
// E -> F
// E -> G
// DEH -> I
// CDEHJ -> K
// CDEHJ -> L
// BCDEHJM -> N
// F -> I // funclet clause #1 for F
// F -> K // funclet clause #2 for F
// F -> L // funclet clause #3 for F
// F -> N // funclet clause #4 for F
// G -> I // funclet clause #1 for G
// G -> K // funclet clause #2 for G
// G -> L // funclet clause #3 for G
// G -> N // funclet clause #4 for G
// I -> K // funclet clause #1 for I
// I -> L // funclet clause #2 for I
// I -> N // funclet clause #3 for I
// K -> N // funclet clause #1 for K
// L -> N // funclet clause #1 for L
//
// So whereas the IL had 6 EH clauses, we need to report 19 EH clauses to the VM.
// Note that due to the nature of 'mutually protect' clauses, it would be incorrect
// to add a clause "F -> G" because F is NOT protected by G, but we still have
// both "F -> K" and "F -> L" because F IS protected by both of those handlers.
//
// The overall ordering of the clauses is still the same most-to-least nesting
// after front-to-back start offset. Because we place the funclets at the end
// these new clauses should also go at the end by this ordering.
//
if (duplicateClauseCount > 0)
{
unsigned reportedDuplicateClauseCount = 0; // How many duplicated clauses have we reported?
unsigned XTnum2;
EHblkDsc* HBtab;
for (XTnum2 = 0, HBtab = compiler->compHndBBtab; XTnum2 < compiler->compHndBBtabCount; XTnum2++, HBtab++)
{
unsigned enclosingTryIndex;
EHblkDsc* fletTab = compiler->ehGetDsc(XTnum2);
for (enclosingTryIndex = compiler->ehTrueEnclosingTryIndexIL(XTnum2); // find the true enclosing try index,
// ignoring 'mutual protect' trys
enclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX;
enclosingTryIndex = compiler->ehGetEnclosingTryIndex(enclosingTryIndex))
{
// The funclet we moved out is nested in a try region, so create a new EH descriptor for the funclet
// that will have the enclosing try protecting the funclet.
noway_assert(XTnum2 < enclosingTryIndex); // the enclosing region must be less nested, and hence have a
// greater EH table index
EHblkDsc* encTab = compiler->ehGetDsc(enclosingTryIndex);
// The try region is the handler of the funclet. Note that for filters, we don't protect the
// filter region, only the filter handler region. This is because exceptions in filters never
// escape; the VM swallows them.
BasicBlock* bbTryBeg = fletTab->ebdHndBeg;
BasicBlock* bbTryLast = fletTab->ebdHndLast;
BasicBlock* bbHndBeg = encTab->ebdHndBeg; // The handler region is the same as the enclosing try
BasicBlock* bbHndLast = encTab->ebdHndLast;
UNATIVE_OFFSET tryBeg, tryEnd, hndBeg, hndEnd, hndTyp;
tryBeg = compiler->ehCodeOffset(bbTryBeg);
hndBeg = compiler->ehCodeOffset(bbHndBeg);
tryEnd = (bbTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(bbTryLast->bbNext);
hndEnd = (bbHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(bbHndLast->bbNext);
if (encTab->HasFilter())
{
hndTyp = compiler->ehCodeOffset(encTab->ebdFilter);
}
else
{
hndTyp = encTab->ebdTyp;
}
CORINFO_EH_CLAUSE_FLAGS flags = ToCORINFO_EH_CLAUSE_FLAGS(encTab->ebdHandlerType);
// Tell the VM this is an extra clause caused by moving funclets out of line.
flags = (CORINFO_EH_CLAUSE_FLAGS)(flags | CORINFO_EH_CLAUSE_DUPLICATE);
// Note that the JIT-EE interface reuses the CORINFO_EH_CLAUSE type, even though the names of
// the fields aren't really accurate. For example, we set "TryLength" to the offset of the
// instruction immediately after the 'try' body. So, it really could be more accurately named
// "TryEndOffset".
CORINFO_EH_CLAUSE clause;
clause.ClassToken = hndTyp; /* filter offset is passed back here for filter-based exception handlers */
clause.Flags = flags;
clause.TryOffset = tryBeg;
clause.TryLength = tryEnd;
clause.HandlerOffset = hndBeg;
clause.HandlerLength = hndEnd;
assert(XTnum < EHCount);
// Tell the VM about this EH clause (a duplicated clause).
compiler->eeSetEHinfo(XTnum, &clause);
++XTnum;
++reportedDuplicateClauseCount;
#ifndef DEBUG
if (duplicateClauseCount == reportedDuplicateClauseCount)
{
break; // we've reported all of them; no need to continue looking
}
#endif // !DEBUG
} // for each 'true' enclosing 'try'
} // for each EH table entry
assert(duplicateClauseCount == reportedDuplicateClauseCount);
} // if (duplicateClauseCount > 0)
#if FEATURE_EH_CALLFINALLY_THUNKS
if (clonedFinallyCount > 0)
{
unsigned reportedClonedFinallyCount = 0;
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
UNATIVE_OFFSET hndBeg, hndEnd;
hndBeg = compiler->ehCodeOffset(block);
// How big is it? The BBJ_ALWAYS has a null bbEmitCookie! Look for the block after, which must be
// a label or jump target, since the BBJ_CALLFINALLY doesn't fall through.
BasicBlock* bbLabel = block->bbNext;
if (block->isBBCallAlwaysPair())
{
bbLabel = bbLabel->bbNext; // skip the BBJ_ALWAYS
}
if (bbLabel == nullptr)
{
hndEnd = compiler->info.compNativeCodeSize;
}
else
{
assert(bbLabel->bbEmitCookie != nullptr);
hndEnd = compiler->ehCodeOffset(bbLabel);
}
CORINFO_EH_CLAUSE clause;
clause.ClassToken = 0; // unused
clause.Flags = (CORINFO_EH_CLAUSE_FLAGS)(CORINFO_EH_CLAUSE_FINALLY | CORINFO_EH_CLAUSE_DUPLICATE);
clause.TryOffset = hndBeg;
clause.TryLength = hndBeg;
clause.HandlerOffset = hndBeg;
clause.HandlerLength = hndEnd;
assert(XTnum < EHCount);
// Tell the VM about this EH clause (a cloned finally clause).
compiler->eeSetEHinfo(XTnum, &clause);
++XTnum;
++reportedClonedFinallyCount;
#ifndef DEBUG
if (clonedFinallyCount == reportedClonedFinallyCount)
{
break; // we're done; no need to keep looking
}
#endif // !DEBUG
} // block is BBJ_CALLFINALLY
} // for each block
assert(clonedFinallyCount == reportedClonedFinallyCount);
} // if (clonedFinallyCount > 0)
#endif // FEATURE_EH_CALLFINALLY_THUNKS
#endif // FEATURE_EH_FUNCLETS
assert(XTnum == EHCount);
}
//----------------------------------------------------------------------
// genUseOptimizedWriteBarriers: Determine if an optimized write barrier
// helper should be used.
//
// Arguments:
// wbf - The WriteBarrierForm of the write (GT_STOREIND) that is happening.
//
// Return Value:
// true if an optimized write barrier helper should be used, false otherwise.
// Note: only x86 implements register-specific source optimized write
// barriers currently.
//
bool CodeGenInterface::genUseOptimizedWriteBarriers(GCInfo::WriteBarrierForm wbf)
{
#if defined(TARGET_X86) && NOGC_WRITE_BARRIERS
#ifdef DEBUG
return (wbf != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug); // This one is always a call to a C++ method.
#else
return true;
#endif
#else
return false;
#endif
}
//----------------------------------------------------------------------
// genUseOptimizedWriteBarriers: Determine if an optimized write barrier
// helper should be used.
//
// This has the same functionality as the version of
// genUseOptimizedWriteBarriers that takes a WriteBarrierForm, but avoids
// determining what the required write barrier form is, if possible.
//
// Arguments:
// tgt - target tree of write (e.g., GT_STOREIND)
// assignVal - tree with value to write
//
// Return Value:
// true if an optimized write barrier helper should be used, false otherwise.
// Note: only x86 implements register-specific source optimized write
// barriers currently.
//
bool CodeGenInterface::genUseOptimizedWriteBarriers(GenTree* tgt, GenTree* assignVal)
{
#if defined(TARGET_X86) && NOGC_WRITE_BARRIERS
#ifdef DEBUG
GCInfo::WriteBarrierForm wbf = compiler->codeGen->gcInfo.gcIsWriteBarrierCandidate(tgt, assignVal);
return (wbf != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug); // This one is always a call to a C++ method.
#else
return true;
#endif
#else
return false;
#endif
}
//----------------------------------------------------------------------
// genWriteBarrierHelperForWriteBarrierForm: Given a write node requiring a write
// barrier, and the write barrier form required, determine the helper to call.
//
// Arguments:
// tgt - target tree of write (e.g., GT_STOREIND)
// wbf - already computed write barrier form to use
//
// Return Value:
// Write barrier helper to use.
//
// Note: do not call this function to get an optimized write barrier helper (e.g.,
// for x86).
//
CorInfoHelpFunc CodeGenInterface::genWriteBarrierHelperForWriteBarrierForm(GenTree* tgt, GCInfo::WriteBarrierForm wbf)
{
noway_assert(tgt->gtOper == GT_STOREIND);
CorInfoHelpFunc helper = CORINFO_HELP_ASSIGN_REF;
#ifdef DEBUG
if (wbf == GCInfo::WBF_NoBarrier_CheckNotHeapInDebug)
{
helper = CORINFO_HELP_ASSIGN_REF_ENSURE_NONHEAP;
}
else
#endif
if (tgt->gtOper != GT_CLS_VAR)
{
if (wbf != GCInfo::WBF_BarrierUnchecked) // This overrides the tests below.
{
if (tgt->gtFlags & GTF_IND_TGTANYWHERE)
{
helper = CORINFO_HELP_CHECKED_ASSIGN_REF;
}
else if (tgt->AsOp()->gtOp1->TypeGet() == TYP_I_IMPL)
{
helper = CORINFO_HELP_CHECKED_ASSIGN_REF;
}
}
}
assert(((helper == CORINFO_HELP_ASSIGN_REF_ENSURE_NONHEAP) && (wbf == GCInfo::WBF_NoBarrier_CheckNotHeapInDebug)) ||
((helper == CORINFO_HELP_CHECKED_ASSIGN_REF) &&
(wbf == GCInfo::WBF_BarrierChecked || wbf == GCInfo::WBF_BarrierUnknown)) ||
((helper == CORINFO_HELP_ASSIGN_REF) &&
(wbf == GCInfo::WBF_BarrierUnchecked || wbf == GCInfo::WBF_BarrierUnknown)));
return helper;
}
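// In summary (derived from the logic above): CORINFO_HELP_ASSIGN_REF is the default;
// CORINFO_HELP_CHECKED_ASSIGN_REF is used for a non-GT_CLS_VAR target whose address might
// not point into the GC heap (GTF_IND_TGTANYWHERE, or a TYP_I_IMPL address) when the barrier
// form is not WBF_BarrierUnchecked; and, under DEBUG only, WBF_NoBarrier_CheckNotHeapInDebug
// maps to CORINFO_HELP_ASSIGN_REF_ENSURE_NONHEAP.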
//----------------------------------------------------------------------
// genGCWriteBarrier: Generate a write barrier for a node.
//
// Arguments:
// tgt - target tree of write (e.g., GT_STOREIND)
// wbf - already computed write barrier form to use
//
void CodeGen::genGCWriteBarrier(GenTree* tgt, GCInfo::WriteBarrierForm wbf)
{
CorInfoHelpFunc helper = genWriteBarrierHelperForWriteBarrierForm(tgt, wbf);
#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
// We classify the "tgt" trees as follows:
// If "tgt" is of the form (where [ x ] indicates an optional x, and { x1, ..., xn } means "one of the x_i forms"):
// IND [-> ADDR -> IND] -> { GT_LCL_VAR, ADD({GT_LCL_VAR}, X), ADD(X, (GT_LCL_VAR)) }
// then let "v" be the GT_LCL_VAR.
// * If "v" is the return buffer argument, classify as CWBKind_RetBuf.
// * If "v" is another by-ref argument, classify as CWBKind_ByRefArg.
// * Otherwise, classify as CWBKind_OtherByRefLocal.
// If "tgt" is of the form IND -> ADDR -> GT_LCL_VAR, clasify as CWBKind_AddrOfLocal.
// Otherwise, classify as CWBKind_Unclassified.
CheckedWriteBarrierKinds wbKind = CWBKind_Unclassified;
if (tgt->gtOper == GT_IND)
{
GenTree* lcl = NULL;
GenTree* indArg = tgt->AsOp()->gtOp1;
if (indArg->gtOper == GT_ADDR && indArg->AsOp()->gtOp1->gtOper == GT_IND)
{
indArg = indArg->AsOp()->gtOp1->AsOp()->gtOp1;
}
if (indArg->gtOper == GT_LCL_VAR)
{
lcl = indArg;
}
else if (indArg->gtOper == GT_ADD)
{
if (indArg->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
lcl = indArg->AsOp()->gtOp1;
}
else if (indArg->AsOp()->gtOp2->gtOper == GT_LCL_VAR)
{
lcl = indArg->AsOp()->gtOp2;
}
}
if (lcl != NULL)
{
wbKind = CWBKind_OtherByRefLocal; // Unclassified local variable.
unsigned lclNum = lcl->AsLclVar()->GetLclNum();
if (lclNum == compiler->info.compRetBuffArg)
{
wbKind = CWBKind_RetBuf; // Ret buff. Can happen if the struct exceeds the size limit.
}
else
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
if (varDsc->lvIsParam && varDsc->lvType == TYP_BYREF)
{
wbKind = CWBKind_ByRefArg; // Out (or in/out) arg
}
}
}
else
{
// We should have eliminated the barrier for this case.
assert(!(indArg->gtOper == GT_ADDR && indArg->AsOp()->gtOp1->gtOper == GT_LCL_VAR));
}
}
if (helper == CORINFO_HELP_CHECKED_ASSIGN_REF)
{
#if 0
#ifdef DEBUG
// Enable this to sample the unclassified trees.
static int unclassifiedBarrierSite = 0;
if (wbKind == CWBKind_Unclassified)
{
unclassifiedBarrierSite++;
printf("unclassifiedBarrierSite = %d:\n", unclassifiedBarrierSite); compiler->gtDispTree(tgt); printf(""); printf("\n");
}
#endif // DEBUG
#endif // 0
AddStackLevel(4);
inst_IV(INS_push, wbKind);
genEmitHelperCall(helper,
4, // argSize
EA_PTRSIZE); // retSize
SubtractStackLevel(4);
}
else
{
genEmitHelperCall(helper,
0, // argSize
EA_PTRSIZE); // retSize
}
#else // !FEATURE_COUNT_GC_WRITE_BARRIERS
genEmitHelperCall(helper,
0, // argSize
EA_PTRSIZE); // retSize
#endif // !FEATURE_COUNT_GC_WRITE_BARRIERS
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Prolog / Epilog XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************
*
* Generates code for moving incoming register arguments to their
* assigned location, in the function prolog.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbered, RegState* regState)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFnPrologCalleeRegArgs() for %s regs\n", regState->rsIsFloat ? "float" : "int");
}
#endif
unsigned argMax; // maximum argNum value plus 1, (including the RetBuffArg)
unsigned argNum; // current argNum, always in [0..argMax-1]
unsigned fixedRetBufIndex; // argNum value used by the fixed return buffer argument (ARM64)
unsigned regArgNum; // index into the regArgTab[] table
regMaskTP regArgMaskLive = regState->rsCalleeRegArgMaskLiveIn;
bool doingFloat = regState->rsIsFloat;
// We should be generating the prolog block when we are called
assert(compiler->compGeneratingProlog);
// We expect to have some registers of the type we are doing, that are LiveIn, otherwise we don't need to be called.
noway_assert(regArgMaskLive != 0);
// If a method has 3 args (and no fixed return buffer) then argMax is 3 and valid indexes are 0,1,2
// If a method has a fixed return buffer (on ARM64) then argMax gets set to 9 and valid indexes are 0-8
//
// The regArgTab can always have unused entries,
// for example if an architecture always increments the arg register number but uses either
// an integer register or a floating point register to hold the next argument
// then with a mix of float and integer args you could have:
//
// sampleMethod(int i, float x, int j, float y, int k, float z);
// r0, r2 and r4 as valid integer arguments with argMax as 5
// and f1, f3 and f5 as valid floating point arguments with argMax as 6
// The first one is doingFloat==false and the second one is doingFloat==true
//
// If a fixed return buffer (in r8) was also present then the first one would become:
// r0, r2, r4 and r8 as valid integer arguments with argMax as 9
//
argMax = regState->rsCalleeRegArgCount;
fixedRetBufIndex = (unsigned)-1; // Invalid value
// If necessary we will select a correct xtraReg for circular floating point args later.
if (doingFloat)
{
xtraReg = REG_NA;
noway_assert(argMax <= MAX_FLOAT_REG_ARG);
}
else // we are doing the integer registers
{
noway_assert(argMax <= MAX_REG_ARG);
if (hasFixedRetBuffReg())
{
fixedRetBufIndex = theFixedRetBuffArgNum();
// We have an additional integer register argument when hasFixedRetBuffReg() is true
argMax = fixedRetBufIndex + 1;
assert(argMax == (MAX_REG_ARG + 1));
}
}
//
// Construct a table with the register arguments, for detecting circular and
// non-circular dependencies between the register arguments. A dependency is when
// an argument register Rn needs to be moved to register Rm that is also an argument
// register. The table is constructed in the order the arguments are passed in
// registers: the first register argument is in regArgTab[0], the second in
// regArgTab[1], etc. Note that on ARM, a TYP_DOUBLE takes two entries, starting
// at an even index. The regArgTab is indexed from 0 to argMax - 1.
// Note that due to an extra argument register for ARM64 (i.e. theFixedRetBuffReg())
// we have increased the allocated size of the regArgTab[] by one.
//
struct regArgElem
{
unsigned varNum; // index into compiler->lvaTable[] for this register argument
#if defined(UNIX_AMD64_ABI)
var_types type; // the Jit type of this regArgTab entry
#endif // defined(UNIX_AMD64_ABI)
unsigned trashBy; // index into this regArgTab[] table of the register that will be copied to this register.
// That is, for regArgTab[x].trashBy = y, argument register number 'y' will be copied to
// argument register number 'x'. Only used when circular = true.
char slot; // 0 means the register is not used for a register argument
// 1 means the first part of a register argument
// 2, 3 or 4 means the second,third or fourth part of a multireg argument
bool stackArg; // true if the argument gets homed to the stack
bool writeThru; // true if the argument gets homed to both stack and register
bool processed; // true after we've processed the argument (and it is in its final location)
bool circular; // true if this register participates in a circular dependency loop.
#ifdef UNIX_AMD64_ABI
// For UNIX AMD64 struct passing, the type of the register argument slot can differ from
// the type of the lclVar in ways that are not ascertainable from lvType.
// So, for that case we retain the type of the register in the regArgTab.
var_types getRegType(Compiler* compiler)
{
return type; // UNIX_AMD64 implementation
}
#else // !UNIX_AMD64_ABI
// In other cases, we simply use the type of the lclVar to determine the type of the register.
var_types getRegType(Compiler* compiler)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
// Check if this is an HFA register arg and return the HFA type
if (varDsc->lvIsHfaRegArg())
{
// Cannot have hfa types on windows arm targets
// in vararg methods.
assert(!TargetOS::IsWindows || !compiler->info.compIsVarArgs);
return varDsc->GetHfaType();
}
return compiler->mangleVarArgsType(varDsc->lvType);
}
#endif // !UNIX_AMD64_ABI
} regArgTab[max(MAX_REG_ARG + 1, MAX_FLOAT_REG_ARG)] = {};
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0; varNum < compiler->lvaCount; ++varNum)
{
varDsc = compiler->lvaGetDesc(varNum);
// Is this variable a register arg?
if (!varDsc->lvIsParam)
{
continue;
}
if (!varDsc->lvIsRegArg)
{
continue;
}
// When we have a promoted struct we have two possible LclVars that can represent the incoming argument
// in the regArgTab[], either the original TYP_STRUCT argument or the introduced lvStructField.
// We will use the lvStructField if we have a TYPE_INDEPENDENT promoted struct field; otherwise
// we use the original TYP_STRUCT argument.
//
if (varDsc->lvPromoted || varDsc->lvIsStructField)
{
LclVarDsc* parentVarDsc = varDsc;
if (varDsc->lvIsStructField)
{
assert(!varDsc->lvPromoted);
parentVarDsc = compiler->lvaGetDesc(varDsc->lvParentLcl);
}
Compiler::lvaPromotionType promotionType = compiler->lvaGetPromotionType(parentVarDsc);
if (promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT)
{
// For register arguments that are independent promoted structs we put the promoted field varNum in the
// regArgTab[]
if (varDsc->lvPromoted)
{
continue;
}
}
else
{
// For register arguments that are not independent promoted structs we put the parent struct varNum in
// the regArgTab[]
if (varDsc->lvIsStructField)
{
continue;
}
}
}
var_types regType = compiler->mangleVarArgsType(varDsc->TypeGet());
// Change regType to the HFA type when we have a HFA argument
if (varDsc->lvIsHfaRegArg())
{
#if defined(TARGET_ARM64)
if (TargetOS::IsWindows && compiler->info.compIsVarArgs)
{
assert(!"Illegal incoming HFA arg encountered in Vararg method.");
}
#endif // defined(TARGET_ARM64)
regType = varDsc->GetHfaType();
}
#if defined(UNIX_AMD64_ABI)
if (!varTypeIsStruct(regType))
#endif // defined(UNIX_AMD64_ABI)
{
// A struct might be passed partially in XMM register for System V calls.
// So a single arg might use both register files.
if (emitter::isFloatReg(varDsc->GetArgReg()) != doingFloat)
{
continue;
}
}
int slots = 0;
#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(varDsc))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
assert(typeHnd != nullptr);
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc);
if (!structDesc.passedInRegisters)
{
// The var is not passed in registers.
continue;
}
unsigned firstRegSlot = 0;
for (unsigned slotCounter = 0; slotCounter < structDesc.eightByteCount; slotCounter++)
{
regNumber regNum = varDsc->lvRegNumForSlot(slotCounter);
var_types regType;
#ifdef FEATURE_SIMD
// Assumption 1:
// The RyuJIT backend depends on the assumption that on 64-bit targets the Vector3 size is rounded up
// to TARGET_POINTER_SIZE and hence Vector3 locals on the stack can be treated as TYP_SIMD16 for
// reading and writing purposes. Therefore, while homing a Vector3 type arg on the stack we should
// home the entire 16 bytes so that the upper-most 4 bytes will be zeroed when written to the stack.
//
// Assumption 2:
// The RyuJIT backend makes another implicit assumption: for Vector3 type args, whether passed in
// registers or on the stack, the upper-most 4 bytes will be zero.
//
// For P/Invoke returns and Reverse P/Invoke argument passing, the native compiler doesn't guarantee
// that the upper 4 bytes of a Vector3 type struct are zero initialized, and hence assumption 2 is
// invalid.
//
// RyuJIT x64 Windows: arguments are treated as passed by ref and hence only 12 bytes are
// read/written. In the case of Vector3 returns, the caller allocates a zero-initialized Vector3 local
// and passes it as the retBuf arg, and the callee method writes only 12 bytes to retBuf. For this
// reason, there is no need to clear the upper 4 bytes of Vector3 type args.
//
// RyuJIT x64 Unix: arguments are treated as passed by value and read/written as if TYP_SIMD16.
// Vector3 return values are returned in two return registers and the caller assembles them into a
// single xmm reg. Hence RyuJIT explicitly generates code to clear the upper 4 bytes of Vector3
// type args in the prolog and of the Vector3 type return value of a call.
if (varDsc->lvType == TYP_SIMD12)
{
regType = TYP_DOUBLE;
}
else
#endif
{
regType = compiler->GetEightByteType(structDesc, slotCounter);
}
regArgNum = genMapRegNumToRegArgNum(regNum, regType);
if ((!doingFloat && (structDesc.IsIntegralSlot(slotCounter))) ||
(doingFloat && (structDesc.IsSseSlot(slotCounter))))
{
// Store the reg for the first slot.
if (slots == 0)
{
firstRegSlot = regArgNum;
}
// Bingo - add it to our table
noway_assert(regArgNum < argMax);
noway_assert(regArgTab[regArgNum].slot == 0); // we better not have added it already (there better
// not be multiple vars representing this argument
// register)
regArgTab[regArgNum].varNum = varNum;
regArgTab[regArgNum].slot = (char)(slotCounter + 1);
regArgTab[regArgNum].type = regType;
slots++;
}
}
if (slots == 0)
{
continue; // Nothing to do for this regState set.
}
regArgNum = firstRegSlot;
}
else
#endif // defined(UNIX_AMD64_ABI)
{
// Bingo - add it to our table
regArgNum = genMapRegNumToRegArgNum(varDsc->GetArgReg(), regType);
noway_assert(regArgNum < argMax);
// We better not have added it already (there better not be multiple vars representing this argument
// register)
noway_assert(regArgTab[regArgNum].slot == 0);
#if defined(UNIX_AMD64_ABI)
// Set the register type.
regArgTab[regArgNum].type = regType;
#endif // defined(UNIX_AMD64_ABI)
regArgTab[regArgNum].varNum = varNum;
regArgTab[regArgNum].slot = 1;
slots = 1;
#if FEATURE_MULTIREG_ARGS
if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
{
if (varDsc->lvIsHfaRegArg())
{
// We have an HFA argument, set slots to the number of registers used
slots = varDsc->lvHfaSlots();
}
else
{
// Currently all non-HFA multireg structs are two registers in size (i.e. two slots)
assert(varDsc->lvSize() == (2 * TARGET_POINTER_SIZE));
// We have a non-HFA multireg argument, set slots to two
slots = 2;
}
// Note that regArgNum+1 represents an argument index not an actual argument register.
// see genMapRegArgNumToRegNum(unsigned argNum, var_types type)
// This is the setup for the rest of a multireg struct arg
for (int i = 1; i < slots; i++)
{
noway_assert((regArgNum + i) < argMax);
// We better not have added it already (there better not be multiple vars representing this argument
// register)
noway_assert(regArgTab[regArgNum + i].slot == 0);
regArgTab[regArgNum + i].varNum = varNum;
regArgTab[regArgNum + i].slot = (char)(i + 1);
}
}
#endif // FEATURE_MULTIREG_ARGS
}
#ifdef TARGET_ARM
int lclSize = compiler->lvaLclSize(varNum);
if (lclSize > REGSIZE_BYTES)
{
unsigned maxRegArgNum = doingFloat ? MAX_FLOAT_REG_ARG : MAX_REG_ARG;
slots = lclSize / REGSIZE_BYTES;
if (regArgNum + slots > maxRegArgNum)
{
slots = maxRegArgNum - regArgNum;
}
}
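// The loop below uses a char loop counter for the slot number, so verify that MAX_REG_ARG and
// the computed slot count both fit in a char.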
C_ASSERT((char)MAX_REG_ARG == MAX_REG_ARG);
assert(slots < INT8_MAX);
for (char i = 1; i < slots; i++)
{
regArgTab[regArgNum + i].varNum = varNum;
regArgTab[regArgNum + i].slot = i + 1;
}
#endif // TARGET_ARM
for (int i = 0; i < slots; i++)
{
regType = regArgTab[regArgNum + i].getRegType(compiler);
regNumber regNum = genMapRegArgNumToRegNum(regArgNum + i, regType);
#if !defined(UNIX_AMD64_ABI)
assert((i > 0) || (regNum == varDsc->GetArgReg()));
#endif // !defined(UNIX_AMD64_ABI)
// Is the arg dead on entry to the method ?
if ((regArgMaskLive & genRegMask(regNum)) == 0)
{
if (varDsc->lvTrackedNonStruct())
{
// We may now see some tracked locals with zero refs.
// See Lowering::DoPhase. Tolerate these.
if (varDsc->lvRefCnt() > 0)
{
noway_assert(!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex));
}
}
else
{
#ifdef TARGET_X86
noway_assert(varDsc->lvType == TYP_STRUCT);
#else // !TARGET_X86
// For LSRA, it may not be in regArgMaskLive if it has a zero
// refcnt. This is in contrast with the non-LSRA case in which all
// non-tracked args are assumed live on entry.
noway_assert((varDsc->lvRefCnt() == 0) || (varDsc->lvType == TYP_STRUCT) ||
(varDsc->IsAddressExposed() && compiler->info.compIsVarArgs) ||
(varDsc->IsAddressExposed() && compiler->opts.compUseSoftFP));
#endif // !TARGET_X86
}
// Mark it as processed and be done with it
regArgTab[regArgNum + i].processed = true;
goto NON_DEP;
}
#ifdef TARGET_ARM
// On the ARM when the varDsc is a struct arg (or pre-spilled due to varargs) the initReg/xtraReg
// could be equal to GetArgReg(). The pre-spilled registers are also not considered live either since
// they've already been spilled.
//
if ((regSet.rsMaskPreSpillRegs(false) & genRegMask(regNum)) == 0)
#endif // TARGET_ARM
{
#if !defined(UNIX_AMD64_ABI)
noway_assert(xtraReg != (varDsc->GetArgReg() + i));
#endif
noway_assert(regArgMaskLive & genRegMask(regNum));
}
regArgTab[regArgNum + i].processed = false;
regArgTab[regArgNum + i].writeThru = (varDsc->lvIsInReg() && varDsc->lvLiveInOutOfHndlr);
/* mark stack arguments since we will take care of those first */
regArgTab[regArgNum + i].stackArg = (varDsc->lvIsInReg()) ? false : true;
/* If it goes on the stack or in a register that doesn't hold
* an argument anymore -> CANNOT form a circular dependency */
if (varDsc->lvIsInReg() && (genRegMask(regNum) & regArgMaskLive))
{
/* will trash another argument -> possible dependency
* We may need several passes after the table is constructed
* to decide on that */
/* Maybe the argument stays in the register (IDEAL) */
if ((i == 0) && (varDsc->GetRegNum() == regNum))
{
goto NON_DEP;
}
#if !defined(TARGET_64BIT)
if ((i == 1) && varTypeIsStruct(varDsc) && (varDsc->GetOtherReg() == regNum))
{
goto NON_DEP;
}
if ((i == 1) && (genActualType(varDsc->TypeGet()) == TYP_LONG) && (varDsc->GetOtherReg() == regNum))
{
goto NON_DEP;
}
if ((i == 1) && (genActualType(varDsc->TypeGet()) == TYP_DOUBLE) &&
(REG_NEXT(varDsc->GetRegNum()) == regNum))
{
goto NON_DEP;
}
#endif // !defined(TARGET_64BIT)
regArgTab[regArgNum + i].circular = true;
}
else
{
NON_DEP:
regArgTab[regArgNum + i].circular = false;
/* mark the argument register as free */
regArgMaskLive &= ~genRegMask(regNum);
}
}
}
/* Find the circular dependencies for the argument registers, if any.
* A circular dependency is a set of registers R1, R2, ..., Rn
* such that R1->R2 (that is, R1 needs to be moved to R2), R2->R3, ..., Rn->R1 */
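// The simplest case is two arguments that arrive in each other's destination registers; longer
// chains of registers can also close back on themselves the same way.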
bool change = true;
if (regArgMaskLive)
{
/* Possible circular dependencies still exist; the previous pass was not enough
* to filter them out. Use a "sieve" strategy to find all circular dependencies. */
while (change)
{
change = false;
for (argNum = 0; argNum < argMax; argNum++)
{
// If we already marked the argument as non-circular then continue
if (!regArgTab[argNum].circular)
{
continue;
}
if (regArgTab[argNum].slot == 0) // Not a register argument
{
continue;
}
varNum = regArgTab[argNum].varNum;
varDsc = compiler->lvaGetDesc(varNum);
const var_types varRegType = varDsc->GetRegisterType();
noway_assert(varDsc->lvIsParam && varDsc->lvIsRegArg);
/* cannot possibly have stack arguments */
noway_assert(varDsc->lvIsInReg());
noway_assert(!regArgTab[argNum].stackArg);
var_types regType = regArgTab[argNum].getRegType(compiler);
regNumber regNum = genMapRegArgNumToRegNum(argNum, regType);
regNumber destRegNum = REG_NA;
if (varTypeIsStruct(varDsc) &&
(compiler->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT))
{
assert(regArgTab[argNum].slot <= varDsc->lvFieldCnt);
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + regArgTab[argNum].slot - 1);
destRegNum = fieldVarDsc->GetRegNum();
}
else if (regArgTab[argNum].slot == 1)
{
destRegNum = varDsc->GetRegNum();
}
#if defined(TARGET_ARM64) && defined(FEATURE_SIMD)
else if (varDsc->lvIsHfa())
{
// This must be a SIMD type that's fully enregistered, but is passed as an HFA.
// Each field will be inserted into the same destination register.
assert(varTypeIsSIMD(varDsc) &&
!compiler->isOpaqueSIMDType(varDsc->lvVerTypeInfo.GetClassHandle()));
assert(regArgTab[argNum].slot <= (int)varDsc->lvHfaSlots());
assert(argNum > 0);
assert(regArgTab[argNum - 1].varNum == varNum);
regArgMaskLive &= ~genRegMask(regNum);
regArgTab[argNum].circular = false;
change = true;
continue;
}
#elif defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
else
{
assert(regArgTab[argNum].slot == 2);
assert(argNum > 0);
assert(regArgTab[argNum - 1].slot == 1);
assert(regArgTab[argNum - 1].varNum == varNum);
assert((varRegType == TYP_SIMD12) || (varRegType == TYP_SIMD16));
regArgMaskLive &= ~genRegMask(regNum);
regArgTab[argNum].circular = false;
change = true;
continue;
}
#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#if !defined(TARGET_64BIT)
else if (regArgTab[argNum].slot == 2 && genActualType(varDsc->TypeGet()) == TYP_LONG)
{
destRegNum = varDsc->GetOtherReg();
}
else
{
assert(regArgTab[argNum].slot == 2);
assert(varDsc->TypeGet() == TYP_DOUBLE);
destRegNum = REG_NEXT(varDsc->GetRegNum());
}
#endif // !defined(TARGET_64BIT)
noway_assert(destRegNum != REG_NA);
if (genRegMask(destRegNum) & regArgMaskLive)
{
/* we are trashing a live argument register - record it */
unsigned destRegArgNum = genMapRegNumToRegArgNum(destRegNum, regType);
noway_assert(destRegArgNum < argMax);
regArgTab[destRegArgNum].trashBy = argNum;
}
else
{
/* argument goes to a free register */
regArgTab[argNum].circular = false;
change = true;
/* mark the argument register as free */
regArgMaskLive &= ~genRegMask(regNum);
}
}
}
}
/* At this point, everything that has the "circular" flag
* set to "true" forms a circular dependency */
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (regArgMaskLive)
{
if (verbose)
{
printf("Circular dependencies found while home-ing the incoming arguments.\n");
}
}
#endif
// LSRA allocates registers to incoming parameters in order and will not overwrite
// a register still holding a live parameter.
noway_assert(((regArgMaskLive & RBM_FLTARG_REGS) == 0) &&
"Homing of float argument registers with circular dependencies not implemented.");
// Now move the arguments to their locations.
// First consider ones that go on the stack since they may free some registers.
// Also home writeThru args, since they're also homed to the stack.
regArgMaskLive = regState->rsCalleeRegArgMaskLiveIn; // reset the live in to what it was at the start
for (argNum = 0; argNum < argMax; argNum++)
{
emitAttr size;
#if defined(UNIX_AMD64_ABI)
// If this is the wrong register file, just continue.
if (regArgTab[argNum].type == TYP_UNDEF)
{
// This could happen if the reg in regArgTab[argNum] is of the other register file -
// for System V register passed structs where the first reg is GPR and the second an XMM reg.
// The next register file processing will process it.
continue;
}
#endif // defined(UNIX_AMD64_ABI)
// If the arg is dead on entry to the method, skip it
if (regArgTab[argNum].processed)
{
continue;
}
if (regArgTab[argNum].slot == 0) // Not a register argument
{
continue;
}
varNum = regArgTab[argNum].varNum;
varDsc = compiler->lvaGetDesc(varNum);
#ifndef TARGET_64BIT
// If this arg is never on the stack, go to the next one.
if (varDsc->lvType == TYP_LONG)
{
if (regArgTab[argNum].slot == 1 && !regArgTab[argNum].stackArg && !regArgTab[argNum].writeThru)
{
continue;
}
else if (varDsc->GetOtherReg() != REG_STK)
{
continue;
}
}
else
#endif // !TARGET_64BIT
{
// If this arg is never on the stack, go to the next one.
if (!regArgTab[argNum].stackArg && !regArgTab[argNum].writeThru)
{
continue;
}
}
#if defined(TARGET_ARM)
if (varDsc->lvType == TYP_DOUBLE)
{
if (regArgTab[argNum].slot == 2)
{
// We handled the entire double when processing the first half (slot == 1)
continue;
}
}
#endif
noway_assert(regArgTab[argNum].circular == false);
noway_assert(varDsc->lvIsParam);
noway_assert(varDsc->lvIsRegArg);
noway_assert(varDsc->lvIsInReg() == false || varDsc->lvLiveInOutOfHndlr ||
(varDsc->lvType == TYP_LONG && varDsc->GetOtherReg() == REG_STK && regArgTab[argNum].slot == 2));
var_types storeType = TYP_UNDEF;
unsigned slotSize = TARGET_POINTER_SIZE;
if (varTypeIsStruct(varDsc))
{
storeType = TYP_I_IMPL; // Default store type for a struct type is a pointer sized integer
#if FEATURE_MULTIREG_ARGS
// Must be <= MAX_PASS_MULTIREG_BYTES or else it wouldn't be passed in registers
noway_assert(varDsc->lvSize() <= MAX_PASS_MULTIREG_BYTES);
#endif // FEATURE_MULTIREG_ARGS
#ifdef UNIX_AMD64_ABI
storeType = regArgTab[argNum].type;
#endif // UNIX_AMD64_ABI
if (varDsc->lvIsHfaRegArg())
{
#ifdef TARGET_ARM
// On ARM32 the storeType for HFA args is always TYP_FLOAT
storeType = TYP_FLOAT;
slotSize = (unsigned)emitActualTypeSize(storeType);
#else // TARGET_ARM64
storeType = genActualType(varDsc->GetHfaType());
slotSize = (unsigned)emitActualTypeSize(storeType);
#endif // TARGET_ARM64
}
}
else // Not a struct type
{
storeType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
}
size = emitActualTypeSize(storeType);
#ifdef TARGET_X86
noway_assert(genTypeSize(storeType) == TARGET_POINTER_SIZE);
#endif // TARGET_X86
regNumber srcRegNum = genMapRegArgNumToRegNum(argNum, storeType);
// Stack argument - if the ref count is 0 don't care about it
if (!varDsc->lvOnFrame)
{
noway_assert(varDsc->lvRefCnt() == 0);
}
else
{
// Since slot is typically 1, baseOffset is typically 0
int baseOffset = (regArgTab[argNum].slot - 1) * slotSize;
GetEmitter()->emitIns_S_R(ins_Store(storeType), size, srcRegNum, varNum, baseOffset);
#ifndef UNIX_AMD64_ABI
// Check if we are writing past the end of the struct
if (varTypeIsStruct(varDsc))
{
assert(varDsc->lvSize() >= baseOffset + (unsigned)size);
}
#endif // !UNIX_AMD64_ABI
#ifdef USING_SCOPE_INFO
if (regArgTab[argNum].slot == 1)
{
psiMoveToStack(varNum);
}
#endif // USING_SCOPE_INFO
}
// Mark the argument as processed, and set it as no longer live in srcRegNum,
// unless it is a writeThru var, in which case we home it to the stack, but
// don't mark it as processed until below.
if (!regArgTab[argNum].writeThru)
{
regArgTab[argNum].processed = true;
regArgMaskLive &= ~genRegMask(srcRegNum);
}
#if defined(TARGET_ARM)
if ((storeType == TYP_DOUBLE) && !regArgTab[argNum].writeThru)
{
regArgTab[argNum + 1].processed = true;
regArgMaskLive &= ~genRegMask(REG_NEXT(srcRegNum));
}
#endif
}
/* Process any circular dependencies */
if (regArgMaskLive)
{
unsigned begReg, destReg, srcReg;
unsigned varNumDest, varNumSrc;
LclVarDsc* varDscDest;
LclVarDsc* varDscSrc;
instruction insCopy = INS_mov;
if (doingFloat)
{
#ifndef UNIX_AMD64_ABI
if (GlobalJitOptions::compFeatureHfa)
#endif // !UNIX_AMD64_ABI
{
insCopy = ins_Copy(TYP_DOUBLE);
// Compute xtraReg here when we have a float argument
assert(xtraReg == REG_NA);
regMaskTP fpAvailMask;
fpAvailMask = RBM_FLT_CALLEE_TRASH & ~regArgMaskLive;
if (GlobalJitOptions::compFeatureHfa)
{
fpAvailMask &= RBM_ALLDOUBLE;
}
if (fpAvailMask == RBM_NONE)
{
fpAvailMask = RBM_ALLFLOAT & ~regArgMaskLive;
if (GlobalJitOptions::compFeatureHfa)
{
fpAvailMask &= RBM_ALLDOUBLE;
}
}
assert(fpAvailMask != RBM_NONE);
// We pick the lowest avail register number
regMaskTP tempMask = genFindLowestBit(fpAvailMask);
xtraReg = genRegNumFromMask(tempMask);
}
#if defined(TARGET_X86)
// This case shouldn't occur on x86 since NYI gets converted to an assert
NYI("Homing circular FP registers via xtraReg");
#endif
}
for (argNum = 0; argNum < argMax; argNum++)
{
// If not a circular dependency then continue
if (!regArgTab[argNum].circular)
{
continue;
}
// If already processed the dependency then continue
if (regArgTab[argNum].processed)
{
continue;
}
if (regArgTab[argNum].slot == 0) // Not a register argument
{
continue;
}
destReg = begReg = argNum;
srcReg = regArgTab[argNum].trashBy;
varNumDest = regArgTab[destReg].varNum;
varDscDest = compiler->lvaGetDesc(varNumDest);
noway_assert(varDscDest->lvIsParam && varDscDest->lvIsRegArg);
noway_assert(srcReg < argMax);
varNumSrc = regArgTab[srcReg].varNum;
varDscSrc = compiler->lvaGetDesc(varNumSrc);
noway_assert(varDscSrc->lvIsParam && varDscSrc->lvIsRegArg);
emitAttr size = EA_PTRSIZE;
#ifdef TARGET_XARCH
//
// The following code relies upon the target architecture having an
// 'xchg' instruction which directly swaps the values held in two registers.
// On the ARM architecture we do not have such an instruction.
//
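// For example, when argument A arrives in the register where argument B must end up, and B
// arrives in the register where A must end up, a single 'xchg' of the two registers resolves
// the entire cycle.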
if (destReg == regArgTab[srcReg].trashBy)
{
/* only 2 registers form the circular dependency - use "xchg" */
varNum = regArgTab[argNum].varNum;
varDsc = compiler->lvaGetDesc(varNum);
noway_assert(varDsc->lvIsParam && varDsc->lvIsRegArg);
noway_assert(genTypeSize(genActualType(varDscSrc->TypeGet())) <= REGSIZE_BYTES);
/* Set "size" to indicate GC if one and only one of
* the operands is a pointer
* RATIONALE: If both are pointers, nothing changes in
* the GC pointer tracking. If only one is a pointer we
* have to "swap" the registers in the GC reg pointer mask
*/
if (varTypeGCtype(varDscSrc->TypeGet()) != varTypeGCtype(varDscDest->TypeGet()))
{
size = EA_GCREF;
}
noway_assert(varDscDest->GetArgReg() == varDscSrc->GetRegNum());
GetEmitter()->emitIns_R_R(INS_xchg, size, varDscSrc->GetRegNum(), varDscSrc->GetArgReg());
regSet.verifyRegUsed(varDscSrc->GetRegNum());
regSet.verifyRegUsed(varDscSrc->GetArgReg());
/* mark both arguments as processed */
regArgTab[destReg].processed = true;
regArgTab[srcReg].processed = true;
regArgMaskLive &= ~genRegMask(varDscSrc->GetArgReg());
regArgMaskLive &= ~genRegMask(varDscDest->GetArgReg());
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNumSrc);
psiMoveToReg(varNumDest);
#endif // USING_SCOPE_INFO
}
else
#endif // TARGET_XARCH
{
var_types destMemType = varDscDest->TypeGet();
#ifdef TARGET_ARM
bool cycleAllDouble = true; // assume the best
unsigned iter = begReg;
do
{
if (compiler->lvaGetDesc(regArgTab[iter].varNum)->TypeGet() != TYP_DOUBLE)
{
cycleAllDouble = false;
break;
}
iter = regArgTab[iter].trashBy;
} while (iter != begReg);
// We may treat doubles as floats for ARM because we could have partial circular
// dependencies of a float with a lo/hi part of the double. We mark the
// trashBy values for each slot of the double, so let the circular dependency
// logic work its way out for floats rather than doubles. If a cycle has all
// doubles, then optimize so that instead of two vmov.f32's to move a double,
// we can use one vmov.f64.
//
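// In other words, only when every member of the cycle is a double can we move whole doubles
// with a single vmov.f64 each; if any member is a float, the cycle is shuffled one
// float-sized slot at a time using vmov.f32.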
if (!cycleAllDouble && destMemType == TYP_DOUBLE)
{
destMemType = TYP_FLOAT;
}
#endif // TARGET_ARM
if (destMemType == TYP_REF)
{
size = EA_GCREF;
}
else if (destMemType == TYP_BYREF)
{
size = EA_BYREF;
}
else if (destMemType == TYP_DOUBLE)
{
size = EA_8BYTE;
}
else if (destMemType == TYP_FLOAT)
{
size = EA_4BYTE;
}
/* save the dest reg (begReg) into the extra reg */
assert(xtraReg != REG_NA);
regNumber begRegNum = genMapRegArgNumToRegNum(begReg, destMemType);
GetEmitter()->emitIns_Mov(insCopy, size, xtraReg, begRegNum, /* canSkip */ false);
regSet.verifyRegUsed(xtraReg);
*pXtraRegClobbered = true;
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNumDest, xtraReg);
#endif // USING_SCOPE_INFO
/* start moving everything to its right place */
while (srcReg != begReg)
{
/* mov dest, src */
regNumber destRegNum = genMapRegArgNumToRegNum(destReg, destMemType);
regNumber srcRegNum = genMapRegArgNumToRegNum(srcReg, destMemType);
GetEmitter()->emitIns_Mov(insCopy, size, destRegNum, srcRegNum, /* canSkip */ false);
regSet.verifyRegUsed(destRegNum);
/* mark 'src' as processed */
noway_assert(srcReg < argMax);
regArgTab[srcReg].processed = true;
#ifdef TARGET_ARM
if (size == EA_8BYTE)
regArgTab[srcReg + 1].processed = true;
#endif
regArgMaskLive &= ~genMapArgNumToRegMask(srcReg, destMemType);
/* move to the next pair */
destReg = srcReg;
srcReg = regArgTab[srcReg].trashBy;
varDscDest = varDscSrc;
destMemType = varDscDest->TypeGet();
#ifdef TARGET_ARM
if (!cycleAllDouble && destMemType == TYP_DOUBLE)
{
destMemType = TYP_FLOAT;
}
#endif
varNumSrc = regArgTab[srcReg].varNum;
varDscSrc = compiler->lvaGetDesc(varNumSrc);
noway_assert(varDscSrc->lvIsParam && varDscSrc->lvIsRegArg);
if (destMemType == TYP_REF)
{
size = EA_GCREF;
}
else if (destMemType == TYP_DOUBLE)
{
size = EA_8BYTE;
}
else
{
size = EA_4BYTE;
}
}
/* take care of the beginning register */
noway_assert(srcReg == begReg);
/* move the saved value of the beginning register (held in the extra reg) into its final destination */
regNumber destRegNum = genMapRegArgNumToRegNum(destReg, destMemType);
GetEmitter()->emitIns_Mov(insCopy, size, destRegNum, xtraReg, /* canSkip */ false);
regSet.verifyRegUsed(destRegNum);
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNumSrc);
#endif // USING_SCOPE_INFO
/* mark the beginning register as processed */
regArgTab[srcReg].processed = true;
#ifdef TARGET_ARM
if (size == EA_8BYTE)
regArgTab[srcReg + 1].processed = true;
#endif
regArgMaskLive &= ~genMapArgNumToRegMask(srcReg, destMemType);
}
}
}
/* Finally take care of the remaining arguments that must be enregistered */
while (regArgMaskLive)
{
regMaskTP regArgMaskLiveSave = regArgMaskLive;
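// An argument whose destination register still holds another live incoming argument is skipped
// on this pass and retried on a later iteration of the enclosing while loop once its target has
// been freed; the mask saved above is used to assert that every pass makes progress.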
for (argNum = 0; argNum < argMax; argNum++)
{
/* If already processed go to the next one */
if (regArgTab[argNum].processed)
{
continue;
}
if (regArgTab[argNum].slot == 0)
{ // Not a register argument
continue;
}
varNum = regArgTab[argNum].varNum;
varDsc = compiler->lvaGetDesc(varNum);
const var_types regType = regArgTab[argNum].getRegType(compiler);
const regNumber regNum = genMapRegArgNumToRegNum(argNum, regType);
const var_types varRegType = varDsc->GetRegisterType();
#if defined(UNIX_AMD64_ABI)
if (regType == TYP_UNDEF)
{
// This could happen if the reg in regArgTab[argNum] is of the other register file -
// for System V register passed structs where the first reg is GPR and the second an XMM reg.
// The next register file processing will process it.
regArgMaskLive &= ~genRegMask(regNum);
continue;
}
#endif // defined(UNIX_AMD64_ABI)
noway_assert(varDsc->lvIsParam && varDsc->lvIsRegArg);
#ifdef TARGET_X86
// On x86 we don't enregister args that are not pointer sized.
noway_assert(genTypeSize(varDsc->GetActualRegisterType()) == TARGET_POINTER_SIZE);
#endif // TARGET_X86
noway_assert(varDsc->lvIsInReg() && !regArgTab[argNum].circular);
/* Register argument - hopefully it stays in the same register */
regNumber destRegNum = REG_NA;
var_types destMemType = varDsc->GetRegisterType();
if (regArgTab[argNum].slot == 1)
{
destRegNum = varDsc->GetRegNum();
#ifdef TARGET_ARM
if (genActualType(destMemType) == TYP_DOUBLE && regArgTab[argNum + 1].processed)
{
// The second half of the double has already been processed! Treat this as a single.
destMemType = TYP_FLOAT;
}
#endif // TARGET_ARM
}
#ifndef TARGET_64BIT
else if (regArgTab[argNum].slot == 2 && genActualType(destMemType) == TYP_LONG)
{
assert(genActualType(varDsc->TypeGet()) == TYP_LONG || genActualType(varDsc->TypeGet()) == TYP_DOUBLE);
if (genActualType(varDsc->TypeGet()) == TYP_DOUBLE)
{
destRegNum = regNum;
}
else
{
destRegNum = varDsc->GetOtherReg();
}
assert(destRegNum != REG_STK);
}
else
{
assert(regArgTab[argNum].slot == 2);
assert(destMemType == TYP_DOUBLE);
// For doubles, we move the entire double using the argNum representing
// the first half of the double. There are two things we won't do:
// (1) move the double when the 1st half of the destination is free but the
// 2nd half is occupied, and (2) move the double when the 2nd half of the
// destination is free but the 1st half is occupied. Here we consider the
// case where the first half can't be moved initially because its target is
// still busy, but the second half can be moved. We wait until the entire
// double can be moved, if possible. For example, we have F0/F1 double moving to F2/F3,
// and F2 single moving to F16. When we process F0, its target F2 is busy,
// so we skip it on the first pass. When we process F1, its target F3 is
// available. However, we want to move F0/F1 all at once, so we skip it here.
// We process F2, which frees up F2. The next pass through, we process F0 and
// F2/F3 are empty, so we move it. Note that if half of a double is involved
// in a circularity with a single, then we will have already moved that half
// above, so we go ahead and move the remaining half as a single.
// Because there are no circularities left, we are guaranteed to terminate.
assert(argNum > 0);
assert(regArgTab[argNum - 1].slot == 1);
if (!regArgTab[argNum - 1].processed)
{
// The first half of the double hasn't been processed yet; wait so that both halves can be processed at the same time
continue;
}
// The first half of the double has been processed but the second half hasn't!
// This could happen for double F2/F3 moving to F0/F1, and single F0 moving to F2.
// In that case, there is a F0/F2 loop that is not a double-only loop. The circular
// dependency logic above will move them as singles, leaving just F3 to move. Treat
// it as a single to finish the shuffling.
destMemType = TYP_FLOAT;
destRegNum = REG_NEXT(varDsc->GetRegNum());
}
#endif // !TARGET_64BIT
#if (defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64)) && defined(FEATURE_SIMD)
else
{
assert(regArgTab[argNum].slot == 2);
assert(argNum > 0);
assert(regArgTab[argNum - 1].slot == 1);
assert((varRegType == TYP_SIMD12) || (varRegType == TYP_SIMD16));
destRegNum = varDsc->GetRegNum();
noway_assert(regNum != destRegNum);
continue;
}
#endif // (defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64)) && defined(FEATURE_SIMD)
noway_assert(destRegNum != REG_NA);
if (destRegNum != regNum)
{
/* Cannot trash a currently live register argument.
* Skip this one until its target will be free
* which is guaranteed to happen since we have no circular dependencies. */
regMaskTP destMask = genRegMask(destRegNum);
#ifdef TARGET_ARM
// Don't process the double until both halves of the destination are clear.
if (genActualType(destMemType) == TYP_DOUBLE)
{
assert((destMask & RBM_DBL_REGS) != 0);
destMask |= genRegMask(REG_NEXT(destRegNum));
}
#endif
if (destMask & regArgMaskLive)
{
continue;
}
/* Move it to the new register */
emitAttr size = emitActualTypeSize(destMemType);
#if defined(TARGET_ARM64)
if (varTypeIsSIMD(varDsc) && argNum < (argMax - 1) && regArgTab[argNum + 1].slot == 2)
{
// For a SIMD type that is passed in two integer registers,
// Limit the copy below to the first 8 bytes from the first integer register.
// Handle the remaining 8 bytes from the second slot in the code further below
assert(EA_SIZE(size) >= 8);
size = EA_8BYTE;
}
#endif
inst_Mov(destMemType, destRegNum, regNum, /* canSkip */ false, size);
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNum);
#endif // USING_SCOPE_INFO
}
/* mark the argument as processed */
assert(!regArgTab[argNum].processed);
regArgTab[argNum].processed = true;
regArgMaskLive &= ~genRegMask(regNum);
#if FEATURE_MULTIREG_ARGS
int argRegCount = 1;
#ifdef TARGET_ARM
if (genActualType(destMemType) == TYP_DOUBLE)
{
argRegCount = 2;
}
#endif
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
if (varTypeIsStruct(varDsc) && argNum < (argMax - 1) && regArgTab[argNum + 1].slot == 2)
{
argRegCount = 2;
int nextArgNum = argNum + 1;
regNumber nextRegNum = genMapRegArgNumToRegNum(nextArgNum, regArgTab[nextArgNum].getRegType(compiler));
noway_assert(regArgTab[nextArgNum].varNum == varNum);
// Emit a shufpd with a 0 immediate, which preserves the 0th element of the dest reg
// and moves the 0th element of the src reg into the 1st element of the dest reg.
GetEmitter()->emitIns_R_R_I(INS_shufpd, emitActualTypeSize(varRegType), destRegNum, nextRegNum, 0);
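// i.e. destRegNum now holds { its original low 8 bytes, the low 8 bytes of nextRegNum }.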
// Set destRegNum to regNum so that we skip the setting of the register below,
// but mark argNum as processed and clear regNum from the live mask.
destRegNum = regNum;
}
#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#ifdef TARGET_ARMARCH
if (varDsc->lvIsHfa())
{
// This includes both fixed-size SIMD types that are independently promoted, as well
// as other HFA structs.
argRegCount = varDsc->lvHfaSlots();
if (argNum < (argMax - argRegCount + 1))
{
if (compiler->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT)
{
// For an HFA type that is passed in multiple registers and promoted, we copy each field to its
// destination register.
for (int i = 0; i < argRegCount; i++)
{
int nextArgNum = argNum + i;
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i);
regNumber nextRegNum =
genMapRegArgNumToRegNum(nextArgNum, regArgTab[nextArgNum].getRegType(compiler));
destRegNum = fieldVarDsc->GetRegNum();
noway_assert(regArgTab[nextArgNum].varNum == varNum);
noway_assert(genIsValidFloatReg(nextRegNum));
noway_assert(genIsValidFloatReg(destRegNum));
GetEmitter()->emitIns_Mov(INS_mov, EA_8BYTE, destRegNum, nextRegNum, /* canSkip */ false);
}
}
#if defined(TARGET_ARM64) && defined(FEATURE_SIMD)
else
{
// For a SIMD type that is passed in multiple registers but enregistered as a vector,
// the code above copies the first argument register into the lower 4 or 8 bytes
// of the target register. Here we must handle the subsequent fields by
// inserting them into the upper bytes of the target SIMD floating point register.
argRegCount = varDsc->lvHfaSlots();
for (int i = 1; i < argRegCount; i++)
{
int nextArgNum = argNum + i;
regArgElem* nextArgElem = &regArgTab[nextArgNum];
var_types nextArgType = nextArgElem->getRegType(compiler);
regNumber nextRegNum = genMapRegArgNumToRegNum(nextArgNum, nextArgType);
noway_assert(nextArgElem->varNum == varNum);
noway_assert(genIsValidFloatReg(nextRegNum));
noway_assert(genIsValidFloatReg(destRegNum));
GetEmitter()->emitIns_R_R_I_I(INS_mov, EA_4BYTE, destRegNum, nextRegNum, i, 0);
}
}
#endif // defined(TARGET_ARM64) && defined(FEATURE_SIMD)
}
}
#endif // TARGET_ARMARCH
// Mark the rest of the argument registers corresponding to this multi-reg type as
// being processed and no longer live.
for (int regSlot = 1; regSlot < argRegCount; regSlot++)
{
int nextArgNum = argNum + regSlot;
assert(!regArgTab[nextArgNum].processed);
regArgTab[nextArgNum].processed = true;
regNumber nextRegNum = genMapRegArgNumToRegNum(nextArgNum, regArgTab[nextArgNum].getRegType(compiler));
regArgMaskLive &= ~genRegMask(nextRegNum);
}
#endif // FEATURE_MULTIREG_ARGS
}
noway_assert(regArgMaskLiveSave != regArgMaskLive); // if it doesn't change, we have an infinite loop
}
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
/*****************************************************************************
* If any incoming stack arguments live in registers, load them.
*/
void CodeGen::genEnregisterIncomingStackArgs()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genEnregisterIncomingStackArgs()\n");
}
#endif
// OSR handles this specially -- see genEnregisterOSRArgsAndLocals
//
assert(!compiler->opts.IsOSR());
assert(compiler->compGeneratingProlog);
unsigned varNum = 0;
for (LclVarDsc *varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
/* Is this variable a parameter? */
if (!varDsc->lvIsParam)
{
continue;
}
/* If it's a register argument then it's already been taken care of.
But, on Arm when under a profiler, we would have prespilled a register argument
and hence here we need to load it from its prespilled location.
*/
bool isPrespilledForProfiling = false;
#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED)
isPrespilledForProfiling =
compiler->compIsProfilerHookNeeded() && compiler->lvaIsPreSpilled(varNum, regSet.rsMaskPreSpillRegs(false));
#endif
if (varDsc->lvIsRegArg && !isPrespilledForProfiling)
{
continue;
}
/* Has the parameter been assigned to a register? */
if (!varDsc->lvIsInReg())
{
continue;
}
/* Is the variable dead on entry */
if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
{
continue;
}
/* Load the incoming parameter into the register */
/* Figure out the home offset of the incoming argument */
regNumber regNum = varDsc->GetArgInitReg();
assert(regNum != REG_STK);
var_types regType = varDsc->GetActualRegisterType();
GetEmitter()->emitIns_R_S(ins_Load(regType), emitTypeSize(regType), regNum, varNum, 0);
regSet.verifyRegUsed(regNum);
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNum);
#endif // USING_SCOPE_INFO
}
}
/*-------------------------------------------------------------------------
*
* We have to decide whether we're going to use block initialization
* in the prolog before we assign final stack offsets. This is because
* when using block initialization we may need additional callee-saved
* registers which need to be saved on the frame, thus increasing the
* frame size.
*
* We'll count the number of locals we have to initialize,
* and if there are lots of them we'll use block initialization.
* Thus, the local variable table must have accurate register location
* information for enregistered locals for their register state on entry
* to the function.
*
* At the same time we set lvMustInit for locals (enregistered or on stack)
* that must be initialized (e.g. when initializing memory (compInitMem),
* for untracked pointers, or when DFA is disabled)
*/
void CodeGen::genCheckUseBlockInit()
{
assert(!compiler->compGeneratingProlog);
unsigned initStkLclCnt = 0; // The number of int-sized stack local variables that need to be initialized (variables
// larger than int count for more than 1).
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
// The logic below is complex. Make sure we are not
// double-counting the initialization impact of any locals.
bool counted = false;
if (!varDsc->lvIsInReg() && !varDsc->lvOnFrame)
{
noway_assert(varDsc->lvRefCnt() == 0);
varDsc->lvMustInit = 0;
continue;
}
// Initialization of OSR locals must be handled specially
if (compiler->lvaIsOSRLocal(varNum))
{
varDsc->lvMustInit = 0;
continue;
}
if (compiler->fgVarIsNeverZeroInitializedInProlog(varNum))
{
varDsc->lvMustInit = 0;
continue;
}
if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
// For Compiler::PROMOTION_TYPE_DEPENDENT type of promotion, the whole struct should have been
// initialized by the parent struct. No need to set the lvMustInit bit in the
// field locals.
varDsc->lvMustInit = 0;
continue;
}
if (varDsc->lvHasExplicitInit)
{
varDsc->lvMustInit = 0;
continue;
}
const bool isTemp = varDsc->lvIsTemp;
const bool hasGCPtr = varDsc->HasGCPtr();
const bool isTracked = varDsc->lvTracked;
const bool isStruct = varTypeIsStruct(varDsc);
const bool compInitMem = compiler->info.compInitMem;
if (isTemp && !hasGCPtr)
{
varDsc->lvMustInit = 0;
continue;
}
if (compInitMem || hasGCPtr || varDsc->lvMustInit)
{
if (isTracked)
{
/* For uninitialized use of tracked variables, the liveness
* will bubble to the top (compiler->fgFirstBB) in fgInterBlockLocalVarLiveness()
*/
if (varDsc->lvMustInit ||
VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
{
/* This var must be initialized */
varDsc->lvMustInit = 1;
/* If the variable is on the stack it will be initialized
* using rep stos - compute the total size to be zeroed */
if (varDsc->lvOnFrame)
{
if (!varDsc->lvRegister)
{
if (!varDsc->lvIsInReg() || varDsc->lvLiveInOutOfHndlr)
{
// Var is on the stack at entry.
initStkLclCnt +=
roundUp(compiler->lvaLclSize(varNum), TARGET_POINTER_SIZE) / sizeof(int);
counted = true;
}
}
else
{
// Var is partially enregistered
noway_assert(genTypeSize(varDsc->TypeGet()) > sizeof(int) &&
varDsc->GetOtherReg() == REG_STK);
initStkLclCnt += genTypeStSz(TYP_INT);
counted = true;
}
}
}
}
if (varDsc->lvOnFrame)
{
bool mustInitThisVar = false;
if (hasGCPtr && !isTracked)
{
JITDUMP("must init V%02u because it has a GC ref\n", varNum);
mustInitThisVar = true;
}
else if (hasGCPtr && isStruct)
{
// TODO-1stClassStructs: support precise liveness reporting for such structs.
JITDUMP("must init a tracked V%02u because it a struct with a GC ref\n", varNum);
mustInitThisVar = true;
}
else
{
// We are done with tracked or GC vars, now look at untracked vars without GC refs.
if (!isTracked)
{
assert(!hasGCPtr && !isTemp);
if (compInitMem)
{
JITDUMP("must init V%02u because compInitMem is set and it is not a temp\n", varNum);
mustInitThisVar = true;
}
}
}
if (mustInitThisVar)
{
varDsc->lvMustInit = true;
if (!counted)
{
initStkLclCnt += roundUp(compiler->lvaLclSize(varNum), TARGET_POINTER_SIZE) / sizeof(int);
counted = true;
}
}
}
}
}
/* Don't forget about spill temps that hold pointers */
assert(regSet.tmpAllFree());
for (TempDsc* tempThis = regSet.tmpListBeg(); tempThis != nullptr; tempThis = regSet.tmpListNxt(tempThis))
{
if (varTypeIsGC(tempThis->tdTempType()))
{
initStkLclCnt++;
}
}
// Record number of 4 byte slots that need zeroing.
genInitStkLclCnt = initStkLclCnt;
// Decide if we will do block initialization in the prolog, or use
// a series of individual stores.
//
// Primary factor is the number of slots that need zeroing. We've
// been counting by sizeof(int) above. We assume for now we can
// only zero register width bytes per store.
//
// Current heuristic is to use block init when more than 4 stores
// are required (more than 8 on 64-bit targets other than AMD64).
//
// TODO: Consider taking into account the presence of large structs that
// potentially only need some fields set to zero.
//
// Compiler::fgVarNeedsExplicitZeroInit relies on this logic to
// find structs that are guaranteed to be block initialized.
// If this logic changes, Compiler::fgVarNeedsExplicitZeroInit needs
// to be modified.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
#if defined(TARGET_AMD64)
// We can clear using aligned SIMD so the threshold is lower,
// and the clearing happens in order, which is better for auto-prefetching
genUseBlockInit = (genInitStkLclCnt > 4);
#else // !defined(TARGET_AMD64)
genUseBlockInit = (genInitStkLclCnt > 8);
#endif
#else
genUseBlockInit = (genInitStkLclCnt > 4);
#endif // TARGET_64BIT
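// For example, on AMD64 a frame with five or more int-sized slots to zero (genInitStkLclCnt > 4)
// uses block init, while on other 64-bit targets the threshold is nine or more slots.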
if (genUseBlockInit)
{
regMaskTP maskCalleeRegArgMask = intRegState.rsCalleeRegArgMaskLiveIn;
// If there is a secret stub param, don't count it, as it will no longer
// be live when we do block init.
if (compiler->info.compPublishStubParam)
{
maskCalleeRegArgMask &= ~RBM_SECRET_STUB_PARAM;
}
#ifdef TARGET_ARM
//
// On the Arm if we are using a block init to initialize, then we
// must force spill R4/R5/R6 so that we can use them during
// zero-initialization process.
//
int forceSpillRegCount = genCountBits(maskCalleeRegArgMask & ~regSet.rsMaskPreSpillRegs(false)) - 1;
if (forceSpillRegCount > 0)
regSet.rsSetRegsModified(RBM_R4);
if (forceSpillRegCount > 1)
regSet.rsSetRegsModified(RBM_R5);
if (forceSpillRegCount > 2)
regSet.rsSetRegsModified(RBM_R6);
#endif // TARGET_ARM
}
}
/*****************************************************************************
*
* initFltRegs -- The mask of float regs to be zeroed.
* initDblRegs -- The mask of double regs to be zeroed.
* initReg -- A zero initialized integer reg to copy from.
*
* Does a best effort to copy between VFP/xmm regs if one is already
* initialized to 0 (Arm only). Otherwise it copies from the integer register,
* which is slower.
*/
void CodeGen::genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& initDblRegs, const regNumber& initReg)
{
assert(compiler->compGeneratingProlog);
// The first float/double reg that is initialized to 0. So they can be used to
// initialize the remaining registers.
regNumber fltInitReg = REG_NA;
regNumber dblInitReg = REG_NA;
// Iterate through float/double registers and initialize them to 0 or
// copy from already initialized register of the same type.
regMaskTP regMask = genRegMask(REG_FP_FIRST);
for (regNumber reg = REG_FP_FIRST; reg <= REG_FP_LAST; reg = REG_NEXT(reg), regMask <<= 1)
{
if (regMask & initFltRegs)
{
// Do we have a float register already set to 0?
if (fltInitReg != REG_NA)
{
// Copy from float.
inst_Mov(TYP_FLOAT, reg, fltInitReg, /* canSkip */ false);
}
else
{
#ifdef TARGET_ARM
// Do we have a double register initialized to 0?
if (dblInitReg != REG_NA)
{
// Copy from double.
inst_RV_RV(INS_vcvt_d2f, reg, dblInitReg, TYP_FLOAT);
}
else
{
// Copy from int.
inst_Mov(TYP_FLOAT, reg, initReg, /* canSkip */ false);
}
#elif defined(TARGET_XARCH)
// XORPS is the fastest and smallest way to initialize a XMM register to zero.
inst_RV_RV(INS_xorps, reg, reg, TYP_DOUBLE);
dblInitReg = reg;
#elif defined(TARGET_ARM64)
// We will just zero out the entire vector register. This sets it to a double/float zero value
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B);
#else // TARGET*
#error Unsupported or unset target architecture
#endif
fltInitReg = reg;
}
}
else if (regMask & initDblRegs)
{
// Do we have a double register already set to 0?
if (dblInitReg != REG_NA)
{
// Copy from double.
inst_Mov(TYP_DOUBLE, reg, dblInitReg, /* canSkip */ false);
}
else
{
#ifdef TARGET_ARM
// Do we have a float register initialized to 0?
if (fltInitReg != REG_NA)
{
// Copy from float.
inst_RV_RV(INS_vcvt_f2d, reg, fltInitReg, TYP_DOUBLE);
}
else
{
// Copy from int.
inst_RV_RV_RV(INS_vmov_i2d, reg, initReg, initReg, EA_8BYTE);
}
#elif defined(TARGET_XARCH)
// XORPS is the fastest and smallest way to initialize a XMM register to zero.
inst_RV_RV(INS_xorps, reg, reg, TYP_DOUBLE);
fltInitReg = reg;
#elif defined(TARGET_ARM64)
// We will just zero out the entire vector register. This sets it to a double/float zero value
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B);
#else // TARGET*
#error Unsupported or unset target architecture
#endif
dblInitReg = reg;
}
}
}
}
// We need a register with value zero. Zero the initReg, if necessary, and set *pInitRegZeroed if so.
// Return the register to use. On ARM64, we never touch the initReg, and always just return REG_ZR.
regNumber CodeGen::genGetZeroReg(regNumber initReg, bool* pInitRegZeroed)
{
#ifdef TARGET_ARM64
return REG_ZR;
#else // !TARGET_ARM64
if (*pInitRegZeroed == false)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg);
*pInitRegZeroed = true;
}
return initReg;
#endif // !TARGET_ARM64
}
//-----------------------------------------------------------------------------
// genZeroInitFrame: Zero any untracked pointer locals and/or initialize memory for the locals space
//
// Arguments:
// untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init
// code will end initializing memory (not inclusive).
// untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will
// start zero initializing memory.
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (genUseBlockInit)
{
genZeroInitFrameUsingBlockInit(untrLclHi, untrLclLo, initReg, pInitRegZeroed);
}
else if (genInitStkLclCnt > 0)
{
assert((genRegMask(initReg) & intRegState.rsCalleeRegArgMaskLiveIn) == 0); // initReg is not a live incoming
// argument reg
/* Initialize any lvMustInit vars on the stack */
LclVarDsc* varDsc;
unsigned varNum;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
if (!varDsc->lvMustInit)
{
continue;
}
// TODO-Review: I'm not sure that we're correctly handling the mustInit case for
// partially-enregistered vars in the case where we don't use a block init.
noway_assert(varDsc->lvIsInReg() || varDsc->lvOnFrame);
// lvMustInit can only be set for GC types or TYP_STRUCT types
// or when compInitMem is true
// or when in debug code
noway_assert(varTypeIsGC(varDsc->TypeGet()) || (varDsc->TypeGet() == TYP_STRUCT) ||
compiler->info.compInitMem || compiler->opts.compDbgCode);
if (!varDsc->lvOnFrame)
{
continue;
}
if ((varDsc->TypeGet() == TYP_STRUCT) && !compiler->info.compInitMem &&
(varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
// We only initialize the GC pointer slots in the TYP_STRUCT
const unsigned slots = (unsigned)compiler->lvaLclSize(varNum) / REGSIZE_BYTES;
ClassLayout* layout = varDsc->GetLayout();
for (unsigned i = 0; i < slots; i++)
{
if (layout->IsGCPtr(i))
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE,
genGetZeroReg(initReg, pInitRegZeroed), varNum, i * REGSIZE_BYTES);
}
}
}
else
{
regNumber zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
// zero out the whole thing, rounded up to a multiple of sizeof(int)
unsigned lclSize = roundUp(compiler->lvaLclSize(varNum), (unsigned)sizeof(int));
unsigned i;
for (i = 0; i + REGSIZE_BYTES <= lclSize; i += REGSIZE_BYTES)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, varNum, i);
}
#ifdef TARGET_64BIT
assert(i == lclSize || (i + sizeof(int) == lclSize));
if (i != lclSize)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, varNum, i);
i += sizeof(int);
}
#endif // TARGET_64BIT
assert(i == lclSize);
}
}
assert(regSet.tmpAllFree());
for (TempDsc* tempThis = regSet.tmpListBeg(); tempThis != nullptr; tempThis = regSet.tmpListNxt(tempThis))
{
if (!varTypeIsGC(tempThis->tdTempType()))
{
continue;
}
// printf("initialize untracked spillTmp [EBP-%04X]\n", stkOffs);
inst_ST_RV(ins_Store(TYP_I_IMPL), tempThis, 0, genGetZeroReg(initReg, pInitRegZeroed), TYP_I_IMPL);
}
}
}
//-----------------------------------------------------------------------------
// genEnregisterOSRArgsAndLocals: Initialize any enregistered args or locals
// that get values from the tier0 frame.
//
// Arguments:
// initReg -- scratch register to use if needed
// pInitRegZeroed -- [IN,OUT] if init reg is zero (on entry/exit)
//
#if defined(TARGET_ARM64)
void CodeGen::genEnregisterOSRArgsAndLocals(regNumber initReg, bool* pInitRegZeroed)
#else
void CodeGen::genEnregisterOSRArgsAndLocals()
#endif
{
assert(compiler->opts.IsOSR());
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
// basic sanity checks (make sure we're OSRing the right method)
assert(patchpointInfo->NumberOfLocals() == compiler->info.compLocalsCount);
const int originalFrameSize = patchpointInfo->TotalFrameSize();
const unsigned patchpointInfoLen = patchpointInfo->NumberOfLocals();
for (unsigned varNum = 0; varNum < compiler->lvaCount; varNum++)
{
if (!compiler->lvaIsOSRLocal(varNum))
{
// This local was not part of the tier0 method's state.
// No work required.
//
continue;
}
LclVarDsc* const varDsc = compiler->lvaGetDesc(varNum);
if (!varDsc->lvIsInReg())
{
// For args/locals in memory, the OSR frame will continue to access
// that memory location. No work required.
//
JITDUMP("---OSR--- V%02u in memory\n", varNum);
continue;
}
// This local was part of the live tier0 state and is enregistered in the
// OSR method. Initialize the register from the right frame slot.
//
// If we ever enable promotion we'll need to generalize what follows to copy each
// field from the tier0 frame to its OSR home.
//
if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
{
// This arg or local is not live at entry to the OSR method.
// No work required.
//
JITDUMP("---OSR--- V%02u (reg) not live at entry\n", varNum);
continue;
}
int fieldOffset = 0;
unsigned lclNum = varNum;
if (varDsc->lvIsStructField)
{
lclNum = varDsc->lvParentLcl;
assert(lclNum < patchpointInfoLen);
fieldOffset = varDsc->lvFldOffset;
JITDUMP("---OSR--- V%02u is promoted field of V%02u at offset %d\n", varNum, lclNum, fieldOffset);
}
// Note we are always reading from the tier0 frame here
//
const var_types lclTyp = varDsc->GetActualRegisterType();
const emitAttr size = emitTypeSize(lclTyp);
const int stkOffs = patchpointInfo->Offset(lclNum) + fieldOffset;
#if defined(TARGET_AMD64)
// Original frames always use frame pointers, so
// stkOffs is the tier0 frame's frame-relative offset
// to the variable.
//
// We need to determine the stack or frame-pointer relative
// offset for this variable in the current frame.
//
// If current frame does not use a frame pointer, we need to
// add the SP-to-FP delta of this frame and the SP-to-FP delta
// of the original frame; that translates from this frame's
// stack pointer to the old frame's frame pointer.
//
// We then add the original frame's frame-pointer relative
// offset (note this offset is usually negative -- the stack
// grows down, so locals are below the frame pointer).
//
// /-----original frame-----/
// / return address /
// / saved RBP --+ / <--- Original frame ptr --+
// / ... | / |
// / ... (stkOffs) / |
// / ... | / |
// / variable --+ / |
// / ... / (original frame sp-fp delta)
// / ... / |
// /-----OSR frame ---------/ |
// / pseudo return address / --+
// / ... / |
// / ... / (this frame sp-fp delta)
// / ... / |
// /------------------------/ <--- Stack ptr --+
//
// If the current frame is using a frame pointer, we need to
// add the SP-to-FP delta of the original frame and then add
// the original frame's frame-pointer relative offset.
//
// /-----original frame-----/
// / return address /
// / saved RBP --+ / <--- Original frame ptr --+
// / ... | / |
// / ... (stkOffs) / |
// / ... | / |
// / variable --+ / |
// / ... / (original frame sp-fp delta)
// / ... / |
// /-----OSR frame ---------/ |
// / pseudo return address / --+
// / saved RBP / <--- Frame ptr --+
// / ... /
// / ... /
// / ... /
// /------------------------/
//
int offset = originalFrameSize + stkOffs;
if (isFramePointerUsed())
{
// also adjust for the saved RBP on this frame
offset += TARGET_POINTER_SIZE;
}
else
{
offset += genSPtoFPdelta();
}
JITDUMP("---OSR--- V%02u (reg) old rbp offset %d old frame %d this frame sp-fp %d new offset %d (%02xH)\n",
varNum, stkOffs, originalFrameSize, genSPtoFPdelta(), offset, offset);
GetEmitter()->emitIns_R_AR(ins_Load(lclTyp), size, varDsc->GetRegNum(), genFramePointerReg(), offset);
#elif defined(TARGET_ARM64)
// Patchpoint offset is from top of Tier0 frame
//
// We need to determine the frame-pointer relative
// offset for this variable in the osr frame.
//
// First add the Tier0 frame size
//
const int tier0FrameSize = compiler->info.compPatchpointInfo->TotalFrameSize();
// then add the OSR frame size
//
const int osrFrameSize = genTotalFrameSize();
// then subtract OSR SP-FP delta
//
const int osrSpToFpDelta = genSPtoFPdelta();
// | => tier0 top of frame relative
// | + => tier0 bottom of frame relative
// | | + => osr bottom of frame (sp) relative
// | | | - => osr fp relative
// | | | |
const int offset = stkOffs + tier0FrameSize + osrFrameSize - osrSpToFpDelta;
JITDUMP("---OSR--- V%02u (reg) Tier0 virtual offset %d OSR frame size %d OSR sp-fp "
"delta %d total offset %d (0x%x)\n",
varNum, stkOffs, osrFrameSize, osrSpToFpDelta, offset, offset);
genInstrWithConstant(ins_Load(lclTyp), size, varDsc->GetRegNum(), genFramePointerReg(), offset, initReg);
*pInitRegZeroed = false;
#endif
}
}
/*-----------------------------------------------------------------------------
*
* Save the generic context argument.
*
* We need to do this within the "prolog" in case anyone tries to inspect
* the param-type-arg/this (which can be done after the prolog) using
* ICodeManager::GetParamTypeArg().
*/
void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
const bool reportArg = compiler->lvaReportParamTypeArg();
if (compiler->opts.IsOSR())
{
PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
if (reportArg)
{
// OSR method will use Tier0 slot to report context arg.
//
assert(ppInfo->HasGenericContextArgOffset());
JITDUMP("OSR method will use Tier0 frame slot for generics context arg.\n");
}
else if (compiler->lvaKeepAliveAndReportThis())
{
// OSR method will use Tier0 slot to report `this` as context.
//
assert(ppInfo->HasKeptAliveThis());
JITDUMP("OSR method will use Tier0 frame slot for generics context `this`.\n");
}
return;
}
// We should report either the generic context arg or "this", whichever is used.
if (!reportArg)
{
#ifndef JIT32_GCENCODER
if (!compiler->lvaKeepAliveAndReportThis())
#endif
{
return;
}
}
// For JIT32_GCENCODER, we won't be here if reportArg is false.
unsigned contextArg = reportArg ? compiler->info.compTypeCtxtArg : compiler->info.compThisArg;
noway_assert(contextArg != BAD_VAR_NUM);
LclVarDsc* varDsc = compiler->lvaGetDesc(contextArg);
// We are still in the prolog and compiler->info.compTypeCtxtArg has not been
// moved to its final home location. So we need to use it from the
// incoming location.
regNumber reg;
bool isPrespilledForProfiling = false;
#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED)
isPrespilledForProfiling =
compiler->compIsProfilerHookNeeded() && compiler->lvaIsPreSpilled(contextArg, regSet.rsMaskPreSpillRegs(false));
#endif
// Load from the argument register only if it is not prespilled.
if (compiler->lvaIsRegArgument(contextArg) && !isPrespilledForProfiling)
{
reg = varDsc->GetArgReg();
}
else
{
if (isFramePointerUsed())
{
#if defined(TARGET_ARM)
// GetStackOffset() is always valid for incoming stack-arguments, even if the argument
// will become enregistered.
// On Arm compiler->compArgSize doesn't include r11 and lr sizes and hence we need to add 2*REGSIZE_BYTES
noway_assert((2 * REGSIZE_BYTES <= varDsc->GetStackOffset()) &&
(size_t(varDsc->GetStackOffset()) < compiler->compArgSize + 2 * REGSIZE_BYTES));
#else
// GetStackOffset() is always valid for incoming stack-arguments, even if the argument
// will become enregistered.
noway_assert((0 < varDsc->GetStackOffset()) && (size_t(varDsc->GetStackOffset()) < compiler->compArgSize));
#endif
}
// We will just use the initReg since it is an available register
// and we are probably done using it anyway...
reg = initReg;
*pInitRegZeroed = false;
// mov reg, [compiler->info.compTypeCtxtArg]
GetEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
varDsc->GetStackOffset());
regSet.verifyRegUsed(reg);
}
#if defined(TARGET_ARM64)
genInstrWithConstant(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
compiler->lvaCachedGenericContextArgOffset(), rsGetRsvdReg());
#elif defined(TARGET_ARM)
// ARM's emitIns_R_R_I automatically uses the reserved register if necessary.
GetEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
compiler->lvaCachedGenericContextArgOffset());
#else // !ARM64 !ARM
// mov [ebp-lvaCachedGenericContextArgOffset()], reg
GetEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
compiler->lvaCachedGenericContextArgOffset());
#endif // !ARM64 !ARM
}
/*****************************************************************************
Esp frames :
----------
These instructions are just a reordering of the instructions used today.
push ebp
push esi
push edi
push ebx
sub esp, LOCALS_SIZE / push dummyReg if LOCALS_SIZE=sizeof(void*)
...
add esp, LOCALS_SIZE / pop dummyReg
pop ebx
pop edi
pop esi
pop ebp
ret
Ebp frames :
----------
The epilog does "add esp, LOCALS_SIZE" instead of "mov ebp, esp".
Everything else is similar, though in a different order.
The security object will no longer be at a fixed offset. However, the
offset can still be determined by looking up the GC-info and determining
how many callee-saved registers are pushed.
push ebp
mov ebp, esp
push esi
push edi
push ebx
sub esp, LOCALS_SIZE / push dummyReg if LOCALS_SIZE=sizeof(void*)
...
add esp, LOCALS_SIZE / pop dummyReg
pop ebx
pop edi
pop esi
(mov esp, ebp if there are no callee-saved registers)
pop ebp
ret
Double-aligned frame :
--------------------
LOCALS_SIZE_ADJUSTED needs to include an unused DWORD if an odd number
of callee-saved registers are pushed on the stack so that the locals
themselves are qword-aligned. The instructions are the same as today,
just in a different order.
push ebp
mov ebp, esp
and esp, 0xFFFFFFFC
push esi
push edi
push ebx
sub esp, LOCALS_SIZE_ADJUSTED / push dummyReg if LOCALS_SIZE=sizeof(void*)
...
add esp, LOCALS_SIZE_ADJUSTED / pop dummyReg
pop ebx
pop edi
pop esi
pop ebp
mov esp, ebp
pop ebp
ret
localloc (with ebp) frames :
--------------------------
The instructions are the same as today, just in a different order.
Also, today the epilog does "lea esp, [ebp-LOCALS_SIZE-calleeSavedRegsPushedSize]"
which will change to "lea esp, [ebp-calleeSavedRegsPushedSize]".
push ebp
mov ebp, esp
push esi
push edi
push ebx
sub esp, LOCALS_SIZE / push dummyReg if LOCALS_SIZE=sizeof(void*)
...
lea esp, [ebp-calleeSavedRegsPushedSize]
pop ebx
pop edi
pop esi
(mov esp, ebp if there are no callee-saved registers)
pop ebp
ret
*****************************************************************************/
/*****************************************************************************
*
* Reserve space for a function prolog.
*/
void CodeGen::genReserveProlog(BasicBlock* block)
{
assert(block != nullptr);
JITDUMP("Reserving prolog IG for block " FMT_BB "\n", block->bbNum);
/* Nothing is live on entry to the prolog */
GetEmitter()->emitCreatePlaceholderIG(IGPT_PROLOG, block, VarSetOps::MakeEmpty(compiler), 0, 0, false);
}
/*****************************************************************************
*
* Reserve space for a function epilog.
*/
void CodeGen::genReserveEpilog(BasicBlock* block)
{
regMaskTP gcrefRegsArg = gcInfo.gcRegGCrefSetCur;
regMaskTP byrefRegsArg = gcInfo.gcRegByrefSetCur;
/* The return value is special-cased: make sure it goes live for the epilog */
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
if (IsFullPtrRegMapRequired() && !jmpEpilog)
{
if (varTypeIsGC(compiler->info.compRetNativeType))
{
noway_assert(genTypeStSz(compiler->info.compRetNativeType) == genTypeStSz(TYP_I_IMPL));
gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
switch (compiler->info.compRetNativeType)
{
case TYP_REF:
gcrefRegsArg |= RBM_INTRET;
break;
case TYP_BYREF:
byrefRegsArg |= RBM_INTRET;
break;
default:
break;
}
JITDUMP("Extending return value GC liveness to epilog\n");
}
}
JITDUMP("Reserving epilog IG for block " FMT_BB "\n", block->bbNum);
assert(block != nullptr);
const VARSET_TP& gcrefVarsArg(GetEmitter()->emitThisGCrefVars);
bool last = (block->bbNext == nullptr);
GetEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg, last);
}
#if defined(FEATURE_EH_FUNCLETS)
/*****************************************************************************
*
* Reserve space for a funclet prolog.
*/
void CodeGen::genReserveFuncletProlog(BasicBlock* block)
{
assert(block != nullptr);
/* Currently, no registers are live on entry to the prolog, except maybe
the exception object. There might be some live stack vars, but they
cannot be accessed until after the frame pointer is re-established.
In order to potentially prevent emitting a death before the prolog
and a birth right after it, we just report it as live during the
prolog, and rely on the prolog being non-interruptible. Trust
genCodeForBBlist to correctly initialize all the sets.
We might need to relax these asserts if the VM ever starts
restoring any registers; then we could have live-in reg vars...
*/
noway_assert((gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT) == gcInfo.gcRegGCrefSetCur);
noway_assert(gcInfo.gcRegByrefSetCur == 0);
JITDUMP("Reserving funclet prolog IG for block " FMT_BB "\n", block->bbNum);
GetEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_PROLOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, false);
}
/*****************************************************************************
*
* Reserve space for a funclet epilog.
*/
void CodeGen::genReserveFuncletEpilog(BasicBlock* block)
{
assert(block != nullptr);
JITDUMP("Reserving funclet epilog IG for block " FMT_BB "\n", block->bbNum);
bool last = (block->bbNext == nullptr);
GetEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_EPILOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, last);
}
#endif // FEATURE_EH_FUNCLETS
/*****************************************************************************
* Finalize the frame size and offset assignments.
*
* No changes can be made to the modified register set after this, since that can affect how many
* callee-saved registers get saved.
*/
void CodeGen::genFinalizeFrame()
{
JITDUMP("Finalizing stack frame\n");
// Initializations need to happen based on the var locations at the start
// of the first basic block, so load those up. In particular, the determination
// of whether or not to use block init in the prolog is dependent on the variable
// locations on entry to the function.
compiler->m_pLinearScan->recordVarLocationsAtStartOfBB(compiler->fgFirstBB);
genCheckUseBlockInit();
// Set various registers as "modified" for special code generation scenarios: Edit & Continue, P/Invoke calls, etc.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86)
if (compiler->compTailCallUsed)
{
// If we are generating a helper-based tailcall, we've set the tailcall helper "flags"
// argument to "1", indicating to the tailcall helper that we've saved the callee-saved
// registers (ebx, esi, edi). So, we need to make sure all the callee-saved registers
// actually get saved.
regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED);
}
#endif // TARGET_X86
#ifdef TARGET_ARM
// Make sure that callee-saved registers used by call to a stack probing helper generated are pushed on stack.
if (compiler->compLclFrameSize >= compiler->eeGetPageSize())
{
regSet.rsSetRegsModified(RBM_STACK_PROBE_HELPER_ARG | RBM_STACK_PROBE_HELPER_CALL_TARGET |
RBM_STACK_PROBE_HELPER_TRASH);
}
// If there are any reserved registers, add them to the modified set.
if (regSet.rsMaskResvd != RBM_NONE)
{
regSet.rsSetRegsModified(regSet.rsMaskResvd);
}
#endif // TARGET_ARM
#ifdef DEBUG
if (verbose)
{
printf("Modified regs: ");
dspRegMask(regSet.rsGetModifiedRegsMask());
printf("\n");
}
#endif // DEBUG
// Set various registers as "modified" for special code generation scenarios: Edit & Continue, P/Invoke calls, etc.
if (compiler->opts.compDbgEnC)
{
// We always save FP.
noway_assert(isFramePointerUsed());
#ifdef TARGET_AMD64
// On x64 we always save exactly RBP, RSI and RDI for EnC.
regMaskTP okRegs = (RBM_CALLEE_TRASH | RBM_FPBASE | RBM_RSI | RBM_RDI);
regSet.rsSetRegsModified(RBM_RSI | RBM_RDI);
noway_assert((regSet.rsGetModifiedRegsMask() & ~okRegs) == 0);
#else // !TARGET_AMD64
// On x86 we save all callee saved regs so the saved reg area size is consistent
regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED & ~RBM_FPBASE);
#endif // !TARGET_AMD64
}
/* If we have any pinvoke calls, we might potentially trash everything */
if (compiler->compMethodRequiresPInvokeFrame())
{
noway_assert(isFramePointerUsed()); // Setup of Pinvoke frame currently requires an EBP style frame
regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED & ~RBM_FPBASE);
}
#ifdef UNIX_AMD64_ABI
// On Unix x64 we also save R14 and R15 for ELT profiler hook generation.
if (compiler->compIsProfilerHookNeeded())
{
regSet.rsSetRegsModified(RBM_PROFILER_ENTER_ARG_0 | RBM_PROFILER_ENTER_ARG_1);
}
#endif
/* Count how many callee-saved registers will actually be saved (pushed) */
// EBP cannot be (directly) modified for EBP frame and double-aligned frames
noway_assert(!doubleAlignOrFramePointerUsed() || !regSet.rsRegsModified(RBM_FPBASE));
#if ETW_EBP_FRAMED
// EBP cannot be (directly) modified
noway_assert(!regSet.rsRegsModified(RBM_FPBASE));
#endif
regMaskTP maskCalleeRegsPushed = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED;
#ifdef TARGET_ARMARCH
if (isFramePointerUsed())
{
// For a FP based frame we have to push/pop the FP register
//
maskCalleeRegsPushed |= RBM_FPBASE;
// This assert checks that we are not using REG_FP
// as both the frame pointer and as a codegen register
//
assert(!regSet.rsRegsModified(RBM_FPBASE));
}
// we always push LR. See genPushCalleeSavedRegisters
//
maskCalleeRegsPushed |= RBM_LR;
#if defined(TARGET_ARM)
// TODO-ARM64-Bug?: enable some variant of this for FP on ARM64?
regMaskTP maskPushRegsFloat = maskCalleeRegsPushed & RBM_ALLFLOAT;
regMaskTP maskPushRegsInt = maskCalleeRegsPushed & ~maskPushRegsFloat;
if ((maskPushRegsFloat != RBM_NONE) ||
(compiler->opts.MinOpts() && (regSet.rsMaskResvd & maskCalleeRegsPushed & RBM_OPT_RSVD)))
{
// Here we try to keep the stack double-aligned before the vpush
if ((genCountBits(regSet.rsMaskPreSpillRegs(true) | maskPushRegsInt) % 2) != 0)
{
regNumber extraPushedReg = REG_R4;
while (maskPushRegsInt & genRegMask(extraPushedReg))
{
extraPushedReg = REG_NEXT(extraPushedReg);
}
if (extraPushedReg < REG_R11)
{
maskPushRegsInt |= genRegMask(extraPushedReg);
regSet.rsSetRegsModified(genRegMask(extraPushedReg));
}
}
maskCalleeRegsPushed = maskPushRegsInt | maskPushRegsFloat;
}
// We currently only expect to push/pop consecutive FP registers
// and these have to be double-sized registers as well.
// Here we will ensure that maskPushRegsFloat obeys these requirements.
//
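// For example (illustrative): if only d10 were modified, the loop below would grow
// the contiguous mask to {d8, d9, d10}, adding d8 and d9 as extra pushed registers
// even though they were not otherwise modified.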
if (maskPushRegsFloat != RBM_NONE)
{
regMaskTP contiguousMask = genRegMaskFloat(REG_F16, TYP_DOUBLE);
while (maskPushRegsFloat > contiguousMask)
{
contiguousMask <<= 2;
contiguousMask |= genRegMaskFloat(REG_F16, TYP_DOUBLE);
}
if (maskPushRegsFloat != contiguousMask)
{
regMaskTP maskExtraRegs = contiguousMask - maskPushRegsFloat;
maskPushRegsFloat |= maskExtraRegs;
regSet.rsSetRegsModified(maskExtraRegs);
maskCalleeRegsPushed |= maskExtraRegs;
}
}
#endif // TARGET_ARM
#endif // TARGET_ARMARCH
#if defined(TARGET_XARCH)
// Compute the count of callee saved float regs saved on stack.
// On Amd64 we push only integer regs. Callee saved float (xmm6-xmm15)
// regs are stack allocated and preserved in their stack locations.
compiler->compCalleeFPRegsSavedMask = maskCalleeRegsPushed & RBM_FLT_CALLEE_SAVED;
maskCalleeRegsPushed &= ~RBM_FLT_CALLEE_SAVED;
#endif // defined(TARGET_XARCH)
compiler->compCalleeRegsPushed = genCountBits(maskCalleeRegsPushed);
#ifdef DEBUG
if (verbose)
{
printf("Callee-saved registers pushed: %d ", compiler->compCalleeRegsPushed);
dspRegMask(maskCalleeRegsPushed);
printf("\n");
}
#endif // DEBUG
/* Assign the final offsets to things living on the stack frame */
compiler->lvaAssignFrameOffsets(Compiler::FINAL_FRAME_LAYOUT);
/* We want to make sure that the prolog size calculated here is accurate
(that is instructions will not shrink because of conservative stack
frame approximations). We do this by filling in the correct size
here (where we have committed to the final numbers for the frame offsets)
This will ensure that the prolog size is always correct
*/
GetEmitter()->emitMaxTmpSize = regSet.tmpGetTotalSize();
#ifdef DEBUG
if (compiler->opts.dspCode || compiler->opts.disAsm || compiler->opts.disAsm2 || verbose)
{
compiler->lvaTableDump();
}
#endif
}
/*****************************************************************************
*
* Generates code for a function prolog.
*
* NOTE REGARDING CHANGES THAT IMPACT THE DEBUGGER:
*
* The debugger relies on decoding ARM instructions to be able to successfully step through code. It does not
* implement decoding all ARM instructions. It only implements decoding the instructions which the JIT emits, and
* only instructions which result in control not going to the next instruction. Basically, any time execution would
* not continue at the next instruction (such as B, BL, BX, BLX, POP{pc}, etc.), the debugger has to be able to
* decode that instruction. If any of this is changed on ARM, the debugger team needs to be notified so that it
* can ensure stepping isn't broken. This is also a requirement for x86 and amd64.
*
* If any changes are made in the prolog, epilog, calls, returns, and branches, it is a good idea to notify the
* debugger team to ensure that stepping still works.
*
* ARM stepping code is here: debug\ee\arm\armwalker.cpp, vm\arm\armsinglestepper.cpp.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
void CodeGen::genFnProlog()
{
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
compiler->funSetCurrentFunc(0);
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFnProlog()\n");
}
#endif
#ifdef DEBUG
genInterruptibleUsed = true;
#endif
assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT);
/* Ready to start on the prolog proper */
GetEmitter()->emitBegProlog();
compiler->unwindBegProlog();
// Do this so we can put the prolog instruction group ahead of
// other instruction groups
genIPmappingAddToFront(IPmappingDscKind::Prolog, DebugInfo(), true);
#ifdef DEBUG
if (compiler->opts.dspCode)
{
printf("\n__prolog:\n");
}
#endif
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
{
// Create new scopes for the method-parameters for the prolog-block.
psiBegProlog();
}
#if defined(TARGET_ARM64)
// For arm64 OSR, emit a "phantom prolog" to account for the actions taken
// in the tier0 frame that impact FP and SP on entry to the OSR method.
//
// x64 handles this differently; the phantom prolog unwind is emitted in
// genOSRRecordTier0CalleeSavedRegistersAndFrame.
//
if (compiler->opts.IsOSR())
{
PatchpointInfo* patchpointInfo = compiler->info.compPatchpointInfo;
const int tier0FrameSize = patchpointInfo->TotalFrameSize();
// SP is tier0 method's SP.
compiler->unwindAllocStack(tier0FrameSize);
}
#endif // defined(TARGET_ARM64)
#ifdef DEBUG
if (compiler->compJitHaltMethod())
{
/* put a nop first because the debugger and other tools are likely to
put an int3 at the beginning and we don't want to confuse them */
instGen(INS_nop);
instGen(INS_BREAKPOINT);
#ifdef TARGET_ARMARCH
// Avoid asserts in the unwind info because these instructions aren't accounted for.
compiler->unwindPadding();
#endif // TARGET_ARMARCH
}
#endif // DEBUG
#if defined(FEATURE_EH_FUNCLETS) && defined(DEBUG)
// We cannot force 0-initialization of the PSPSym
// as it will overwrite the real value
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(compiler->lvaPSPSym);
assert(!varDsc->lvMustInit);
}
#endif // FEATURE_EH_FUNCLETS && DEBUG
/*-------------------------------------------------------------------------
*
* Record the stack frame ranges that will cover all of the tracked
* and untracked pointer variables.
* Also find which registers will need to be zero-initialized.
*
* 'initRegs': - Generally, enregistered variables should not need to be
* zero-inited. They only need to be zero-inited when they
* have a possibly uninitialized read on some control
* flow path. Apparently some of the IL_STUBs that we
* generate have this property.
*/
int untrLclLo = +INT_MAX;
int untrLclHi = -INT_MAX;
// 'hasUntrLcl' is true if there are any stack locals which must be init'ed.
// Note that they may be tracked, but simply not allocated to a register.
bool hasUntrLcl = false;
int GCrefLo = +INT_MAX;
int GCrefHi = -INT_MAX;
bool hasGCRef = false;
regMaskTP initRegs = RBM_NONE; // Registers which must be init'ed.
regMaskTP initFltRegs = RBM_NONE; // FP registers which must be init'ed.
regMaskTP initDblRegs = RBM_NONE;
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
if (varDsc->lvIsParam && !varDsc->lvIsRegArg)
{
continue;
}
if (!varDsc->lvIsInReg() && !varDsc->lvOnFrame)
{
noway_assert(varDsc->lvRefCnt() == 0);
continue;
}
signed int loOffs = varDsc->GetStackOffset();
signed int hiOffs = varDsc->GetStackOffset() + compiler->lvaLclSize(varNum);
/* We need to know the offset range of tracked stack GC refs */
/* We assume that the GC reference can be anywhere in the TYP_STRUCT */
if (varDsc->HasGCPtr() && varDsc->lvTrackedNonStruct() && varDsc->lvOnFrame)
{
// For fields of PROMOTION_TYPE_DEPENDENT type of promotion, they should have been
// taken care of by the parent struct.
if (!compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
hasGCRef = true;
if (loOffs < GCrefLo)
{
GCrefLo = loOffs;
}
if (hiOffs > GCrefHi)
{
GCrefHi = hiOffs;
}
}
}
/* For lvMustInit vars, gather pertinent info */
if (!varDsc->lvMustInit)
{
continue;
}
bool isInReg = varDsc->lvIsInReg();
bool isInMemory = !isInReg || varDsc->lvLiveInOutOfHndlr;
// Note that 'lvIsInReg()' will only be accurate for variables that are actually live-in to
// the first block. This will include all possibly-uninitialized locals, whose liveness
// will naturally propagate up to the entry block. However, we also set 'lvMustInit' for
// locals that are live-in to a finally block, and those may not be live-in to the first
// block. For those, we don't want to initialize the register, as it will not actually be
// occupying it on entry.
if (isInReg)
{
if (compiler->lvaEnregEHVars && varDsc->lvLiveInOutOfHndlr)
{
isInReg = VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex);
}
else
{
assert(VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex));
}
}
if (isInReg)
{
regNumber regForVar = varDsc->GetRegNum();
regMaskTP regMask = genRegMask(regForVar);
if (!genIsValidFloatReg(regForVar))
{
initRegs |= regMask;
if (varTypeIsMultiReg(varDsc))
{
if (varDsc->GetOtherReg() != REG_STK)
{
initRegs |= genRegMask(varDsc->GetOtherReg());
}
else
{
/* Upper DWORD is on the stack, and needs to be inited */
loOffs += sizeof(int);
goto INIT_STK;
}
}
}
else if (varDsc->TypeGet() == TYP_DOUBLE)
{
initDblRegs |= regMask;
}
else
{
initFltRegs |= regMask;
}
}
if (isInMemory)
{
INIT_STK:
hasUntrLcl = true;
if (loOffs < untrLclLo)
{
untrLclLo = loOffs;
}
if (hiOffs > untrLclHi)
{
untrLclHi = hiOffs;
}
}
}
/* Don't forget about spill temps that hold pointers */
assert(regSet.tmpAllFree());
for (TempDsc* tempThis = regSet.tmpListBeg(); tempThis != nullptr; tempThis = regSet.tmpListNxt(tempThis))
{
if (!varTypeIsGC(tempThis->tdTempType()))
{
continue;
}
signed int loOffs = tempThis->tdTempOffs();
signed int hiOffs = loOffs + TARGET_POINTER_SIZE;
// If there is a frame pointer used, due to frame pointer chaining it will point to the stored value of the
// previous frame pointer. Thus, loOffs can't be zero.
CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_AMD64)
// However, on amd64 there is no requirement to chain frame pointers.
noway_assert(!isFramePointerUsed() || loOffs != 0);
#endif // !defined(TARGET_AMD64)
// printf(" Untracked tmp at [EBP-%04X]\n", -stkOffs);
hasUntrLcl = true;
if (loOffs < untrLclLo)
{
untrLclLo = loOffs;
}
if (hiOffs > untrLclHi)
{
untrLclHi = hiOffs;
}
}
// TODO-Cleanup: Add suitable assert for the OSR case.
assert(compiler->opts.IsOSR() || ((genInitStkLclCnt > 0) == hasUntrLcl));
#ifdef DEBUG
if (verbose)
{
if (genInitStkLclCnt > 0)
{
printf("Found %u lvMustInit int-sized stack slots, frame offsets %d through %d\n", genInitStkLclCnt,
-untrLclLo, -untrLclHi);
}
}
#endif
#ifdef TARGET_ARM
// On the ARM we will spill any incoming struct args in the first instruction in the prolog
// Ditto for all enregistered user arguments in a varargs method.
// These registers will be available to use for the initReg. We just remove
// all of these registers from the rsCalleeRegArgMaskLiveIn.
//
intRegState.rsCalleeRegArgMaskLiveIn &= ~regSet.rsMaskPreSpillRegs(false);
#endif
/* Choose the register to use for zero initialization */
regNumber initReg = REG_SCRATCH; // Unless we find a better register below
// Track whether initReg holds a non-zero value. Start conservatively and assume it does.
// If initReg is ever set to zero, this variable is set to true and zero-initializing initReg
// will be skipped.
bool initRegZeroed = false;
regMaskTP excludeMask = intRegState.rsCalleeRegArgMaskLiveIn;
regMaskTP tempMask;
// We should not use the special PINVOKE registers as the initReg
// since they are trashed by the jithelper call to set up the PINVOKE frame
if (compiler->compMethodRequiresPInvokeFrame())
{
excludeMask |= RBM_PINVOKE_FRAME;
assert((!compiler->opts.ShouldUsePInvokeHelpers()) || (compiler->info.compLvFrameListRoot == BAD_VAR_NUM));
if (!compiler->opts.ShouldUsePInvokeHelpers())
{
excludeMask |= (RBM_PINVOKE_TCB | RBM_PINVOKE_SCRATCH);
// We also must exclude the register used by compLvFrameListRoot when it is enregistered
//
const LclVarDsc* varDsc = compiler->lvaGetDesc(compiler->info.compLvFrameListRoot);
if (varDsc->lvRegister)
{
excludeMask |= genRegMask(varDsc->GetRegNum());
}
}
}
#ifdef TARGET_ARM
// If we have a variable sized frame (compLocallocUsed is true)
// then using REG_SAVED_LOCALLOC_SP in the prolog is not allowed
if (compiler->compLocallocUsed)
{
excludeMask |= RBM_SAVED_LOCALLOC_SP;
}
#endif // TARGET_ARM
const bool isRoot = (compiler->funCurrentFunc()->funKind == FuncKind::FUNC_ROOT);
#ifdef TARGET_AMD64
const bool isOSRx64Root = isRoot && compiler->opts.IsOSR();
#else
const bool isOSRx64Root = false;
#endif // TARGET_AMD64
tempMask = initRegs & ~excludeMask & ~regSet.rsMaskResvd;
if (tempMask != RBM_NONE)
{
// We will use one of the registers that we were planning to zero init anyway.
// We pick the lowest register number.
tempMask = genFindLowestBit(tempMask);
initReg = genRegNumFromMask(tempMask);
}
// Next we prefer to use one of the unused argument registers.
// If they aren't available we use one of the caller-saved integer registers.
else
{
tempMask = regSet.rsGetModifiedRegsMask() & RBM_ALLINT & ~excludeMask & ~regSet.rsMaskResvd;
if (tempMask != RBM_NONE)
{
// We pick the lowest register number
tempMask = genFindLowestBit(tempMask);
initReg = genRegNumFromMask(tempMask);
}
}
#if defined(TARGET_AMD64)
// For x64 OSR root frames, we can't use any not-yet-saved
// callee-saved register as initReg, as we defer saving these until later in
// the prolog, and we don't have normal arg regs.
if (isOSRx64Root)
{
initReg = REG_SCRATCH; // REG_EAX
}
#elif defined(TARGET_ARM64)
// For arm64 OSR root frames, we may need a scratch register for large
// offset addresses. Use a register that won't be allocated.
//
if (isRoot && compiler->opts.IsOSR())
{
initReg = REG_IP1;
}
#endif
noway_assert(!compiler->compMethodRequiresPInvokeFrame() || (initReg != REG_PINVOKE_FRAME));
#if defined(TARGET_AMD64)
// If we are a varargs call, in order to set up the arguments correctly this
// must be done in a 2 step process. As per the x64 ABI:
// a) The caller sets up the argument shadow space (just before the return
// address, 4 pointer sized slots).
// b) The callee is responsible for homing the arguments in the shadow space
//    provided by the caller.
// This way, the varargs iterator will be able to retrieve the
// call arguments properly since both the arg regs and the stack allocated
// args will be contiguous.
//
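// Illustratively (not necessarily the exact emitted sequence), the homing performed
// by spillIntArgRegsToShadowSlots amounts to, at function entry before any frame setup:
//     mov [rsp+08h], rcx
//     mov [rsp+10h], rdx
//     mov [rsp+18h], r8
//     mov [rsp+20h], r9
//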
// OSR methods can skip this, as the setup is done by the original method.
if (compiler->info.compIsVarArgs && !compiler->opts.IsOSR())
{
GetEmitter()->spillIntArgRegsToShadowSlots();
}
#endif // TARGET_AMD64
#ifdef TARGET_ARM
/*-------------------------------------------------------------------------
*
* Now start emitting the part of the prolog which sets up the frame
*/
if (regSet.rsMaskPreSpillRegs(true) != RBM_NONE)
{
inst_IV(INS_push, (int)regSet.rsMaskPreSpillRegs(true));
compiler->unwindPushMaskInt(regSet.rsMaskPreSpillRegs(true));
}
#endif // TARGET_ARM
unsigned extraFrameSize = 0;
#ifdef TARGET_XARCH
#ifdef TARGET_AMD64
if (isOSRx64Root)
{
// Account for the Tier0 callee saves
//
genOSRRecordTier0CalleeSavedRegistersAndFrame();
// We don't actually push any callee saves on the OSR frame,
// but we still reserve space, so account for this when
// allocating the local frame.
//
extraFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
}
#endif // TARGET_AMD64
if (doubleAlignOrFramePointerUsed())
{
// OSR methods handle "saving" FP specially.
//
// For epilog and unwind, we restore the RBP saved by the
// Tier0 method. The save we do here is just to set up a
// proper RBP-based frame chain link.
//
if (isOSRx64Root && isFramePointerUsed())
{
GetEmitter()->emitIns_R_AR(INS_mov, EA_8BYTE, initReg, REG_FPBASE, 0);
inst_RV(INS_push, initReg, TYP_REF);
initRegZeroed = false;
// We account for the SP movement in unwind, but not for
// the "save" of RBP.
//
compiler->unwindAllocStack(REGSIZE_BYTES);
}
else
{
inst_RV(INS_push, REG_FPBASE, TYP_REF);
compiler->unwindPush(REG_FPBASE);
}
#ifdef USING_SCOPE_INFO
psiAdjustStackLevel(REGSIZE_BYTES);
#endif // USING_SCOPE_INFO
#ifndef TARGET_AMD64 // On AMD64, establish the frame pointer after the "sub rsp"
genEstablishFramePointer(0, /*reportUnwindData*/ true);
#endif // !TARGET_AMD64
#if DOUBLE_ALIGN
if (compiler->genDoubleAlign())
{
noway_assert(isFramePointerUsed() == false);
noway_assert(!regSet.rsRegsModified(RBM_FPBASE)); /* Trashing EBP is out. */
inst_RV_IV(INS_AND, REG_SPBASE, -8, EA_PTRSIZE);
}
#endif // DOUBLE_ALIGN
}
#endif // TARGET_XARCH
#ifdef TARGET_ARM64
genPushCalleeSavedRegisters(initReg, &initRegZeroed);
#else // !TARGET_ARM64
if (!isOSRx64Root)
{
genPushCalleeSavedRegisters();
}
#endif // !TARGET_ARM64
#ifdef TARGET_ARM
bool needToEstablishFP = false;
int afterLclFrameSPtoFPdelta = 0;
if (doubleAlignOrFramePointerUsed())
{
needToEstablishFP = true;
// If the local frame is small enough, we establish the frame pointer after the OS-reported prolog.
// This makes the prolog and epilog match, giving us smaller unwind data. If the frame size is
// too big, we go ahead and do it here.
int SPtoFPdelta = (compiler->compCalleeRegsPushed - 2) * REGSIZE_BYTES;
afterLclFrameSPtoFPdelta = SPtoFPdelta + compiler->compLclFrameSize;
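// For example (illustrative): with push {r4-r11,lr}, nine registers are saved, so
// SPtoFPdelta is (9 - 2) * 4 = 28 and FP will point at the saved r11, preserving the
// usual frame-pointer chain.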
if (!arm_Valid_Imm_For_Add_SP(afterLclFrameSPtoFPdelta))
{
// Oh well, it looks too big. Go ahead and establish the frame pointer here.
genEstablishFramePointer(SPtoFPdelta, /*reportUnwindData*/ true);
needToEstablishFP = false;
}
}
#endif // TARGET_ARM
//-------------------------------------------------------------------------
//
// Subtract the local frame size from SP.
//
//-------------------------------------------------------------------------
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_ARM64
regMaskTP maskStackAlloc = RBM_NONE;
#ifdef TARGET_ARM
maskStackAlloc = genStackAllocRegisterMask(compiler->compLclFrameSize + extraFrameSize,
regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED);
#endif // TARGET_ARM
if (maskStackAlloc == RBM_NONE)
{
genAllocLclFrame(compiler->compLclFrameSize + extraFrameSize, initReg, &initRegZeroed,
intRegState.rsCalleeRegArgMaskLiveIn);
}
#endif // !TARGET_ARM64
#ifdef TARGET_AMD64
// For x64 OSR we have to finish saving int callee saves.
//
if (isOSRx64Root)
{
genOSRSaveRemainingCalleeSavedRegisters();
}
#endif // TARGET_AMD64
//-------------------------------------------------------------------------
#ifdef TARGET_ARM
if (compiler->compLocallocUsed)
{
GetEmitter()->emitIns_Mov(INS_mov, EA_4BYTE, REG_SAVED_LOCALLOC_SP, REG_SPBASE, /* canSkip */ false);
regSet.verifyRegUsed(REG_SAVED_LOCALLOC_SP);
compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0);
}
#endif // TARGET_ARM
#if defined(TARGET_XARCH)
// Preserve callee saved float regs to stack.
genPreserveCalleeSavedFltRegs(compiler->compLclFrameSize);
#endif // defined(TARGET_XARCH)
#ifdef TARGET_AMD64
// Establish the AMD64 frame pointer after the OS-reported prolog.
if (doubleAlignOrFramePointerUsed())
{
const bool reportUnwindData = compiler->compLocallocUsed || compiler->opts.compDbgEnC;
genEstablishFramePointer(compiler->codeGen->genSPtoFPdelta(), reportUnwindData);
}
#endif // TARGET_AMD64
//-------------------------------------------------------------------------
//
// This is the end of the OS-reported prolog for purposes of unwinding
//
//-------------------------------------------------------------------------
#ifdef TARGET_ARM
if (needToEstablishFP)
{
genEstablishFramePointer(afterLclFrameSPtoFPdelta, /*reportUnwindData*/ false);
needToEstablishFP = false; // nobody uses this later, but set it anyway, just to be explicit
}
#endif // TARGET_ARM
if (compiler->info.compPublishStubParam)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SECRET_STUB_PARAM,
compiler->lvaStubArgumentVar, 0);
assert(intRegState.rsCalleeRegArgMaskLiveIn & RBM_SECRET_STUB_PARAM);
// It's no longer live; clear it out so it can be used after this in the prolog
intRegState.rsCalleeRegArgMaskLiveIn &= ~RBM_SECRET_STUB_PARAM;
}
//
// Zero out the frame as needed
//
genZeroInitFrame(untrLclHi, untrLclLo, initReg, &initRegZeroed);
#if defined(FEATURE_EH_FUNCLETS)
genSetPSPSym(initReg, &initRegZeroed);
#else // !FEATURE_EH_FUNCLETS
// When compInitMem is true, genZeroInitFrame will zero out the shadow SP slots
if (compiler->ehNeedsShadowSPslots() && !compiler->info.compInitMem)
{
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs = compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE;
// Zero out the slot for nesting level 0
unsigned firstSlotOffs = filterEndOffsetSlotOffs - TARGET_POINTER_SIZE;
if (!initRegZeroed)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg);
initRegZeroed = true;
}
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, initReg, compiler->lvaShadowSPslotsVar,
firstSlotOffs);
}
#endif // !FEATURE_EH_FUNCLETS
genReportGenericContextArg(initReg, &initRegZeroed);
#ifdef JIT32_GCENCODER
// Initialize the LocalAllocSP slot if there is localloc in the function.
if (compiler->lvaLocAllocSPvar != BAD_VAR_NUM)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
}
#endif // JIT32_GCENCODER
// Set up the GS security cookie
genSetGSSecurityCookie(initReg, &initRegZeroed);
#ifdef PROFILING_SUPPORTED
// Insert a function entry callback for profiling, if requested.
// OSR methods aren't called, so don't have enter hooks.
if (!compiler->opts.IsOSR())
{
genProfilingEnterCallback(initReg, &initRegZeroed);
}
#endif // PROFILING_SUPPORTED
// For OSR we may have a zero-length prolog. That's not supported
// when the method must report a generics context, so add a nop if so.
//
if (compiler->opts.IsOSR() && (GetEmitter()->emitGetPrologOffsetEstimate() == 0) &&
(compiler->lvaReportParamTypeArg() || compiler->lvaKeepAliveAndReportThis()))
{
JITDUMP("OSR: prolog was zero length and has generic context to report: adding nop to pad prolog.\n");
instGen(INS_nop);
}
if (!GetInterruptible())
{
// The 'real' prolog ends here for non-interruptible methods.
// For fully-interruptible methods, we extend the prolog so that
// we do not need to track GC information while shuffling the
// arguments.
GetEmitter()->emitMarkPrologEnd();
}
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
// The unused bits of Vector3 arguments must be cleared
// since the native compiler doesn't initialize the upper bits to zero.
//
// TODO-Cleanup: This logic can be implemented in
// genFnPrologCalleeRegArgs() for argument registers and
// genEnregisterIncomingStackArgs() for stack arguments.
genClearStackVec3ArgUpperBits();
#endif // UNIX_AMD64_ABI && FEATURE_SIMD
/*-----------------------------------------------------------------------------
* Take care of register arguments first
*/
// Home incoming arguments and generate any required inits.
// OSR handles this by moving the values from the original frame.
//
// Update the arg initial register locations.
//
if (compiler->opts.IsOSR())
{
// For OSR we defer updating "initial reg" for args until
// we've set the live-in regs with values from the Tier0 frame.
//
// Otherwise we'll do some of these fetches twice.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM64)
genEnregisterOSRArgsAndLocals(initReg, &initRegZeroed);
#else
genEnregisterOSRArgsAndLocals();
#endif
compiler->lvaUpdateArgsWithInitialReg();
}
else
{
compiler->lvaUpdateArgsWithInitialReg();
auto assignIncomingRegisterArgs = [this, initReg, &initRegZeroed](RegState* regState) {
if (regState->rsCalleeRegArgMaskLiveIn)
{
// If we need an extra register to shuffle around the incoming registers
// we will use xtraReg (initReg) and set the xtraRegClobbered flag,
// if we don't need to use the xtraReg then this flag will stay false
//
regNumber xtraReg;
bool xtraRegClobbered = false;
if (genRegMask(initReg) & RBM_ARG_REGS)
{
xtraReg = initReg;
}
else
{
xtraReg = REG_SCRATCH;
initRegZeroed = false;
}
genFnPrologCalleeRegArgs(xtraReg, &xtraRegClobbered, regState);
if (xtraRegClobbered)
{
initRegZeroed = false;
}
}
};
#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM)
assignIncomingRegisterArgs(&intRegState);
assignIncomingRegisterArgs(&floatRegState);
#else
assignIncomingRegisterArgs(&intRegState);
#endif
// Home the incoming arguments.
genEnregisterIncomingStackArgs();
}
/* Initialize any must-init registers variables now */
if (initRegs)
{
regMaskTP regMask = 0x1;
for (regNumber reg = REG_INT_FIRST; reg <= REG_INT_LAST; reg = REG_NEXT(reg), regMask <<= 1)
{
if (regMask & initRegs)
{
// Check if we have already zeroed this register
if ((reg == initReg) && initRegZeroed)
{
continue;
}
else
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, reg);
if (reg == initReg)
{
initRegZeroed = true;
}
}
}
}
}
if (initFltRegs | initDblRegs)
{
// If initReg is not in initRegs then we will use REG_SCRATCH
if ((genRegMask(initReg) & initRegs) == 0)
{
initReg = REG_SCRATCH;
initRegZeroed = false;
}
#ifdef TARGET_ARM
// This is needed only for Arm since it can use a zero initialized int register
// to initialize vfp registers.
if (!initRegZeroed)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg);
initRegZeroed = true;
}
#endif // TARGET_ARM
genZeroInitFltRegs(initFltRegs, initDblRegs, initReg);
}
//-----------------------------------------------------------------------------
//
// Increase the prolog size here only if fully interruptible.
//
if (GetInterruptible())
{
GetEmitter()->emitMarkPrologEnd();
}
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
{
psiEndProlog();
}
if (hasGCRef)
{
GetEmitter()->emitSetFrameRangeGCRs(GCrefLo, GCrefHi);
}
else
{
noway_assert(GCrefLo == +INT_MAX);
noway_assert(GCrefHi == -INT_MAX);
}
#ifdef DEBUG
if (compiler->opts.dspCode)
{
printf("\n");
}
#endif
#ifdef TARGET_X86
// On non-x86 the VARARG cookie does not need any special treatment.
// Load up the VARARG argument pointer register so it doesn't get clobbered.
// Only do this if we actually access any statically declared args
// (our argument pointer register has a refcount > 0).
unsigned argsStartVar = compiler->lvaVarargsBaseOfStkArgs;
if (compiler->info.compIsVarArgs && compiler->lvaGetDesc(argsStartVar)->lvRefCnt() > 0)
{
varDsc = compiler->lvaGetDesc(argsStartVar);
noway_assert(compiler->info.compArgsCount > 0);
// MOV EAX, <VARARGS HANDLE>
GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, compiler->info.compArgsCount - 1, 0);
regSet.verifyRegUsed(REG_EAX);
// MOV EAX, [EAX]
GetEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, REG_EAX, 0);
// EDX might actually be holding something here. So make sure to only use EAX for this code
// sequence.
const LclVarDsc* lastArg = compiler->lvaGetDesc(compiler->info.compArgsCount - 1);
noway_assert(!lastArg->lvRegister);
signed offset = lastArg->GetStackOffset();
assert(offset != BAD_STK_OFFS);
noway_assert(lastArg->lvFramePointerBased);
// LEA EAX, &<VARARGS HANDLE> + EAX
GetEmitter()->emitIns_R_ARR(INS_lea, EA_PTRSIZE, REG_EAX, genFramePointerReg(), REG_EAX, offset);
if (varDsc->lvIsInReg())
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, varDsc->GetRegNum(), REG_EAX, /* canSkip */ true);
regSet.verifyRegUsed(varDsc->GetRegNum());
}
else
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, argsStartVar, 0);
}
}
#endif // TARGET_X86
#if defined(DEBUG) && defined(TARGET_XARCH)
if (compiler->opts.compStackCheckOnRet)
{
noway_assert(compiler->lvaReturnSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvOnFrame);
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnSpCheck, 0);
}
#endif // defined(DEBUG) && defined(TARGET_XARCH)
GetEmitter()->emitEndProlog();
compiler->unwindEndProlog();
noway_assert(GetEmitter()->emitMaxTmpSize == regSet.tmpGetTotalSize());
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//------------------------------------------------------------------------
// getCallTarget - Get the node that evaluates to the call target
//
// Arguments:
// call - the GT_CALL node
//
// Returns:
// The node. Note that for direct calls this may still return non-null if the direct call
// requires a 'complex' tree to load the target (e.g. in R2R or because we go through a stub).
//
GenTree* CodeGen::getCallTarget(const GenTreeCall* call, CORINFO_METHOD_HANDLE* methHnd)
{
// all virtuals should have been expanded into a control expression by this point.
assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
if (call->gtCallType == CT_INDIRECT)
{
assert(call->gtControlExpr == nullptr);
if (methHnd != nullptr)
{
*methHnd = nullptr;
}
return call->gtCallAddr;
}
if (methHnd != nullptr)
{
*methHnd = call->gtCallMethHnd;
}
return call->gtControlExpr;
}
//------------------------------------------------------------------------
// getCallIndirectionCellReg - Get the register containing the indirection cell for a call
//
// Arguments:
// call - the node
//
// Returns:
// The register containing the indirection cell, or REG_NA if this call does not use an indirection cell argument.
//
// Notes:
// We currently use indirection cells for VSD on all platforms and for R2R calls on ARM architectures.
//
regNumber CodeGen::getCallIndirectionCellReg(const GenTreeCall* call)
{
regNumber result = REG_NA;
switch (call->GetIndirectionCellArgKind())
{
case NonStandardArgKind::None:
break;
case NonStandardArgKind::R2RIndirectionCell:
result = REG_R2R_INDIRECT_PARAM;
break;
case NonStandardArgKind::VirtualStubCell:
result = compiler->virtualStubParamInfo->GetReg();
break;
default:
unreached();
}
#ifdef DEBUG
regNumber foundReg = REG_NA;
unsigned argCount = call->fgArgInfo->ArgCount();
fgArgTabEntry** argTable = call->fgArgInfo->ArgTable();
for (unsigned i = 0; i < argCount; i++)
{
NonStandardArgKind kind = argTable[i]->nonStandardArgKind;
if ((kind == NonStandardArgKind::R2RIndirectionCell) || (kind == NonStandardArgKind::VirtualStubCell))
{
foundReg = argTable[i]->GetRegNum();
break;
}
}
assert(foundReg == result);
#endif
return result;
}
/*****************************************************************************
*
* Generates code for all the function and funclet prologs and epilogs.
*/
void CodeGen::genGeneratePrologsAndEpilogs()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** Before prolog / epilog generation\n");
GetEmitter()->emitDispIGlist(false);
}
#endif
// Before generating the prolog, we need to reset the variable locations to what they will be on entry.
// This affects our code that determines which untracked locals need to be zero initialized.
compiler->m_pLinearScan->recordVarLocationsAtStartOfBB(compiler->fgFirstBB);
// Tell the emitter we're done with main code generation, and are going to start prolog and epilog generation.
GetEmitter()->emitStartPrologEpilogGeneration();
gcInfo.gcResetForBB();
genFnProlog();
// Generate all the prologs and epilogs.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(FEATURE_EH_FUNCLETS)
// Capture the data we're going to use in the funclet prolog and epilog generation. This is
// information computed during codegen, or during function prolog generation, like
// frame offsets. It must run after main function prolog generation.
genCaptureFuncletPrologEpilogInfo();
#endif // FEATURE_EH_FUNCLETS
// Walk the list of prologs and epilogs and generate them.
// We maintain a list of prolog and epilog basic blocks in
// the insGroup structure in the emitter. This list was created
// during code generation by the genReserve*() functions.
//
// TODO: it seems like better design would be to create a list of prologs/epilogs
// in the code generator (not the emitter), and then walk that list. But we already
// have the insGroup list, which serves well, so we don't need the extra allocations
// for a prolog/epilog list in the code generator.
GetEmitter()->emitGeneratePrologEpilog();
// Tell the emitter we're done with all prolog and epilog generation.
GetEmitter()->emitFinishPrologEpilogGeneration();
#ifdef DEBUG
if (verbose)
{
printf("*************** After prolog / epilog generation\n");
GetEmitter()->emitDispIGlist(false);
}
#endif
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX End Prolog / Epilog XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
//-----------------------------------------------------------------------------------
// IsMultiRegReturnedType: Returns true if the type is returned in multiple registers
//
// Arguments:
// hClass - type handle
//
// Return Value:
// true if type is returned in multiple registers, false otherwise.
//
bool Compiler::IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv)
{
if (hClass == NO_CLASS_HANDLE)
{
return false;
}
structPassingKind howToReturnStruct;
var_types returnType = getReturnTypeForStruct(hClass, callConv, &howToReturnStruct);
#ifdef TARGET_ARM64
return (varTypeIsStruct(returnType) && (howToReturnStruct != SPK_PrimitiveType));
#else
return (varTypeIsStruct(returnType));
#endif
}
//----------------------------------------------
// Methods that support HFA's for ARM32/ARM64
//----------------------------------------------
bool Compiler::IsHfa(CORINFO_CLASS_HANDLE hClass)
{
return varTypeIsValidHfaType(GetHfaType(hClass));
}
bool Compiler::IsHfa(GenTree* tree)
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(gtGetStructHandleIfPresent(tree));
}
else
{
return false;
}
}
var_types Compiler::GetHfaType(GenTree* tree)
{
if (GlobalJitOptions::compFeatureHfa)
{
return GetHfaType(gtGetStructHandleIfPresent(tree));
}
else
{
return TYP_UNDEF;
}
}
unsigned Compiler::GetHfaCount(GenTree* tree)
{
return GetHfaCount(gtGetStructHandle(tree));
}
var_types Compiler::GetHfaType(CORINFO_CLASS_HANDLE hClass)
{
if (GlobalJitOptions::compFeatureHfa)
{
if (hClass != NO_CLASS_HANDLE)
{
CorInfoHFAElemType elemKind = info.compCompHnd->getHFAType(hClass);
if (elemKind != CORINFO_HFA_ELEM_NONE)
{
// This type may not appear elsewhere, but it will occupy a floating point register.
compFloatingPointUsed = true;
}
return HfaTypeFromElemKind(elemKind);
}
}
return TYP_UNDEF;
}
//------------------------------------------------------------------------
// GetHfaCount: Given a class handle for an HFA struct
// return the number of registers needed to hold the HFA
//
// Note that on ARM32 the single precision registers overlap with
// the double precision registers and for that reason each
// double register is considered to be two single registers.
// Thus, for ARM32 an HFA of 4 doubles makes this function return 8.
// On ARM64, given an HFA of 4 singles or 4 doubles, this function
// will return 4 for both.
// Arguments:
// hClass: the class handle of a HFA struct
//
unsigned Compiler::GetHfaCount(CORINFO_CLASS_HANDLE hClass)
{
assert(IsHfa(hClass));
#ifdef TARGET_ARM
// An HFA of doubles is twice as large as an HFA of singles for ARM32
// (i.e. it uses twice the number of single precision registers)
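// For example, an HFA of 4 doubles has a class size of 32 bytes, and with
// REGSIZE_BYTES == 4 on ARM32 this returns 32 / 4 = 8 single precision registers.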
return info.compCompHnd->getClassSize(hClass) / REGSIZE_BYTES;
#else // TARGET_ARM64
var_types hfaType = GetHfaType(hClass);
unsigned classSize = info.compCompHnd->getClassSize(hClass);
// Note that the retail build issues a warning about a potential division by zero without the Max function
unsigned elemSize = Max((unsigned)1, EA_SIZE_IN_BYTES(emitActualTypeSize(hfaType)));
return classSize / elemSize;
#endif // TARGET_ARM64
}
//------------------------------------------------------------------------------------------------ //
// getFirstArgWithStackSlot - returns the first argument with stack slot on the caller's frame.
//
// Return value:
// The number of the first argument with stack slot on the caller's frame.
//
// Note:
// On x64 Windows the caller always creates slots (homing space) in its frame for the
// first 4 arguments of a callee (register passed args). So, the variable number
// (lclNum) for the first argument with a stack slot is always 0.
// For System V systems or armarch, there is no such calling convention requirement, and the code
// needs to find the first stack passed argument from the caller. This is done by iterating over
// all the lvParam variables and finding the first one whose GetArgReg() equals REG_STK.
//
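// For example (assuming the arm64 convention of passing the first eight integer
// arguments in x0-x7): for a method taking nine integer arguments, the ninth is the
// first one with GetArgReg() == REG_STK, so this function would return lclNum 8.
//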
unsigned CodeGen::getFirstArgWithStackSlot()
{
#if defined(UNIX_AMD64_ABI) || defined(TARGET_ARMARCH)
unsigned baseVarNum = 0;
// Iterate over all the lvParam variables in the Lcl var table until we find the first one
// that's passed on the stack.
LclVarDsc* varDsc = nullptr;
for (unsigned i = 0; i < compiler->info.compArgsCount; i++)
{
varDsc = compiler->lvaGetDesc(i);
// We should have found a stack parameter (and broken out of this loop) before
// we find any non-parameters.
assert(varDsc->lvIsParam);
if (varDsc->GetArgReg() == REG_STK)
{
baseVarNum = i;
break;
}
}
assert(varDsc != nullptr);
return baseVarNum;
#elif defined(TARGET_AMD64)
return 0;
#else // TARGET_X86
// Not implemented for x86.
NYI_X86("getFirstArgWithStackSlot not yet implemented for x86.");
return BAD_VAR_NUM;
#endif // TARGET_X86
}
//------------------------------------------------------------------------
// genSinglePush: Report a change in stack level caused by a single word-sized push instruction
//
void CodeGen::genSinglePush()
{
AddStackLevel(REGSIZE_BYTES);
}
//------------------------------------------------------------------------
// genSinglePop: Report a change in stack level caused by a single word-sized pop instruction
//
void CodeGen::genSinglePop()
{
SubtractStackLevel(REGSIZE_BYTES);
}
//------------------------------------------------------------------------
// genPushRegs: Push the given registers.
//
// Arguments:
// regs - mask of registers to push
// byrefRegs - OUT arg. Set to byref registers that were pushed.
// noRefRegs - OUT arg. Set to non-GC ref registers that were pushed.
//
// Return Value:
// Mask of registers pushed.
//
// Notes:
// This function does not check if the register is marked as used, etc.
//
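// A typical (hypothetical) usage pattern pairs this with genPopRegs around a call
// that may trash the pushed registers:
//
//     regMaskTP byrefRegs, noRefRegs;
//     regMaskTP pushedRegs = genPushRegs(liveGCRegs, &byrefRegs, &noRefRegs);
//     // ... emit the call ...
//     genPopRegs(pushedRegs, byrefRegs, noRefRegs);
//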
regMaskTP CodeGen::genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP* noRefRegs)
{
*byrefRegs = RBM_NONE;
*noRefRegs = RBM_NONE;
if (regs == RBM_NONE)
{
return RBM_NONE;
}
#if FEATURE_FIXED_OUT_ARGS
NYI("Don't call genPushRegs with real regs!");
return RBM_NONE;
#else // FEATURE_FIXED_OUT_ARGS
noway_assert(genTypeStSz(TYP_REF) == genTypeStSz(TYP_I_IMPL));
noway_assert(genTypeStSz(TYP_BYREF) == genTypeStSz(TYP_I_IMPL));
regMaskTP pushedRegs = regs;
for (regNumber reg = REG_INT_FIRST; regs != RBM_NONE; reg = REG_NEXT(reg))
{
regMaskTP regBit = regMaskTP(1) << reg;
if ((regBit & regs) == RBM_NONE)
continue;
var_types type;
if (regBit & gcInfo.gcRegGCrefSetCur)
{
type = TYP_REF;
}
else if (regBit & gcInfo.gcRegByrefSetCur)
{
*byrefRegs |= regBit;
type = TYP_BYREF;
}
else if (noRefRegs != NULL)
{
*noRefRegs |= regBit;
type = TYP_I_IMPL;
}
else
{
continue;
}
inst_RV(INS_push, reg, type);
genSinglePush();
gcInfo.gcMarkRegSetNpt(regBit);
regs &= ~regBit;
}
return pushedRegs;
#endif // FEATURE_FIXED_OUT_ARGS
}
//------------------------------------------------------------------------
// genPopRegs: Pop the registers that were pushed by genPushRegs().
//
// Arguments:
// regs - mask of registers to pop
// byrefRegs - The byref registers that were pushed by genPushRegs().
// noRefRegs - The non-GC ref registers that were pushed by genPushRegs().
//
// Return Value:
// None
//
void CodeGen::genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs)
{
if (regs == RBM_NONE)
{
return;
}
#if FEATURE_FIXED_OUT_ARGS
NYI("Don't call genPopRegs with real regs!");
#else // FEATURE_FIXED_OUT_ARGS
noway_assert((regs & byrefRegs) == byrefRegs);
noway_assert((regs & noRefRegs) == noRefRegs);
noway_assert((regs & (gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur)) == RBM_NONE);
noway_assert(genTypeStSz(TYP_REF) == genTypeStSz(TYP_INT));
noway_assert(genTypeStSz(TYP_BYREF) == genTypeStSz(TYP_INT));
// Walk the registers in the reverse order of genPushRegs()
for (regNumber reg = REG_INT_LAST; regs != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = regMaskTP(1) << reg;
if ((regBit & regs) == RBM_NONE)
continue;
var_types type;
if (regBit & byrefRegs)
{
type = TYP_BYREF;
}
else if (regBit & noRefRegs)
{
type = TYP_INT;
}
else
{
type = TYP_REF;
}
inst_RV(INS_pop, reg, type);
genSinglePop();
if (type != TYP_INT)
gcInfo.gcMarkRegPtrVal(reg, type);
regs &= ~regBit;
}
#endif // FEATURE_FIXED_OUT_ARGS
}
/*****************************************************************************
* genSetScopeInfo
*
* This function should be called only after the sizes of the emitter blocks
* have been finalized.
*/
void CodeGen::genSetScopeInfo()
{
if (!compiler->opts.compScopeInfo)
{
return;
}
#ifdef DEBUG
if (verbose)
{
printf("*************** In genSetScopeInfo()\n");
}
#endif
unsigned varsLocationsCount = 0;
#ifdef USING_SCOPE_INFO
if (compiler->info.compVarScopesCount > 0)
{
varsLocationsCount = siScopeCnt + psiScopeCnt;
}
#else // USING_SCOPE_INFO
#ifdef USING_VARIABLE_LIVE_RANGE
varsLocationsCount = (unsigned int)varLiveKeeper->getLiveRangesCount();
#endif // USING_VARIABLE_LIVE_RANGE
#endif // USING_SCOPE_INFO
if (varsLocationsCount == 0)
{
// No variable homes to report
compiler->eeSetLVcount(0);
compiler->eeSetLVdone();
return;
}
noway_assert(compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0));
// Initialize the table where the reported variables' home will be placed.
compiler->eeSetLVcount(varsLocationsCount);
#ifdef DEBUG
genTrnslLocalVarCount = varsLocationsCount;
if (varsLocationsCount)
{
genTrnslLocalVarInfo = new (compiler, CMK_DebugOnly) TrnslLocalVarInfo[varsLocationsCount];
}
#endif
#ifdef USING_SCOPE_INFO
genSetScopeInfoUsingsiScope();
#else // USING_SCOPE_INFO
#ifdef USING_VARIABLE_LIVE_RANGE
// We can have either flag defined, both, or none; especially if we need to compare
// the results of both. But we cannot report both to the debugger, since there would be overlapping
// intervals, and they may not indicate the same variable location.
genSetScopeInfoUsingVariableRanges();
#endif // USING_VARIABLE_LIVE_RANGE
#endif // USING_SCOPE_INFO
compiler->eeSetLVdone();
}
#ifdef USING_SCOPE_INFO
void CodeGen::genSetScopeInfoUsingsiScope()
{
noway_assert(psiOpenScopeList.scNext == nullptr);
// Record the scopes found for the parameters over the prolog.
// The prolog needs to be treated differently as a variable may not
// have the same info in the prolog block as is given by compiler->lvaTable.
// e.g. a register parameter is actually on the stack before it is loaded into its register.
CodeGen::psiScope* scopeP;
unsigned i;
for (i = 0, scopeP = psiScopeList.scNext; i < psiScopeCnt; i++, scopeP = scopeP->scNext)
{
noway_assert(scopeP != nullptr);
noway_assert(scopeP->scStartLoc.Valid());
noway_assert(scopeP->scEndLoc.Valid());
UNATIVE_OFFSET startOffs = scopeP->scStartLoc.CodeOffset(GetEmitter());
UNATIVE_OFFSET endOffs = scopeP->scEndLoc.CodeOffset(GetEmitter());
unsigned varNum = scopeP->scSlotNum;
noway_assert(startOffs <= endOffs);
// The range may be 0 if the prolog is empty. For such a case,
// report the liveness of arguments to span at least the first
// instruction in the method. This will be incorrect (except on
// entry to the method) if the very first instruction of the method
// is part of a loop. However, this should happen
// very rarely, and the incorrectness is worth being able to look
// at the argument on entry to the method.
if (startOffs == endOffs)
{
noway_assert(startOffs == 0);
endOffs++;
}
siVarLoc varLoc = scopeP->getSiVarLoc();
genSetScopeInfo(i, startOffs, endOffs - startOffs, varNum, scopeP->scLVnum, true, &varLoc);
}
// Record the scopes for the rest of the method.
// Check that the LocalVarInfo scopes look OK
noway_assert(siOpenScopeList.scNext == nullptr);
CodeGen::siScope* scopeL;
for (i = 0, scopeL = siScopeList.scNext; i < siScopeCnt; i++, scopeL = scopeL->scNext)
{
noway_assert(scopeL != nullptr);
noway_assert(scopeL->scStartLoc.Valid());
noway_assert(scopeL->scEndLoc.Valid());
// Find the start and end IP
UNATIVE_OFFSET startOffs = scopeL->scStartLoc.CodeOffset(GetEmitter());
UNATIVE_OFFSET endOffs = scopeL->scEndLoc.CodeOffset(GetEmitter());
noway_assert(scopeL->scStartLoc != scopeL->scEndLoc);
LclVarDsc* varDsc = compiler->lvaGetDesc(scopeL->scVarNum);
siVarLoc varLoc = getSiVarLoc(varDsc, scopeL);
genSetScopeInfo(psiScopeCnt + i, startOffs, endOffs - startOffs, scopeL->scVarNum, scopeL->scLVnum, false,
&varLoc);
}
}
#endif // USING_SCOPE_INFO
#ifdef USING_VARIABLE_LIVE_RANGE
//------------------------------------------------------------------------
// genSetScopeInfoUsingVariableRanges: Call "genSetScopeInfo" with the
// "VariableLiveRanges" created for the arguments, special arguments and
// IL local variables.
//
// Notes:
// This function is called from "genSetScopeInfo" once the code is generated
// and we want to send debug info to the debugger.
//
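// For example, two consecutive ranges [10, 20) and [20, 35) that describe the same
// variable location are merged below and reported as a single range [10, 35).
//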
void CodeGen::genSetScopeInfoUsingVariableRanges()
{
unsigned int liveRangeIndex = 0;
for (unsigned int varNum = 0; varNum < compiler->info.compLocalsCount; varNum++)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
if (compiler->compMap2ILvarNum(varNum) == (unsigned int)ICorDebugInfo::UNKNOWN_ILNUM)
{
continue;
}
auto reportRange = [this, varDsc, varNum, &liveRangeIndex](siVarLoc* loc, UNATIVE_OFFSET start,
UNATIVE_OFFSET end) {
if (varDsc->lvIsParam && (start == end))
{
// If the length is zero, it means that the prolog is empty. In that case,
// CodeGen::genSetScopeInfo will report the liveness of all arguments
// as spanning the first instruction in the method, so that they can
// at least be inspected on entry to the method.
end++;
}
genSetScopeInfo(liveRangeIndex, start, end - start, varNum, varNum, true, loc);
liveRangeIndex++;
};
siVarLoc* curLoc = nullptr;
UNATIVE_OFFSET curStart = 0;
UNATIVE_OFFSET curEnd = 0;
for (int rangeIndex = 0; rangeIndex < 2; rangeIndex++)
{
VariableLiveKeeper::LiveRangeList* liveRanges;
if (rangeIndex == 0)
{
liveRanges = varLiveKeeper->getLiveRangesForVarForProlog(varNum);
}
else
{
liveRanges = varLiveKeeper->getLiveRangesForVarForBody(varNum);
}
for (VariableLiveKeeper::VariableLiveRange& liveRange : *liveRanges)
{
UNATIVE_OFFSET startOffs = liveRange.m_StartEmitLocation.CodeOffset(GetEmitter());
UNATIVE_OFFSET endOffs = liveRange.m_EndEmitLocation.CodeOffset(GetEmitter());
assert(startOffs <= endOffs);
assert(startOffs >= curEnd);
if ((curLoc != nullptr) && (startOffs == curEnd) && siVarLoc::Equals(curLoc, &liveRange.m_VarLocation))
{
// Extend current range.
curEnd = endOffs;
continue;
}
// Report old range if any.
if (curLoc != nullptr)
{
reportRange(curLoc, curStart, curEnd);
}
// Start a new range.
curLoc = &liveRange.m_VarLocation;
curStart = startOffs;
curEnd = endOffs;
}
}
// Report last range
if (curLoc != nullptr)
{
reportRange(curLoc, curStart, curEnd);
}
}
compiler->eeVarsCount = liveRangeIndex;
}
#endif // USING_VARIABLE_LIVE_RANGE
//------------------------------------------------------------------------
// genSetScopeInfo: Record scope information for debug info
//
// Arguments:
// which - the index of the scope info entry being recorded
// startOffs - the starting offset for this scope
// length - the length of this scope
// varNum - the lclVar for this scope info
// LVnum - the variable's scope number, matched against the VarScope table to find its name
// avail - a bool indicating if it has a home
// varLoc - the position (reg or stack) of the variable
//
// Notes:
// Called for every scope info piece to record by the main genSetScopeInfo()
void CodeGen::genSetScopeInfo(unsigned which,
UNATIVE_OFFSET startOffs,
UNATIVE_OFFSET length,
unsigned varNum,
unsigned LVnum,
bool avail,
siVarLoc* varLoc)
{
// We need to do some mapping while reporting back these variables.
unsigned ilVarNum = compiler->compMap2ILvarNum(varNum);
noway_assert((int)ilVarNum != ICorDebugInfo::UNKNOWN_ILNUM);
#ifdef TARGET_X86
// Non-x86 platforms are allowed to access all arguments directly
// so we don't need this code.
// Is this a varargs function?
if (compiler->info.compIsVarArgs && varNum != compiler->lvaVarargsHandleArg &&
varNum < compiler->info.compArgsCount && !compiler->lvaGetDesc(varNum)->lvIsRegArg)
{
noway_assert(varLoc->vlType == VLT_STK || varLoc->vlType == VLT_STK2);
// All stack arguments (except the varargs handle) have to be
// accessed via the varargs cookie. Discard generated info,
// and just find its position relative to the varargs handle
PREFIX_ASSUME(compiler->lvaVarargsHandleArg < compiler->info.compArgsCount);
if (!compiler->lvaGetDesc(compiler->lvaVarargsHandleArg)->lvOnFrame)
{
noway_assert(!compiler->opts.compDbgCode);
return;
}
// Can't check compiler->lvaTable[varNum].lvOnFrame as we don't set it for
// arguments of vararg functions to avoid reporting them to GC.
noway_assert(!compiler->lvaGetDesc(varNum)->lvRegister);
unsigned cookieOffset = compiler->lvaGetDesc(compiler->lvaVarargsHandleArg)->GetStackOffset();
unsigned varOffset = compiler->lvaGetDesc(varNum)->GetStackOffset();
noway_assert(cookieOffset < varOffset);
unsigned offset = varOffset - cookieOffset;
unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
noway_assert(offset < stkArgSize);
offset = stkArgSize - offset;
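        // For example (hypothetical frame layout): with cookieOffset = 8, varOffset = 16 and
        // stkArgSize = 24, the reported VLT_FIXED_VA offset is 24 - (16 - 8) = 16.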
varLoc->vlType = VLT_FIXED_VA;
varLoc->vlFixedVarArg.vlfvOffset = offset;
}
#endif // TARGET_X86
VarName name = nullptr;
#ifdef DEBUG
for (unsigned scopeNum = 0; scopeNum < compiler->info.compVarScopesCount; scopeNum++)
{
if (LVnum == compiler->info.compVarScopes[scopeNum].vsdLVnum)
{
name = compiler->info.compVarScopes[scopeNum].vsdName;
}
}
// Hang on to this compiler->info.
TrnslLocalVarInfo& tlvi = genTrnslLocalVarInfo[which];
tlvi.tlviVarNum = ilVarNum;
tlvi.tlviLVnum = LVnum;
tlvi.tlviName = name;
tlvi.tlviStartPC = startOffs;
tlvi.tlviLength = length;
tlvi.tlviAvailable = avail;
tlvi.tlviVarLoc = *varLoc;
#endif // DEBUG
compiler->eeSetLVinfo(which, startOffs, length, ilVarNum, *varLoc);
}
/*****************************************************************************/
#ifdef LATE_DISASM
#if defined(DEBUG)
/*****************************************************************************
* CompilerRegName
*
* Can be called only after lviSetLocalVarInfo() has been called
*/
/* virtual */
const char* CodeGen::siRegVarName(size_t offs, size_t size, unsigned reg)
{
if (!compiler->opts.compScopeInfo)
return nullptr;
if (compiler->info.compVarScopesCount == 0)
return nullptr;
noway_assert(genTrnslLocalVarCount == 0 || genTrnslLocalVarInfo);
for (unsigned i = 0; i < genTrnslLocalVarCount; i++)
{
if ((genTrnslLocalVarInfo[i].tlviVarLoc.vlIsInReg((regNumber)reg)) &&
(genTrnslLocalVarInfo[i].tlviAvailable == true) && (genTrnslLocalVarInfo[i].tlviStartPC <= offs + size) &&
(genTrnslLocalVarInfo[i].tlviStartPC + genTrnslLocalVarInfo[i].tlviLength > offs))
{
return genTrnslLocalVarInfo[i].tlviName ? compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL;
}
}
return NULL;
}
/*****************************************************************************
* CompilerStkName
*
* Can be called only after lviSetLocalVarInfo() has been called
*/
/* virtual */
const char* CodeGen::siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs)
{
if (!compiler->opts.compScopeInfo)
return nullptr;
if (compiler->info.compVarScopesCount == 0)
return nullptr;
noway_assert(genTrnslLocalVarCount == 0 || genTrnslLocalVarInfo);
for (unsigned i = 0; i < genTrnslLocalVarCount; i++)
{
if ((genTrnslLocalVarInfo[i].tlviVarLoc.vlIsOnStack((regNumber)reg, stkOffs)) &&
(genTrnslLocalVarInfo[i].tlviAvailable == true) && (genTrnslLocalVarInfo[i].tlviStartPC <= offs + size) &&
(genTrnslLocalVarInfo[i].tlviStartPC + genTrnslLocalVarInfo[i].tlviLength > offs))
{
return genTrnslLocalVarInfo[i].tlviName ? compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL;
}
}
return NULL;
}
/*****************************************************************************/
#endif // defined(DEBUG)
#endif // LATE_DISASM
#ifdef DEBUG
/*****************************************************************************
 * Display an IPmappingDsc. Pass -1 as mappingNum to not display a mapping number.
*/
void CodeGen::genIPmappingDisp(unsigned mappingNum, IPmappingDsc* ipMapping)
{
if (mappingNum != unsigned(-1))
{
printf("%d: ", mappingNum);
}
switch (ipMapping->ipmdKind)
{
case IPmappingDscKind::Prolog:
printf("PROLOG");
break;
case IPmappingDscKind::Epilog:
printf("EPILOG");
break;
case IPmappingDscKind::NoMapping:
printf("NO_MAP");
break;
case IPmappingDscKind::Normal:
const ILLocation& loc = ipMapping->ipmdLoc;
Compiler::eeDispILOffs(loc.GetOffset());
if (loc.IsStackEmpty())
{
printf(" STACK_EMPTY");
}
if (loc.IsCall())
{
printf(" CALL_INSTRUCTION");
}
break;
}
printf(" ");
ipMapping->ipmdNativeLoc.Print(compiler->compMethodID);
// We can only call this after code generation. Is there any way to tell when it's legal to call?
// printf(" [%x]", ipMapping->ipmdNativeLoc.CodeOffset(GetEmitter()));
if (ipMapping->ipmdIsLabel)
{
printf(" label");
}
printf("\n");
}
void CodeGen::genIPmappingListDisp()
{
unsigned mappingNum = 0;
for (IPmappingDsc& dsc : compiler->genIPmappings)
{
genIPmappingDisp(mappingNum, &dsc);
++mappingNum;
}
}
#endif // DEBUG
/*****************************************************************************
*
* Append an IPmappingDsc struct to the list that we're maintaining
* for the debugger.
* Record the instr offset as being at the current code gen position.
*/
void CodeGen::genIPmappingAdd(IPmappingDscKind kind, const DebugInfo& di, bool isLabel)
{
if (!compiler->opts.compDbgInfo)
{
return;
}
assert((kind == IPmappingDscKind::Normal) == di.IsValid());
switch (kind)
{
case IPmappingDscKind::Prolog:
case IPmappingDscKind::Epilog:
break;
default:
if (kind == IPmappingDscKind::Normal)
{
noway_assert(di.GetLocation().GetOffset() <= compiler->info.compILCodeSize);
}
// Ignore this one if it's the same IL location as the last one we saw.
// Note that we'll let through two identical IL offsets if the flag bits
// differ, or two identical "special" mappings (e.g., PROLOG).
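            // For example (hypothetical offsets): two back-to-back calls for IL offset 0x1A with
            // identical flag bits collapse into a single entry, while a second call for 0x1A with
            // different stack-empty/call flags is still recorded.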
if ((compiler->genIPmappings.size() > 0) && (kind == compiler->genIPmappings.back().ipmdKind) &&
(di.GetLocation() == compiler->genIPmappings.back().ipmdLoc))
{
JITDUMP("genIPmappingAdd: ignoring duplicate IL offset 0x%x\n", di.GetLocation().GetOffset());
return;
}
break;
}
IPmappingDsc addMapping;
addMapping.ipmdNativeLoc.CaptureLocation(GetEmitter());
addMapping.ipmdKind = kind;
addMapping.ipmdLoc = di.GetLocation();
addMapping.ipmdIsLabel = isLabel;
assert((kind == IPmappingDscKind::Normal) == addMapping.ipmdLoc.IsValid());
compiler->genIPmappings.push_back(addMapping);
#ifdef DEBUG
if (verbose)
{
printf("Added IP mapping: ");
genIPmappingDisp(unsigned(-1), &addMapping);
}
#endif // DEBUG
}
/*****************************************************************************
*
* Prepend an IPmappingDsc struct to the list that we're maintaining
* for the debugger.
*/
void CodeGen::genIPmappingAddToFront(IPmappingDscKind kind, const DebugInfo& di, bool isLabel)
{
if (!compiler->opts.compDbgInfo)
{
return;
}
noway_assert((kind != IPmappingDscKind::Normal) ||
(di.IsValid() && (di.GetLocation().GetOffset() <= compiler->info.compILCodeSize)));
/* Create a mapping entry and prepend it to the list */
IPmappingDsc addMapping;
addMapping.ipmdNativeLoc.CaptureLocation(GetEmitter());
addMapping.ipmdKind = kind;
addMapping.ipmdLoc = di.GetLocation();
addMapping.ipmdIsLabel = isLabel;
compiler->genIPmappings.push_front(addMapping);
#ifdef DEBUG
if (verbose)
{
printf("Added IP mapping to front: ");
genIPmappingDisp(unsigned(-1), &addMapping);
}
#endif // DEBUG
}
/*****************************************************************************/
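//------------------------------------------------------------------------
// genEnsureCodeEmitted: For debuggable code, make sure some native code has been emitted
//    since the last reported IL mapping. If the last mapping matches "di" and still points
//    at the current emitter location, emit a nop so the debugger gets a distinct native
//    offset for that sequence point.
//
// Arguments:
//    di - the debug info of the most recently reported IL mapping
//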
void CodeGen::genEnsureCodeEmitted(const DebugInfo& di)
{
if (!compiler->opts.compDbgCode)
{
return;
}
if (!di.IsValid())
{
return;
}
    // If no IL offsets have been reported yet, there is nothing to check; skip.
if (compiler->genIPmappings.size() <= 0)
{
return;
}
const IPmappingDsc& prev = compiler->genIPmappings.back();
if (prev.ipmdLoc != di.GetLocation())
{
return;
}
// di represents the last reported offset. Make sure that we generated native code
if (prev.ipmdNativeLoc.IsCurrentLocation(GetEmitter()))
{
instGen(INS_nop);
}
}
//------------------------------------------------------------------------
// genIPmappingGen: Shut down the IP-mapping logic, report the info to the EE.
//
void CodeGen::genIPmappingGen()
{
if (!compiler->opts.compDbgInfo)
{
return;
}
#ifdef DEBUG
if (verbose)
{
printf("*************** In genIPmappingGen()\n");
}
#endif
if (compiler->genIPmappings.size() <= 0)
{
compiler->eeSetLIcount(0);
compiler->eeSetLIdone();
return;
}
UNATIVE_OFFSET prevNativeOfs = UNATIVE_OFFSET(~0);
for (jitstd::list<IPmappingDsc>::iterator it = compiler->genIPmappings.begin();
it != compiler->genIPmappings.end();)
{
UNATIVE_OFFSET dscNativeOfs = it->ipmdNativeLoc.CodeOffset(GetEmitter());
if (dscNativeOfs != prevNativeOfs)
{
prevNativeOfs = dscNativeOfs;
++it;
continue;
}
// If we have a previous offset we should have a previous mapping.
assert(it != compiler->genIPmappings.begin());
jitstd::list<IPmappingDsc>::iterator prev = it;
--prev;
// Prev and current mappings have same native offset.
// If one does not map to IL then remove that one.
if (prev->ipmdKind == IPmappingDscKind::NoMapping)
{
compiler->genIPmappings.erase(prev);
++it;
continue;
}
if (it->ipmdKind == IPmappingDscKind::NoMapping)
{
it = compiler->genIPmappings.erase(it);
continue;
}
// Both have mappings.
// If previous is the prolog, keep both if this one is at IL offset 0.
// (TODO: Why? Debugger has no problem breaking on the prolog mapping
// it seems.)
if ((prev->ipmdKind == IPmappingDscKind::Prolog) && (it->ipmdKind == IPmappingDscKind::Normal) &&
(it->ipmdLoc.GetOffset() == 0))
{
++it;
continue;
}
// For the special case of an IL instruction with no body followed by
// the epilog (say ret void immediately preceding the method end), we
// leave both entries in, so that we'll stop at the (empty) ret
// statement if the user tries to put a breakpoint there, and then have
// the option of seeing the epilog or not based on SetUnmappedStopMask
// for the stepper.
if (it->ipmdKind == IPmappingDscKind::Epilog)
{
++it;
continue;
}
// For managed return values we store all calls. Keep both in this case
// too.
if (((prev->ipmdKind == IPmappingDscKind::Normal) && (prev->ipmdLoc.IsCall())) ||
((it->ipmdKind == IPmappingDscKind::Normal) && (it->ipmdLoc.IsCall())))
{
++it;
continue;
}
// Otherwise report the higher offset unless the previous mapping is a
// label.
if (prev->ipmdIsLabel)
{
it = compiler->genIPmappings.erase(it);
}
else
{
compiler->genIPmappings.erase(prev);
++it;
}
}
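    // Illustrative outcome of the pass above (hypothetical offsets): if a NO_MAP entry and a
    // normal entry both land at native offset 0x40, the NO_MAP entry is dropped; if two normal
    // non-call entries share 0x40, only the later one is kept, unless the earlier one is a
    // label, in which case the earlier one wins.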
// Tell them how many mapping records we've got
compiler->eeSetLIcount(static_cast<unsigned int>(compiler->genIPmappings.size()));
// Now tell them about the mappings
unsigned int mappingIdx = 0;
for (const IPmappingDsc& dsc : compiler->genIPmappings)
{
compiler->eeSetLIinfo(mappingIdx++, dsc.ipmdNativeLoc.CodeOffset(GetEmitter()), dsc.ipmdKind, dsc.ipmdLoc);
}
#if 0
// TODO-Review:
//This check is disabled. It is always true that any time this check asserts, the debugger would have a
//problem with IL source level debugging. However, for a C# file, it only matters if things are on
//different source lines. As a result, we have all sorts of latent problems with how we emit debug
//info, but very few actual ones. Whenever someone wants to tackle that problem in general, turn this
//assert back on.
if (compiler->opts.compDbgCode)
{
//Assert that the first instruction of every basic block with more than one incoming edge has a
//different sequence point from each incoming block.
//
//It turns out that the only thing we really have to assert is that the first statement in each basic
//block has an IL offset and appears in eeBoundaries.
for (BasicBlock* const block : compiler->Blocks())
{
Statement* stmt = block->firstStmt();
if ((block->bbRefs > 1) && (stmt != nullptr))
{
bool found = false;
DebugInfo rootInfo = stmt->GetDebugInfo().GetRoot();
if (rootInfo.IsValid())
{
for (unsigned i = 0; i < compiler->eeBoundariesCount; ++i)
{
if (compiler->eeBoundaries[i].ilOffset == rootInfo.GetLocation().GetOffset())
{
found = true;
break;
}
}
}
noway_assert(found && "A basic block that is a jump target did not start a new sequence point.");
}
}
}
#endif // 0
compiler->eeSetLIdone();
}
#ifdef DEBUG
void CodeGen::genDumpPreciseDebugInfoInlineTree(FILE* file, InlineContext* context, bool* first)
{
if (context->GetSibling() != nullptr)
{
genDumpPreciseDebugInfoInlineTree(file, context->GetSibling(), first);
}
if (context->IsSuccess())
{
if (!*first)
{
fprintf(file, ",");
}
*first = false;
fprintf(file, "{\"Ordinal\":%u,", context->GetOrdinal());
fprintf(file, "\"MethodID\":%lld,", (INT64)context->GetCallee());
const char* className;
const char* methodName = compiler->eeGetMethodName(context->GetCallee(), &className);
fprintf(file, "\"MethodName\":\"%s\",", methodName);
fprintf(file, "\"Inlinees\":[");
if (context->GetChild() != nullptr)
{
bool childFirst = true;
genDumpPreciseDebugInfoInlineTree(file, context->GetChild(), &childFirst);
}
fprintf(file, "]}");
}
}
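// genDumpPreciseDebugInfo: Append one JSON object per jitted method to the file named by
// JitConfig.JitDumpPreciseDebugInfoFile(). The emitted shape is roughly (values hypothetical):
//   {"MethodID":1234,"InlineTree":{"Ordinal":0,"MethodID":1234,"MethodName":"Foo","Inlinees":[]},
//    "Mappings":[{"NativeOffset":0,"InlineContext":0,"ILOffset":0}]}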
void CodeGen::genDumpPreciseDebugInfo()
{
if (JitConfig.JitDumpPreciseDebugInfoFile() == nullptr)
return;
static CritSecObject s_critSect;
CritSecHolder holder(s_critSect);
FILE* file = _wfopen(JitConfig.JitDumpPreciseDebugInfoFile(), W("a"));
if (file == nullptr)
return;
// MethodID in ETW events are the method handles.
fprintf(file, "{\"MethodID\":%lld,", (INT64)compiler->info.compMethodHnd);
// Print inline tree.
fprintf(file, "\"InlineTree\":");
bool first = true;
genDumpPreciseDebugInfoInlineTree(file, compiler->compInlineContext, &first);
fprintf(file, ",\"Mappings\":[");
first = true;
for (PreciseIPMapping& mapping : compiler->genPreciseIPmappings)
{
if (!first)
{
fprintf(file, ",");
}
first = false;
fprintf(file, "{\"NativeOffset\":%u,\"InlineContext\":%u,\"ILOffset\":%u}",
mapping.nativeLoc.CodeOffset(GetEmitter()), mapping.debugInfo.GetInlineContext()->GetOrdinal(),
mapping.debugInfo.GetLocation().GetOffset());
}
fprintf(file, "]}\n");
fclose(file);
}
void CodeGen::genAddPreciseIPMappingHere(const DebugInfo& di)
{
PreciseIPMapping mapping;
mapping.nativeLoc.CaptureLocation(GetEmitter());
mapping.debugInfo = di;
compiler->genPreciseIPmappings.push_back(mapping);
}
#endif
/*============================================================================
*
* These are empty stubs to help the late dis-assembler to compile
* if the late disassembler is being built into a non-DEBUG build.
*
*============================================================================
*/
#if defined(LATE_DISASM)
#if !defined(DEBUG)
/* virtual */
const char* CodeGen::siRegVarName(size_t offs, size_t size, unsigned reg)
{
return NULL;
}
/* virtual */
const char* CodeGen::siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs)
{
return NULL;
}
/*****************************************************************************/
#endif // !defined(DEBUG)
#endif // defined(LATE_DISASM)
//------------------------------------------------------------------------
// indirForm: Make a temporary indir we can feed to pattern matching routines
// in cases where we don't want to instantiate all the indirs that happen.
//
/* static */ GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
{
GenTreeIndir i(GT_IND, type, base, nullptr);
i.SetRegNum(REG_NA);
i.SetContained();
return i;
}
//------------------------------------------------------------------------
// storeIndirForm: Make a temporary store indir we can feed to pattern matching routines
//    in cases where we don't want to instantiate all the indirs that happen.
//
/* static */ GenTreeStoreInd CodeGen::storeIndirForm(var_types type, GenTree* base, GenTree* data)
{
GenTreeStoreInd i(type, base, data);
i.SetRegNum(REG_NA);
return i;
}
//------------------------------------------------------------------------
// intForm: Make a temporary int we can feed to pattern matching routines
// in cases where we don't want to instantiate.
//
GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
{
GenTreeIntCon i(type, value);
i.SetRegNum(REG_NA);
return i;
}
#if defined(TARGET_X86) || defined(TARGET_ARM)
//------------------------------------------------------------------------
// genLongReturn: Generates code for long return statement for x86 and arm.
//
// Note: treeNode's and op1's registers are already consumed.
//
// Arguments:
// treeNode - The GT_RETURN or GT_RETFILT tree node with LONG return type.
//
// Return Value:
// None
//
void CodeGen::genLongReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
assert(treeNode->TypeGet() == TYP_LONG);
GenTree* op1 = treeNode->gtGetOp1();
var_types targetType = treeNode->TypeGet();
assert(op1 != nullptr);
assert(op1->OperGet() == GT_LONG);
GenTree* loRetVal = op1->gtGetOp1();
GenTree* hiRetVal = op1->gtGetOp2();
assert((loRetVal->GetRegNum() != REG_NA) && (hiRetVal->GetRegNum() != REG_NA));
genConsumeReg(loRetVal);
genConsumeReg(hiRetVal);
inst_Mov(targetType, REG_LNGRET_LO, loRetVal->GetRegNum(), /* canSkip */ true, emitActualTypeSize(TYP_INT));
inst_Mov(targetType, REG_LNGRET_HI, hiRetVal->GetRegNum(), /* canSkip */ true, emitActualTypeSize(TYP_INT));
}
#endif // TARGET_X86 || TARGET_ARM
//------------------------------------------------------------------------
// genReturn: Generates code for return statement.
// In case of struct return, delegates to the genStructReturn method.
//
// Arguments:
// treeNode - The GT_RETURN or GT_RETFILT tree node.
//
// Return Value:
// None
//
void CodeGen::genReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
GenTree* op1 = treeNode->gtGetOp1();
var_types targetType = treeNode->TypeGet();
// A void GT_RETFILT is the end of a finally. For non-void filter returns we need to load the result in the return
// register, if it's not already there. The processing is the same as GT_RETURN. For filters, the IL spec says the
// result is type int32. Further, the only legal values are 0 or 1; the use of other values is "undefined".
assert(!treeNode->OperIs(GT_RETFILT) || (targetType == TYP_VOID) || (targetType == TYP_INT));
#ifdef DEBUG
if (targetType == TYP_VOID)
{
assert(op1 == nullptr);
}
#endif // DEBUG
#if defined(TARGET_X86) || defined(TARGET_ARM)
if (targetType == TYP_LONG)
{
genLongReturn(treeNode);
}
else
#endif // TARGET_X86 || TARGET_ARM
{
if (isStructReturn(treeNode))
{
genStructReturn(treeNode);
}
else if (targetType != TYP_VOID)
{
assert(op1 != nullptr);
noway_assert(op1->GetRegNum() != REG_NA);
// !! NOTE !! genConsumeReg will clear op1 as GC ref after it has
// consumed a reg for the operand. This is because the variable
// is dead after return. But we are issuing more instructions
// like "profiler leave callback" after this consumption. So
// if you are issuing more instructions after this point,
// remember to keep the variable live up until the new method
// exit point where it is actually dead.
genConsumeReg(op1);
#if defined(TARGET_ARM64)
genSimpleReturn(treeNode);
#else // !TARGET_ARM64
#if defined(TARGET_X86)
if (varTypeUsesFloatReg(treeNode))
{
genFloatReturn(treeNode);
}
else
#elif defined(TARGET_ARM)
if (varTypeUsesFloatReg(treeNode) && (compiler->opts.compUseSoftFP || compiler->info.compIsVarArgs))
{
if (targetType == TYP_FLOAT)
{
GetEmitter()->emitIns_Mov(INS_vmov_f2i, EA_4BYTE, REG_INTRET, op1->GetRegNum(),
/* canSkip */ false);
}
else
{
assert(targetType == TYP_DOUBLE);
GetEmitter()->emitIns_R_R_R(INS_vmov_d2i, EA_8BYTE, REG_INTRET, REG_NEXT(REG_INTRET),
op1->GetRegNum());
}
}
else
#endif // TARGET_ARM
{
regNumber retReg = varTypeUsesFloatReg(treeNode) ? REG_FLOATRET : REG_INTRET;
inst_Mov_Extend(targetType, /* srcInReg */ true, retReg, op1->GetRegNum(), /* canSkip */ true);
}
#endif // !TARGET_ARM64
}
}
#ifdef PROFILING_SUPPORTED
// !! Note !!
    // TODO-AMD64-Unix: If the profiler hook is implemented on *nix, make sure that for structs
    // returned in two registers, RAX and RDX are kept alive. Make the necessary changes in lowerxarch.cpp
// in the handling of the GT_RETURN statement.
// Such structs containing GC pointers need to be handled by calling gcInfo.gcMarkRegSetNpt
// for the return registers containing GC refs.
//
// Reason for not materializing Leave callback as a GT_PROF_HOOK node after GT_RETURN:
// In flowgraph and other places assert that the last node of a block marked as
// BBJ_RETURN is either a GT_RETURN or GT_JMP or a tail call. It would be nice to
// maintain such an invariant irrespective of whether profiler hook needed or not.
// Also, there is not much to be gained by materializing it as an explicit node.
//
// There should be a single return block while generating profiler ELT callbacks,
// so we just look for that block to trigger insertion of the profile hook.
if ((compiler->compCurBB == compiler->genReturnBB) && compiler->compIsProfilerHookNeeded())
{
// !! NOTE !!
// Since we are invalidating the assumption that we would slip into the epilog
// right after the "return", we need to preserve the return reg's GC state
// across the call until actual method return.
ReturnTypeDesc retTypeDesc;
unsigned regCount = 0;
if (compiler->compMethodReturnsMultiRegRetType())
{
if (varTypeIsLong(compiler->info.compRetNativeType))
{
retTypeDesc.InitializeLongReturnType();
}
else // we must have a struct return type
{
CorInfoCallConvExtension callConv = compiler->info.compCallConv;
retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass,
callConv);
}
regCount = retTypeDesc.GetReturnRegCount();
}
if (varTypeIsGC(compiler->info.compRetNativeType))
{
gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
}
else if (compiler->compMethodReturnsMultiRegRetType())
{
for (unsigned i = 0; i < regCount; ++i)
{
if (varTypeIsGC(retTypeDesc.GetReturnRegType(i)))
{
gcInfo.gcMarkRegPtrVal(retTypeDesc.GetABIReturnReg(i), retTypeDesc.GetReturnRegType(i));
}
}
}
else if (compiler->compMethodReturnsRetBufAddr())
{
gcInfo.gcMarkRegPtrVal(REG_INTRET, TYP_BYREF);
}
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_LEAVE);
if (varTypeIsGC(compiler->info.compRetNativeType))
{
gcInfo.gcMarkRegSetNpt(genRegMask(REG_INTRET));
}
else if (compiler->compMethodReturnsMultiRegRetType())
{
for (unsigned i = 0; i < regCount; ++i)
{
if (varTypeIsGC(retTypeDesc.GetReturnRegType(i)))
{
gcInfo.gcMarkRegSetNpt(genRegMask(retTypeDesc.GetABIReturnReg(i)));
}
}
}
else if (compiler->compMethodReturnsRetBufAddr())
{
gcInfo.gcMarkRegSetNpt(genRegMask(REG_INTRET));
}
}
#endif // PROFILING_SUPPORTED
#if defined(DEBUG) && defined(TARGET_XARCH)
bool doStackPointerCheck = compiler->opts.compStackCheckOnRet;
#if defined(FEATURE_EH_FUNCLETS)
// Don't do stack pointer check at the return from a funclet; only for the main function.
if (compiler->funCurrentFunc()->funKind != FUNC_ROOT)
{
doStackPointerCheck = false;
}
#else // !FEATURE_EH_FUNCLETS
// Don't generate stack checks for x86 finally/filter EH returns: these are not invoked
// with the same SP as the main function. See also CodeGen::genEHFinallyOrFilterRet().
if ((compiler->compCurBB->bbJumpKind == BBJ_EHFINALLYRET) || (compiler->compCurBB->bbJumpKind == BBJ_EHFILTERRET))
{
doStackPointerCheck = false;
}
#endif // !FEATURE_EH_FUNCLETS
genStackPointerCheck(doStackPointerCheck, compiler->lvaReturnSpCheck);
#endif // defined(DEBUG) && defined(TARGET_XARCH)
}
//------------------------------------------------------------------------
// isStructReturn: Returns whether the 'treeNode' is returning a struct.
//
// Arguments:
// treeNode - The tree node to evaluate whether is a struct return.
//
// Return Value:
// Returns true if the 'treeNode" is a GT_RETURN node of type struct.
// Otherwise returns false.
//
bool CodeGen::isStructReturn(GenTree* treeNode)
{
    // This method could be called for 'treeNode' of GT_RETFILT or GT_RETURN.
    // For GT_RETFILT, the return is always a bool or a void, for the end of a finally block.
noway_assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
if (treeNode->OperGet() != GT_RETURN)
{
return false;
}
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
assert(!varTypeIsStruct(treeNode));
return false;
#else
return varTypeIsStruct(treeNode) && (compiler->info.compRetNativeType == TYP_STRUCT);
#endif
}
//------------------------------------------------------------------------
// genStructReturn: Generates code for returning a struct.
//
// Arguments:
// treeNode - The GT_RETURN tree node.
//
// Return Value:
// None
//
// Assumption:
// op1 of GT_RETURN node is either GT_LCL_VAR or multi-reg GT_CALL
//
void CodeGen::genStructReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN);
GenTree* op1 = treeNode->gtGetOp1();
genConsumeRegs(op1);
GenTree* actualOp1 = op1;
if (op1->IsCopyOrReload())
{
actualOp1 = op1->gtGetOp1();
}
ReturnTypeDesc retTypeDesc;
LclVarDsc* varDsc = nullptr;
if (actualOp1->OperIs(GT_LCL_VAR))
{
varDsc = compiler->lvaGetDesc(actualOp1->AsLclVar());
retTypeDesc.InitializeStructReturnType(compiler, varDsc->GetStructHnd(), compiler->info.compCallConv);
assert(varDsc->lvIsMultiRegRet);
}
else
{
assert(actualOp1->OperIs(GT_CALL));
retTypeDesc = *(actualOp1->AsCall()->GetReturnTypeDesc());
}
unsigned regCount = retTypeDesc.GetReturnRegCount();
assert(regCount <= MAX_RET_REG_COUNT);
#if FEATURE_MULTIREG_RET
// Right now the only enregisterable structs supported are SIMD vector types.
if (genIsRegCandidateLocal(actualOp1))
{
#if defined(DEBUG)
const GenTreeLclVar* lclVar = actualOp1->AsLclVar();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclVar);
assert(varTypeIsSIMD(varDsc->GetRegisterType()));
assert(!lclVar->IsMultiReg());
#endif // DEBUG
#ifdef FEATURE_SIMD
genSIMDSplitReturn(op1, &retTypeDesc);
#endif // FEATURE_SIMD
}
else if (actualOp1->OperIs(GT_LCL_VAR) && !actualOp1->AsLclVar()->IsMultiReg())
{
GenTreeLclVar* lclNode = actualOp1->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
assert(varDsc->lvIsMultiRegRet);
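        // Load each return register straight from the local's stack home. For example
        // (hypothetical SysV AMD64 two-eightbyte struct): the first eightbyte is loaded from
        // offset 0 and the second from offset 8, into the registers named by the ReturnTypeDesc.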
int offset = 0;
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc.GetReturnRegType(i);
regNumber toReg = retTypeDesc.GetABIReturnReg(i);
GetEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), toReg, lclNode->GetLclNum(), offset);
offset += genTypeSize(type);
}
}
else
{
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc.GetReturnRegType(i);
regNumber toReg = retTypeDesc.GetABIReturnReg(i);
regNumber fromReg = op1->GetRegByIndex(i);
if ((fromReg == REG_NA) && op1->OperIs(GT_COPY))
{
// A copy that doesn't copy this field will have REG_NA.
// TODO-Cleanup: It would probably be better to always have a valid reg
// on a GT_COPY, unless the operand is actually spilled. Then we wouldn't have
// to check for this case (though we'd have to check in the genRegCopy that the
// reg is valid).
fromReg = actualOp1->GetRegByIndex(i);
}
if (fromReg == REG_NA)
{
// This is a spilled field of a multi-reg lclVar.
// We currently only mark a lclVar operand as RegOptional, since we don't have a way
// to mark a multi-reg tree node as used from spill (GTF_NOREG_AT_USE) on a per-reg basis.
assert(varDsc != nullptr);
assert(varDsc->lvPromoted);
unsigned fieldVarNum = varDsc->lvFieldLclStart + i;
assert(compiler->lvaGetDesc(fieldVarNum)->lvOnFrame);
GetEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), toReg, fieldVarNum, 0);
}
else
{
// Note that ins_Copy(fromReg, type) will return the appropriate register to copy
// between register files if needed.
inst_Mov(type, toReg, fromReg, /* canSkip */ true);
}
}
}
#else // !FEATURE_MULTIREG_RET
unreached();
#endif
}
//----------------------------------------------------------------------------------
// genMultiRegStoreToLocal: store multi-reg value to a local
//
// Arguments:
// lclNode - Gentree of GT_STORE_LCL_VAR
//
// Return Value:
// None
//
// Assumption:
// The child of store is a multi-reg node.
//
void CodeGen::genMultiRegStoreToLocal(GenTreeLclVar* lclNode)
{
assert(lclNode->OperIs(GT_STORE_LCL_VAR));
assert(varTypeIsStruct(lclNode) || varTypeIsMultiReg(lclNode));
GenTree* op1 = lclNode->gtGetOp1();
GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
assert(op1->IsMultiRegNode());
unsigned regCount =
actualOp1->IsMultiRegLclVar() ? actualOp1->AsLclVar()->GetFieldCount(compiler) : actualOp1->GetMultiRegCount();
assert(regCount > 1);
// Assumption: current implementation requires that a multi-reg
// var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
// being promoted, unless compiler->lvaEnregMultiRegVars is true.
unsigned lclNum = lclNode->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
if (op1->OperIs(GT_CALL))
{
assert(regCount <= MAX_RET_REG_COUNT);
noway_assert(varDsc->lvIsMultiRegRet);
}
#ifdef FEATURE_SIMD
// Check for the case of an enregistered SIMD type that's returned in multiple registers.
if (varDsc->lvIsRegCandidate() && lclNode->GetRegNum() != REG_NA)
{
assert(varTypeIsSIMD(lclNode));
genMultiRegStoreToSIMDLocal(lclNode);
return;
}
#endif // FEATURE_SIMD
// We have either a multi-reg local or a local with multiple fields in memory.
//
// The liveness model is as follows:
// use reg #0 from src, including any reload or copy
// define reg #0
// use reg #1 from src, including any reload or copy
// define reg #1
// etc.
// Imagine the following scenario:
// There are 3 registers used. Prior to this node, they occupy registers r3, r2 and r1.
// There are 3 registers defined by this node. They need to be placed in r1, r2 and r3,
// in that order.
//
    // If we defined them as using all the source registers at once, we'd have to adopt one
// of the following models:
// - All (or all but one) of the incoming sources are marked "delayFree" so that they won't
// get the same register as any of the registers being defined. This would result in copies for
// the common case where the source and destination registers are the same (e.g. when a CALL
// result is assigned to a lclVar, which is then returned).
// - For our example (and for many/most cases) we would have to copy or spill all sources.
// - We allow circular dependencies between source and destination registers. This would require
// the code generator to determine the order in which the copies must be generated, and would
// require a temp register in case a swap is required. This complexity would have to be handled
// in both the normal code generation case, as well as for copies & reloads, as they are currently
// modeled by the register allocator to happen just prior to the use.
// - For our example, a temp would be required to swap r1 and r3, unless a swap instruction is
// available on the target.
//
// By having a multi-reg local use and define each field in order, we avoid these issues, and the
// register allocator will ensure that any conflicts are resolved via spill or inserted COPYs.
    // For our example, the register allocator would simply spill r1 because the first def requires it.
// The code generator would move r3 to r1, leave r2 alone, and then load the spilled value into r3.
unsigned offset = 0;
bool isMultiRegVar = lclNode->IsMultiRegLclVar();
bool hasRegs = false;
if (isMultiRegVar)
{
assert(compiler->lvaEnregMultiRegVars);
assert(regCount == varDsc->lvFieldCnt);
}
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = genConsumeReg(op1, i);
var_types srcType = actualOp1->GetRegTypeByIndex(i);
// genConsumeReg will return the valid register, either from the COPY
// or from the original source.
assert(reg != REG_NA);
if (isMultiRegVar)
{
// Each field is passed in its own register, use the field types.
regNumber varReg = lclNode->GetRegByIndex(i);
unsigned fieldLclNum = varDsc->lvFieldLclStart + i;
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldLclNum);
var_types destType = fieldVarDsc->TypeGet();
if (varReg != REG_NA)
{
hasRegs = true;
// We may need a cross register-file copy here.
inst_Mov(destType, varReg, reg, /* canSkip */ true);
fieldVarDsc->SetRegNum(varReg);
}
else
{
varReg = REG_STK;
}
if ((varReg == REG_STK) || fieldVarDsc->IsAlwaysAliveInMemory())
{
if (!lclNode->AsLclVar()->IsLastUse(i))
{
// A byte field passed in a long register should be written on the stack as a byte.
instruction storeIns = ins_StoreFromSrc(reg, destType);
GetEmitter()->emitIns_S_R(storeIns, emitTypeSize(destType), reg, fieldLclNum, 0);
}
}
fieldVarDsc->SetRegNum(varReg);
}
else
{
// Several fields could be passed in one register, copy using the register type.
            // This could write memory beyond the fields, but locals on the stack are rounded up to
            // POINTER_SIZE, so it is safe to store a wide register into a narrower field: we know
            // there is enough padding after it.
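            // For example (hypothetical layout): two adjacent int32 fields covered by a single
            // eightbyte register are written back with one 8-byte store at that offset.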
GetEmitter()->emitIns_S_R(ins_Store(srcType), emitTypeSize(srcType), reg, lclNum, offset);
offset += genTypeSize(srcType);
#ifdef DEBUG
#ifdef TARGET_64BIT
assert(offset <= varDsc->lvSize());
#else // !TARGET_64BIT
if (varTypeIsStruct(varDsc))
{
assert(offset <= varDsc->lvSize());
}
else
{
assert(varDsc->TypeGet() == TYP_LONG);
assert(offset <= genTypeSize(TYP_LONG));
}
#endif // !TARGET_64BIT
#endif // DEBUG
}
}
// Update variable liveness.
if (isMultiRegVar)
{
if (hasRegs)
{
genProduceReg(lclNode);
}
else
{
genUpdateLife(lclNode);
}
}
else
{
genUpdateLife(lclNode);
varDsc->SetRegNum(REG_STK);
}
}
//------------------------------------------------------------------------
// genRegCopy: Produce code for a GT_COPY node.
//
// Arguments:
// tree - the GT_COPY node
//
// Notes:
// This will copy the register produced by this node's source, to
// the register allocated to this GT_COPY node.
// It has some special handling for these cases:
// - when the source and target registers are in different register files
// (note that this is *not* a conversion).
// - when the source is a lclVar whose home location is being moved to a new
// register (rather than just being copied for temporary use).
//
void CodeGen::genRegCopy(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_COPY);
GenTree* op1 = treeNode->AsOp()->gtOp1;
if (op1->IsMultiRegNode())
{
// Register allocation assumes that any reload and copy are done in operand order.
// That is, we can have:
// (reg0, reg1) = COPY(V0,V1) where V0 is in reg1 and V1 is in memory
// The register allocation model assumes:
// First, V0 is moved to reg0 (v1 can't be in reg0 because it is still live, which would be a conflict).
// Then, V1 is moved to reg1
// However, if we call genConsumeRegs on op1, it will do the reload of V1 before we do the copy of V0.
// So we need to handle that case first.
//
// There should never be any circular dependencies, and we will check that here.
// GenTreeCopyOrReload only reports the highest index that has a valid register.
// However, we need to ensure that we consume all the registers of the child node,
// so we use its regCount.
unsigned regCount =
op1->IsMultiRegLclVar() ? op1->AsLclVar()->GetFieldCount(compiler) : op1->GetMultiRegCount();
assert(regCount <= MAX_MULTIREG_COUNT);
// First set the source registers as busy if they haven't been spilled.
// (Note that this is just for verification that we don't have circular dependencies.)
regMaskTP busyRegs = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
if ((op1->GetRegSpillFlagByIdx(i) & GTF_SPILLED) == 0)
{
busyRegs |= genRegMask(op1->GetRegByIndex(i));
}
}
for (unsigned i = 0; i < regCount; ++i)
{
regNumber sourceReg = op1->GetRegByIndex(i);
// genRegCopy will consume the source register, perform any required reloads,
// and will return either the register copied to, or the original register if there's no copy.
regNumber targetReg = genRegCopy(treeNode, i);
if (targetReg != sourceReg)
{
regMaskTP targetRegMask = genRegMask(targetReg);
assert((busyRegs & targetRegMask) == 0);
// Clear sourceReg from the busyRegs, and add targetReg.
busyRegs &= ~genRegMask(sourceReg);
}
busyRegs |= genRegMask(targetReg);
}
return;
}
regNumber srcReg = genConsumeReg(op1);
var_types targetType = treeNode->TypeGet();
regNumber targetReg = treeNode->GetRegNum();
assert(srcReg != REG_NA);
assert(targetReg != REG_NA);
assert(targetType != TYP_STRUCT);
inst_Mov(targetType, targetReg, srcReg, /* canSkip */ false);
if (op1->IsLocal())
{
// The lclVar will never be a def.
// If it is a last use, the lclVar will be killed by genConsumeReg(), as usual, and genProduceReg will
// appropriately set the gcInfo for the copied value.
// If not, there are two cases we need to handle:
// - If this is a TEMPORARY copy (indicated by the GTF_VAR_DEATH flag) the variable
// will remain live in its original register.
// genProduceReg() will appropriately set the gcInfo for the copied value,
// and genConsumeReg will reset it.
// - Otherwise, we need to update register info for the lclVar.
GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
assert((lcl->gtFlags & GTF_VAR_DEF) == 0);
if ((lcl->gtFlags & GTF_VAR_DEATH) == 0 && (treeNode->gtFlags & GTF_VAR_DEATH) == 0)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
// If we didn't just spill it (in genConsumeReg, above), then update the register info
if (varDsc->GetRegNum() != REG_STK)
{
// The old location is dying
genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(op1));
gcInfo.gcMarkRegSetNpt(genRegMask(op1->GetRegNum()));
genUpdateVarReg(varDsc, treeNode);
#ifdef USING_VARIABLE_LIVE_RANGE
// Report the home change for this variable
varLiveKeeper->siUpdateVariableLiveRange(varDsc, lcl->GetLclNum());
#endif // USING_VARIABLE_LIVE_RANGE
// The new location is going live
genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(treeNode));
}
}
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genRegCopy: Produce code for a single register of a multireg copy node.
//
// Arguments:
// tree - The GT_COPY node
// multiRegIndex - The index of the register to be copied
//
// Notes:
//    This will copy the corresponding register produced by this node's source, to
//    the register allocated to the register specified by this GT_COPY node.
//    A multireg copy doesn't support moving between register files, as the GT_COPY
//    node does not retain separate types for each index.
//    It has special handling for the case when the source is a lclVar whose home location
//    is being moved to a new register (rather than just being copied for temporary use).
//
// Return Value:
// Either the register copied to, or the original register if there's no copy.
//
regNumber CodeGen::genRegCopy(GenTree* treeNode, unsigned multiRegIndex)
{
assert(treeNode->OperGet() == GT_COPY);
GenTree* op1 = treeNode->gtGetOp1();
assert(op1->IsMultiRegNode());
GenTreeCopyOrReload* copyNode = treeNode->AsCopyOrReload();
assert(copyNode->GetRegCount() <= MAX_MULTIREG_COUNT);
// Consume op1's register, which will perform any necessary reloads.
genConsumeReg(op1, multiRegIndex);
regNumber sourceReg = op1->GetRegByIndex(multiRegIndex);
regNumber targetReg = copyNode->GetRegNumByIdx(multiRegIndex);
// GenTreeCopyOrReload only reports the highest index that has a valid register.
// However there may be lower indices that have no valid register (i.e. the register
// on the source is still valid at the consumer).
if (targetReg != REG_NA)
{
// We shouldn't specify a no-op move.
assert(sourceReg != targetReg);
var_types type;
if (op1->IsMultiRegLclVar())
{
LclVarDsc* parentVarDsc = compiler->lvaGetDesc(op1->AsLclVar());
unsigned fieldVarNum = parentVarDsc->lvFieldLclStart + multiRegIndex;
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldVarNum);
type = fieldVarDsc->TypeGet();
inst_Mov(type, targetReg, sourceReg, /* canSkip */ false);
if (!op1->AsLclVar()->IsLastUse(multiRegIndex) && fieldVarDsc->GetRegNum() != REG_STK)
{
// The old location is dying
genUpdateRegLife(fieldVarDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(op1));
gcInfo.gcMarkRegSetNpt(genRegMask(sourceReg));
genUpdateVarReg(fieldVarDsc, treeNode);
#ifdef USING_VARIABLE_LIVE_RANGE
// Report the home change for this variable
varLiveKeeper->siUpdateVariableLiveRange(fieldVarDsc, fieldVarNum);
#endif // USING_VARIABLE_LIVE_RANGE
// The new location is going live
genUpdateRegLife(fieldVarDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(treeNode));
}
}
else
{
type = op1->GetRegTypeByIndex(multiRegIndex);
inst_Mov(type, targetReg, sourceReg, /* canSkip */ false);
// We never spill after a copy, so to produce the single register, we simply need to
// update the GC info for the defined register.
gcInfo.gcMarkRegPtrVal(targetReg, type);
}
return targetReg;
}
else
{
return sourceReg;
}
}
#if defined(DEBUG) && defined(TARGET_XARCH)
//------------------------------------------------------------------------
// genStackPointerCheck: Generate code to check the stack pointer against a saved value.
// This is a debug check.
//
// Arguments:
// doStackPointerCheck - If true, do the stack pointer check, otherwise do nothing.
// lvaStackPointerVar - The local variable number that holds the value of the stack pointer
// we are comparing against.
//
// Return Value:
// None
//
void CodeGen::genStackPointerCheck(bool doStackPointerCheck, unsigned lvaStackPointerVar)
{
if (doStackPointerCheck)
{
noway_assert(lvaStackPointerVar != 0xCCCCCCCC && compiler->lvaGetDesc(lvaStackPointerVar)->lvDoNotEnregister &&
compiler->lvaGetDesc(lvaStackPointerVar)->lvOnFrame);
GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, lvaStackPointerVar, 0);
BasicBlock* sp_check = genCreateTempLabel();
GetEmitter()->emitIns_J(INS_je, sp_check);
instGen(INS_BREAKPOINT);
genDefineTempLabel(sp_check);
}
}
#endif // defined(DEBUG) && defined(TARGET_XARCH)
unsigned CodeGenInterface::getCurrentStackLevel() const
{
return genStackLevel;
}
#ifdef USING_VARIABLE_LIVE_RANGE
#ifdef DEBUG
//------------------------------------------------------------------------
// VariableLiveRanges dumpers
//------------------------------------------------------------------------
// Dump "VariableLiveRange" when code has not been generated and we don't have so the assembly native offset
// but at least "emitLocation"s and "siVarLoc"
void CodeGenInterface::VariableLiveKeeper::VariableLiveRange::dumpVariableLiveRange(
const CodeGenInterface* codeGen) const
{
codeGen->dumpSiVarLoc(&m_VarLocation);
printf(" [");
m_StartEmitLocation.Print(codeGen->GetCompiler()->compMethodID);
printf(", ");
if (m_EndEmitLocation.Valid())
{
m_EndEmitLocation.Print(codeGen->GetCompiler()->compMethodID);
}
else
{
printf("...");
}
printf("]");
}
// Dump "VariableLiveRange" when code has been generated and we have the assembly native offset of each "emitLocation"
void CodeGenInterface::VariableLiveKeeper::VariableLiveRange::dumpVariableLiveRange(
emitter* emit, const CodeGenInterface* codeGen) const
{
assert(emit != nullptr);
// "VariableLiveRanges" are created setting its location ("m_VarLocation") and the initial native offset
// ("m_StartEmitLocation")
codeGen->dumpSiVarLoc(&m_VarLocation);
    // If this is an open "VariableLiveRange", "m_EndEmitLocation" is not valid, so print -1 for the end
    UNATIVE_OFFSET endAssemblyOffset = m_EndEmitLocation.Valid() ? m_EndEmitLocation.CodeOffset(emit) : -1;
    printf(" [%X, %X)", m_StartEmitLocation.CodeOffset(emit), endAssemblyOffset);
}
//------------------------------------------------------------------------
// LiveRangeDumper
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// resetDumper: If the last "VariableLiveRange" of "liveRange" has been closed, mark the
//  "LiveRangeDumper" as having nothing left to dump. Otherwise, make the "LiveRangeDumper"
//  point to the last "VariableLiveRange" of "liveRange", which is still open.
//
// Arguments:
//  liveRanges - the "LiveRangeList" of the "VariableLiveDescriptor" whose
//  "LiveRangeDumper" we want to update.
//
// Notes:
//  This method is expected to be called once the code for a BasicBlock has been
//  generated and all the new "VariableLiveRange"s of the variable for this block
//  have been dumped.
void CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::resetDumper(const LiveRangeList* liveRanges)
{
    // Something must have been reported in order to reset
assert(m_hasLiveRangestoDump);
if (liveRanges->back().m_EndEmitLocation.Valid())
{
// the last "VariableLiveRange" is closed and the variable
// is no longer alive
m_hasLiveRangestoDump = false;
}
else
{
// the last "VariableLiveRange" remains opened because it is
// live at "BasicBlock"s "bbLiveOut".
m_StartingLiveRange = liveRanges->backPosition();
}
}
//------------------------------------------------------------------------
// setDumperStartAt: Make the "LiveRangeDumper" instance point at the given "VariableLiveRange",
//  so we can start dumping from there once the current "BasicBlock"'s code has been generated.
//
// Arguments:
//  liveRangeIt - an iterator to a position in "VariableLiveDescriptor::m_VariableLiveRanges"
//
// Notes:
//  "liveRangeIt" must be a valid iterator into the variable's "LiveRangeList".
void CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::setDumperStartAt(const LiveRangeListIterator liveRangeIt)
{
m_hasLiveRangestoDump = true;
m_StartingLiveRange = liveRangeIt;
}
//------------------------------------------------------------------------
// getStartForDump: Return an iterator to the first "VariableLiveRange" edited/added
// during the current "BasicBlock"
//
// Return Value:
// A LiveRangeListIterator to the first "VariableLiveRange" in "LiveRangeList" which
// was used during last "BasicBlock".
//
CodeGenInterface::VariableLiveKeeper::LiveRangeListIterator CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::
getStartForDump() const
{
return m_StartingLiveRange;
}
//------------------------------------------------------------------------
// hasLiveRangesToDump: Return whether at least one "VariableLiveRange" was alive during
// the current "BasicBlock"'s code generation
//
// Return Value:
//  A boolean indicating whether at least one "VariableLiveRange" has been used for the
//  variable during the last "BasicBlock".
//
bool CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::hasLiveRangesToDump() const
{
return m_hasLiveRangestoDump;
}
#endif // DEBUG
//------------------------------------------------------------------------
// VariableLiveDescriptor
//------------------------------------------------------------------------
CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::VariableLiveDescriptor(CompAllocator allocator)
{
// Initialize an empty list
m_VariableLiveRanges = new (allocator) LiveRangeList(allocator);
INDEBUG(m_VariableLifeBarrier = new (allocator) LiveRangeDumper(m_VariableLiveRanges));
}
//------------------------------------------------------------------------
// hasVariableLiveRangeOpen: Return true if the variable is still alive,
// false otherwise.
//
bool CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVariableLiveRangeOpen() const
{
return !m_VariableLiveRanges->empty() && !m_VariableLiveRanges->back().m_EndEmitLocation.Valid();
}
//------------------------------------------------------------------------
// getLiveRanges: Return the list of variable locations for this variable.
//
// Return Value:
//  A LiveRangeList* with all the "VariableLiveRange"s recorded for this variable; the
//  list is empty if no location has been reported yet.
//
CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::
getLiveRanges() const
{
return m_VariableLiveRanges;
}
//------------------------------------------------------------------------
// startLiveRangeFromEmitter: Report this variable as being born in "varLocation"
// since the instruction where "emit" is located.
//
// Arguments:
// varLocation - the home of the variable.
// emit - an emitter* instance located at the first instruction from
// where "varLocation" becomes valid.
//
// Assumptions:
// This variable is being born so it should be dead.
//
// Notes:
// The position of "emit" matters to ensure intervals inclusive of the
// beginning and exclusive of the end.
//
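//  For example (hypothetical offsets): if the previous range in rsi was closed exactly at the
//  previous instruction and the variable is re-born in rsi, the closed end is cleared and the
//  existing range is reopened rather than starting a new one.
//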
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::startLiveRangeFromEmitter(
CodeGenInterface::siVarLoc varLocation, emitter* emit) const
{
noway_assert(emit != nullptr);
    // Either this is the first "VariableLiveRange", or the previous one has been closed so its "m_EndEmitLocation" is valid
noway_assert(m_VariableLiveRanges->empty() || m_VariableLiveRanges->back().m_EndEmitLocation.Valid());
if (!m_VariableLiveRanges->empty() &&
siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation)) &&
m_VariableLiveRanges->back().m_EndEmitLocation.IsPreviousInsNum(emit))
{
JITDUMP("Extending debug range...\n");
// The variable is being born just after the instruction at which it died.
// In this case, i.e. an update of the variable's value, we coalesce the live ranges.
m_VariableLiveRanges->back().m_EndEmitLocation.Init();
}
else
{
JITDUMP("New debug range: %s\n",
m_VariableLiveRanges->empty()
? "first"
: siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation))
? "new var or location"
: "not adjacent");
// Creates new live range with invalid end
m_VariableLiveRanges->emplace_back(varLocation, emitLocation(), emitLocation());
m_VariableLiveRanges->back().m_StartEmitLocation.CaptureLocation(emit);
}
#ifdef DEBUG
if (!m_VariableLifeBarrier->hasLiveRangesToDump())
{
m_VariableLifeBarrier->setDumperStartAt(m_VariableLiveRanges->backPosition());
}
#endif // DEBUG
    // m_StartEmitLocation has to be valid and m_EndEmitLocation must not be
noway_assert(m_VariableLiveRanges->back().m_StartEmitLocation.Valid());
noway_assert(!m_VariableLiveRanges->back().m_EndEmitLocation.Valid());
}
//------------------------------------------------------------------------
// endLiveRangeAtEmitter: Report this variable as becoming dead since the
// instruction where "emit" is located.
//
// Arguments:
//  emit - an emitter* instance located at the first instruction from which
//  this variable becomes dead.
//
// Assumptions:
// This variable is becoming dead so it should be alive.
//
// Notes:
// The position of "emit" matters to ensure intervals inclusive of the
// beginning and exclusive of the end.
//
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::endLiveRangeAtEmitter(emitter* emit) const
{
noway_assert(emit != nullptr);
noway_assert(hasVariableLiveRangeOpen());
    // Using [closed, open) ranges so we do not have to compute the size of the last instruction
m_VariableLiveRanges->back().m_EndEmitLocation.CaptureLocation(emit);
    // Now m_EndEmitLocation has to be valid
noway_assert(m_VariableLiveRanges->back().m_EndEmitLocation.Valid());
}
//------------------------------------------------------------------------
// UpdateLiveRangeAtEmitter: Report this variable as changing its variable
// home to "varLocation" since the instruction where "emit" is located.
//
// Arguments:
// varLocation - the new variable location.
// emit - an emitter* instance located at the first instruction from
// where "varLocation" becomes valid.
//
// Assumptions:
//  This variable is alive and has an open "VariableLiveRange".
//
// Notes:
// The position of "emit" matters to ensure intervals inclusive of the
// beginning and exclusive of the end.
//
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::updateLiveRangeAtEmitter(
CodeGenInterface::siVarLoc varLocation, emitter* emit) const
{
    // This variable is changing its home, so a live range must already have been opened
noway_assert(m_VariableLiveRanges != nullptr && !m_VariableLiveRanges->empty());
// And its last m_EndEmitLocation has to be invalid
noway_assert(!m_VariableLiveRanges->back().m_EndEmitLocation.Valid());
// If we are reporting again the same home, that means we are doing something twice?
// noway_assert(! CodeGenInterface::siVarLoc::Equals(&m_VariableLiveRanges->back().m_VarLocation, varLocation));
// Close previous live range
endLiveRangeAtEmitter(emit);
startLiveRangeFromEmitter(varLocation, emit);
}
#ifdef DEBUG
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::dumpAllRegisterLiveRangesForBlock(
emitter* emit, const CodeGenInterface* codeGen) const
{
bool first = true;
for (LiveRangeListIterator it = m_VariableLiveRanges->begin(); it != m_VariableLiveRanges->end(); it++)
{
if (!first)
{
printf("; ");
}
it->dumpVariableLiveRange(emit, codeGen);
first = false;
}
}
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::dumpRegisterLiveRangesForBlockBeforeCodeGenerated(
const CodeGenInterface* codeGen) const
{
bool first = true;
for (LiveRangeListIterator it = m_VariableLifeBarrier->getStartForDump(); it != m_VariableLiveRanges->end(); it++)
{
if (!first)
{
printf("; ");
}
it->dumpVariableLiveRange(codeGen);
first = false;
}
}
// Returns true if a live range for this variable has been recorded
bool CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVarLiveRangesToDump() const
{
return !m_VariableLiveRanges->empty();
}
// Returns true if a live range for this variable has been recorded from last call to EndBlock
bool CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVarLiveRangesFromLastBlockToDump() const
{
return m_VariableLifeBarrier->hasLiveRangesToDump();
}
// Reset the barrier so as to dump only next block changes on next block
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::endBlockLiveRanges()
{
// make "m_VariableLifeBarrier->m_StartingLiveRange" now points to nullptr for printing purposes
m_VariableLifeBarrier->resetDumper(m_VariableLiveRanges);
}
#endif // DEBUG
//------------------------------------------------------------------------
// VariableLiveKeeper
//------------------------------------------------------------------------
// Initialize structures for VariableLiveRanges
void CodeGenInterface::initializeVariableLiveKeeper()
{
CompAllocator allocator = compiler->getAllocator(CMK_VariableLiveRanges);
int amountTrackedVariables = compiler->opts.compDbgInfo ? compiler->info.compLocalsCount : 0;
int amountTrackedArgs = compiler->opts.compDbgInfo ? compiler->info.compArgsCount : 0;
varLiveKeeper = new (allocator) VariableLiveKeeper(amountTrackedVariables, amountTrackedArgs, compiler, allocator);
}
CodeGenInterface::VariableLiveKeeper* CodeGenInterface::getVariableLiveKeeper() const
{
return varLiveKeeper;
};
//------------------------------------------------------------------------
// VariableLiveKeeper: Create an instance of the object in charge of managing
// VariableLiveRanges and initialize the array "m_vlrLiveDsc".
//
// Arguments:
//    totalLocalCount - the count of args, special args and IL Local
//       variables in the method.
//    argsCount - the count of args and special args in the method.
//    comp - a compiler instance
//    allocator - the allocator used for the live range structures
//
CodeGenInterface::VariableLiveKeeper::VariableLiveKeeper(unsigned int totalLocalCount,
unsigned int argsCount,
Compiler* comp,
CompAllocator allocator)
: m_LiveDscCount(totalLocalCount)
, m_LiveArgsCount(argsCount)
, m_Compiler(comp)
, m_LastBasicBlockHasBeenEmited(false)
{
if (m_LiveDscCount > 0)
{
// Allocate memory for "m_vlrLiveDsc" and initialize each "VariableLiveDescriptor"
m_vlrLiveDsc = allocator.allocate<VariableLiveDescriptor>(m_LiveDscCount);
m_vlrLiveDscForProlog = allocator.allocate<VariableLiveDescriptor>(m_LiveDscCount);
for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
new (m_vlrLiveDsc + varNum, jitstd::placement_t()) VariableLiveDescriptor(allocator);
new (m_vlrLiveDscForProlog + varNum, jitstd::placement_t()) VariableLiveDescriptor(allocator);
}
}
}
//------------------------------------------------------------------------
// siStartOrCloseVariableLiveRange: Reports the given variable as being born
// or becoming dead.
//
// Arguments:
// varDsc - the variable for which a location changed will be reported
// varNum - the index of the variable in the "compiler->lvaTable"
// isBorn - whether the variable is being born from where the emitter is located.
// isDying - whether the variable is dying from where the emitter is located.
//
// Assumptions:
//  The emitter should be located at the first instruction from which
//  the variable becomes valid (when isBorn is true) or invalid (when isDying is true).
//
// Notes:
// This method is being called from treeLifeUpdater when the variable is being born,
// becoming dead, or both.
//
void CodeGenInterface::VariableLiveKeeper::siStartOrCloseVariableLiveRange(const LclVarDsc* varDsc,
unsigned int varNum,
bool isBorn,
bool isDying)
{
noway_assert(varDsc != nullptr);
    // Only the variables that exist in the IL, "this", and special arguments
// are reported.
if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount)
{
if (isBorn && !isDying)
{
// "varDsc" is valid from this point
siStartVariableLiveRange(varDsc, varNum);
}
if (isDying && !isBorn)
{
// this variable live range is no longer valid from this point
siEndVariableLiveRange(varNum);
}
}
}
//------------------------------------------------------------------------
// siStartOrCloseVariableLiveRanges: Iterates the given set of variables
// calling "siStartOrCloseVariableLiveRange" with each one.
//
// Arguments:
// varsIndexSet - the set of variables to report start/end "VariableLiveRange"
// isBorn - whether the set is being born from where the emitter is located.
// isDying - whether the set is dying from where the emitter is located.
//
// Assumptions:
//  The emitter should be located at the first instruction from which the variables
//  become valid (when isBorn is true) or invalid (when isDying is true).
//
// Notes:
// This method is being called from treeLifeUpdater when a set of variables
// is being born, becoming dead, or both.
//
void CodeGenInterface::VariableLiveKeeper::siStartOrCloseVariableLiveRanges(VARSET_VALARG_TP varsIndexSet,
bool isBorn,
bool isDying)
{
if (m_Compiler->opts.compDbgInfo)
{
VarSetOps::Iter iter(m_Compiler, varsIndexSet);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned int varNum = m_Compiler->lvaTrackedIndexToLclNum(varIndex);
const LclVarDsc* varDsc = m_Compiler->lvaGetDesc(varNum);
siStartOrCloseVariableLiveRange(varDsc, varNum, isBorn, isDying);
}
}
}
//------------------------------------------------------------------------
// siStartVariableLiveRange: Reports the given variable as being born.
//
// Arguments:
//  varDsc    - the variable for which a location change will be reported
// varNum - the index of the variable to report home in lvLiveDsc
//
// Assumptions:
// The emitter should be pointing to the first instruction from where the VariableLiveRange is
// becoming valid.
// The given "varDsc" should have its VariableRangeLists initialized.
//
// Notes:
//  This method should be called at every place a variable becomes alive.
void CodeGenInterface::VariableLiveKeeper::siStartVariableLiveRange(const LclVarDsc* varDsc, unsigned int varNum)
{
noway_assert(varDsc != nullptr);
    // Only the variables that exist in the IL, "this", and special arguments are reported, as long as they were
// allocated.
if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount && (varDsc->lvIsInReg() || varDsc->lvOnFrame))
{
// Build siVarLoc for this born "varDsc"
CodeGenInterface::siVarLoc varLocation =
m_Compiler->codeGen->getSiVarLoc(varDsc, m_Compiler->codeGen->getCurrentStackLevel());
VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDsc[varNum];
// this variable live range is valid from this point
varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->GetEmitter());
}
}
//------------------------------------------------------------------------
// siEndVariableLiveRange: Reports the variable as becoming dead.
//
// Arguments:
//  varNum    - the index of the variable in m_vlrLiveDsc or lvaTable that
//  is becoming dead.
//
// Assumptions:
// The given variable should be alive.
// The emitter should be pointing to the first instruction from where the VariableLiveRange is
// becoming invalid.
//
// Notes:
//  This method should be called at every place a variable becomes dead.
void CodeGenInterface::VariableLiveKeeper::siEndVariableLiveRange(unsigned int varNum)
{
    // Only the variables that exist in the IL, "this", and special arguments
    // are reported.
    // This method is called from genUpdateLife, which runs after the code for a
    // BasicBlock has been generated; by then the emitter no longer has a valid IG,
    // so we don't report the close of a "VariableLiveRange" after code has been
    // emitted.
if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount && !m_LastBasicBlockHasBeenEmited &&
m_vlrLiveDsc[varNum].hasVariableLiveRangeOpen())
{
// this variable live range is no longer valid from this point
m_vlrLiveDsc[varNum].endLiveRangeAtEmitter(m_Compiler->GetEmitter());
}
}
//------------------------------------------------------------------------
// siUpdateVariableLiveRange: Reports the change of variable location for the
// given variable.
//
// Arguments:
//  varDsc    - the variable whose home has changed.
// varNum - the index of the variable to report home in lvLiveDsc
//
// Assumptions:
// The given variable should be alive.
// The emitter should be pointing to the first instruction from where
// the new variable location is becoming valid.
//
void CodeGenInterface::VariableLiveKeeper::siUpdateVariableLiveRange(const LclVarDsc* varDsc, unsigned int varNum)
{
noway_assert(varDsc != nullptr);
    // Only the variables that exist in the IL, "this", and special arguments
    // are reported. These are locals and arguments, and are counted in
    // "info.compLocalsCount".
    // This method is called while the prolog is being generated; at that point the
    // emitter no longer has a valid IG, so we don't report the close of a
    // "VariableLiveRange" after code has been emitted.
if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount && !m_LastBasicBlockHasBeenEmited)
{
// Build the location of the variable
CodeGenInterface::siVarLoc siVarLoc =
m_Compiler->codeGen->getSiVarLoc(varDsc, m_Compiler->codeGen->getCurrentStackLevel());
// Report the home change for this variable
VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDsc[varNum];
varLiveDsc->updateLiveRangeAtEmitter(siVarLoc, m_Compiler->GetEmitter());
}
}
//------------------------------------------------------------------------
// siEndAllVariableLiveRange: Reports the set of variables as becoming dead.
//
// Arguments:
// newLife - the set of variables that are becoming dead.
//
// Assumptions:
// All the variables in the set are alive.
//
// Notes:
//  This method is called when the last block is being generated to kill all
//  the live variables and to set a flag that prevents reporting variable locations
//  on subsequent calls to the methods that update variable liveness.
void CodeGenInterface::VariableLiveKeeper::siEndAllVariableLiveRange(VARSET_VALARG_TP varsToClose)
{
if (m_Compiler->opts.compDbgInfo)
{
if (m_Compiler->lvaTrackedCount > 0 || !m_Compiler->opts.OptimizationDisabled())
{
VarSetOps::Iter iter(m_Compiler, varsToClose);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned int varNum = m_Compiler->lvaTrackedIndexToLclNum(varIndex);
siEndVariableLiveRange(varNum);
}
}
else
{
// It seems we are jitting debug code, so we don't have variable
// liveness info
siEndAllVariableLiveRange();
}
}
m_LastBasicBlockHasBeenEmited = true;
}
//------------------------------------------------------------------------
// siEndAllVariableLiveRange: Reports all live variables as dead.
//
// Notes:
//  This overload exists for the case where we are jitting code compiled in
//  debug mode. When that happens we don't have variable liveness info such as
//  "BasicBlock::bbLiveIn" or "BasicBlock::bbLiveOut" and there are no
//  tracked variables.
//
void CodeGenInterface::VariableLiveKeeper::siEndAllVariableLiveRange()
{
    // TODO: we can improve this by keeping a set of the variables with
    // open VariableLiveRanges
for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
const VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum;
if (varLiveDsc->hasVariableLiveRangeOpen())
{
siEndVariableLiveRange(varNum);
}
}
}
//------------------------------------------------------------------------
// getLiveRangesForVarForBody: Return the "VariableLiveRange" list that corresponds
//  to the given "varNum".
//
// Arguments:
// varNum - the index of the variable in m_vlrLiveDsc, which is the same as
// in lvaTable.
//
// Return Value:
// A const pointer to the list of variable locations reported for the variable.
//
// Assumptions:
// This variable should be an argument, a special argument or an IL local
// variable.
CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::getLiveRangesForVarForBody(
unsigned int varNum) const
{
// There should be at least one variable for which its liveness is tracked
noway_assert(varNum < m_LiveDscCount);
return m_vlrLiveDsc[varNum].getLiveRanges();
}
//------------------------------------------------------------------------
// getLiveRangesForVarForProlog: Return the "VariableLiveRange" list that corresponds
//  to the given "varNum".
//
// Arguments:
// varNum - the index of the variable in m_vlrLiveDsc, which is the same as
// in lvaTable.
//
// Return Value:
// A const pointer to the list of variable locations reported for the variable.
//
// Assumptions:
// This variable should be an argument, a special argument or an IL local
// variable.
CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::getLiveRangesForVarForProlog(
unsigned int varNum) const
{
// There should be at least one variable for which its liveness is tracked
noway_assert(varNum < m_LiveDscCount);
return m_vlrLiveDscForProlog[varNum].getLiveRanges();
}
//------------------------------------------------------------------------
// getLiveRangesCount: Returns the count of variable locations reported for the tracked
// variables, which are arguments, special arguments, and local IL variables.
//
// Return Value:
// size_t - the count of variable locations
//
// Notes:
// This method is being called from "genSetScopeInfo" to know the count of
// "varResultInfo" that should be created on eeSetLVcount.
//
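//  For illustration (hypothetical counts): with two tracked variables, where V00 has one
//  prolog range and two body ranges and V01 has no prolog range and three body ranges,
//  this returns (1 + 2) + (0 + 3) = 6, provided both variables map to a known IL number;
//  variables that map to ICorDebugInfo::UNKNOWN_ILNUM are skipped.
//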
size_t CodeGenInterface::VariableLiveKeeper::getLiveRangesCount() const
{
size_t liveRangesCount = 0;
if (m_Compiler->opts.compDbgInfo)
{
for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
for (int i = 0; i < 2; i++)
{
VariableLiveDescriptor* varLiveDsc = (i == 0 ? m_vlrLiveDscForProlog : m_vlrLiveDsc) + varNum;
if (m_Compiler->compMap2ILvarNum(varNum) != (unsigned int)ICorDebugInfo::UNKNOWN_ILNUM)
{
liveRangesCount += varLiveDsc->getLiveRanges()->size();
}
}
}
}
return liveRangesCount;
}
//------------------------------------------------------------------------
// psiStartVariableLiveRange: Reports the given variable as being born.
//
// Arguments:
//  varLocation - the variable location
// varNum - the index of the variable in "compiler->lvaTable" or
// "VariableLivekeeper->m_vlrLiveDsc"
//
// Notes:
// This function is expected to be called from "psiBegProlog" during
// prolog code generation.
//
void CodeGenInterface::VariableLiveKeeper::psiStartVariableLiveRange(CodeGenInterface::siVarLoc varLocation,
unsigned int varNum)
{
// This descriptor has to correspond to a parameter. The first slots in lvaTable
// are arguments and special arguments.
noway_assert(varNum < m_LiveArgsCount);
VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDscForProlog[varNum];
varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->GetEmitter());
}
//------------------------------------------------------------------------
// psiClosePrologVariableRanges: Report all the parameters as becoming dead.
//
// Notes:
//  This function is expected to be called from "psiEndProlog" after
//  code for the prolog has been generated.
//
void CodeGenInterface::VariableLiveKeeper::psiClosePrologVariableRanges()
{
noway_assert(m_LiveArgsCount <= m_LiveDscCount);
for (unsigned int varNum = 0; varNum < m_LiveArgsCount; varNum++)
{
VariableLiveDescriptor* varLiveDsc = m_vlrLiveDscForProlog + varNum;
if (varLiveDsc->hasVariableLiveRangeOpen())
{
varLiveDsc->endLiveRangeAtEmitter(m_Compiler->GetEmitter());
}
}
}
#ifdef DEBUG
void CodeGenInterface::VariableLiveKeeper::dumpBlockVariableLiveRanges(const BasicBlock* block)
{
assert(block != nullptr);
bool hasDumpedHistory = false;
printf("\nVariable Live Range History Dump for " FMT_BB "\n", block->bbNum);
if (m_Compiler->opts.compDbgInfo)
{
for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum;
if (varLiveDsc->hasVarLiveRangesFromLastBlockToDump())
{
hasDumpedHistory = true;
m_Compiler->gtDispLclVar(varNum, false);
printf(": ");
varLiveDsc->dumpRegisterLiveRangesForBlockBeforeCodeGenerated(m_Compiler->codeGen);
varLiveDsc->endBlockLiveRanges();
printf("\n");
}
}
}
if (!hasDumpedHistory)
{
printf("..None..\n");
}
}
void CodeGenInterface::VariableLiveKeeper::dumpLvaVariableLiveRanges() const
{
bool hasDumpedHistory = false;
printf("VARIABLE LIVE RANGES:\n");
if (m_Compiler->opts.compDbgInfo)
{
for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum;
if (varLiveDsc->hasVarLiveRangesToDump())
{
hasDumpedHistory = true;
m_Compiler->gtDispLclVar(varNum, false);
printf(": ");
varLiveDsc->dumpAllRegisterLiveRangesForBlock(m_Compiler->GetEmitter(), m_Compiler->codeGen);
printf("\n");
}
}
}
if (!hasDumpedHistory)
{
printf("..None..\n");
}
}
#endif // DEBUG
#endif // USING_VARIABLE_LIVE_RANGE
//-----------------------------------------------------------------------------
// genPoisonFrame: Generate code that places a recognizable value into address exposed variables.
//
// Remarks:
// This function emits code to poison address exposed non-zero-inited local variables. We expect this function
// to be called when emitting code for the scratch BB that comes right after the prolog.
// The variables are poisoned using 0xcdcdcdcd.
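//   As a hedged sketch of the expected effect (hypothetical frame layout): a 12-byte,
//   address-exposed, non-zero-inited local on a 64-bit target is written as one 8-byte store of
//   0xcdcdcdcdcdcdcdcd followed by one 4-byte store of 0xcdcdcdcd, while a local larger than
//   16 pointer-sized slots falls back to "rep stosd" (xarch) or a CORINFO_HELP_MEMSET call.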
void CodeGen::genPoisonFrame(regMaskTP regLiveIn)
{
assert(compiler->compShouldPoisonFrame());
#if defined(TARGET_XARCH)
regNumber poisonValReg = REG_EAX;
assert((regLiveIn & (RBM_EDI | RBM_ECX | RBM_EAX)) == 0);
#else
regNumber poisonValReg = REG_SCRATCH;
assert((regLiveIn & (genRegMask(REG_SCRATCH) | RBM_ARG_0 | RBM_ARG_1 | RBM_ARG_2)) == 0);
#endif
#ifdef TARGET_64BIT
const ssize_t poisonVal = (ssize_t)0xcdcdcdcdcdcdcdcd;
#else
const ssize_t poisonVal = (ssize_t)0xcdcdcdcd;
#endif
    // The first time we need to poison something we will initialize a register to the largest 0xcd...cd immediate
    // that we can fit.
bool hasPoisonImm = false;
for (unsigned varNum = 0; varNum < compiler->info.compLocalsCount; varNum++)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvIsParam || varDsc->lvMustInit || !varDsc->IsAddressExposed())
{
continue;
}
assert(varDsc->lvOnFrame);
unsigned int size = compiler->lvaLclSize(varNum);
if ((size / TARGET_POINTER_SIZE) > 16)
{
// This will require more than 16 instructions, switch to rep stosd/memset call.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_XARCH)
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_EDI, (int)varNum, 0);
assert(size % 4 == 0);
instGen_Set_Reg_To_Imm(EA_4BYTE, REG_ECX, size / 4);
// On xarch we can leave the value in eax and only set eax once
// since rep stosd does not kill eax.
if (!hasPoisonImm)
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_EAX, poisonVal);
hasPoisonImm = true;
}
instGen(INS_r_stosd);
#else
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_0, (int)varNum, 0);
instGen_Set_Reg_To_Imm(EA_4BYTE, REG_ARG_1, static_cast<char>(poisonVal));
instGen_Set_Reg_To_Imm(EA_4BYTE, REG_ARG_2, size);
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
// May kill REG_SCRATCH, so we need to reload it.
hasPoisonImm = false;
#endif
}
else
{
if (!hasPoisonImm)
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, poisonValReg, poisonVal);
hasPoisonImm = true;
}
// For 64-bit we check if the local is 8-byte aligned. For 32-bit, we assume everything is always 4-byte aligned.
#ifdef TARGET_64BIT
bool fpBased;
int addr = compiler->lvaFrameAddress((int)varNum, &fpBased);
#else
int addr = 0;
#endif
int end = addr + (int)size;
for (int offs = addr; offs < end;)
{
#ifdef TARGET_64BIT
if ((offs % 8) == 0 && end - offs >= 8)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_LONG), EA_8BYTE, REG_SCRATCH, (int)varNum, offs - addr);
offs += 8;
continue;
}
#endif
assert((offs % 4) == 0 && end - offs >= 4);
GetEmitter()->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, REG_SCRATCH, (int)varNum, offs - addr);
offs += 4;
}
}
}
}
//----------------------------------------------------------------------
// genBitCast - Generate the instruction to move a value between register files
//
// Arguments
// targetType - the destination type
// targetReg - the destination register
// srcType - the source type
// srcReg - the source register
//
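// For illustration (assumed xarch registers): genBitCast(TYP_INT, REG_EAX, TYP_FLOAT, REG_XMM0)
// reduces to a single cross-register-file move, while a move within the same register file is
// skipped entirely when the source and destination registers match ("canSkip").
//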
void CodeGen::genBitCast(var_types targetType, regNumber targetReg, var_types srcType, regNumber srcReg)
{
const bool srcFltReg = varTypeUsesFloatReg(srcType) || varTypeIsSIMD(srcType);
assert(srcFltReg == genIsValidFloatReg(srcReg));
const bool dstFltReg = varTypeUsesFloatReg(targetType) || varTypeIsSIMD(targetType);
assert(dstFltReg == genIsValidFloatReg(targetReg));
inst_Mov(targetType, targetReg, srcReg, /* canSkip */ true);
}
//----------------------------------------------------------------------
// genCodeForBitCast - Generate code for a GT_BITCAST that is not contained
//
// Arguments
// treeNode - the GT_BITCAST for which we're generating code
//
void CodeGen::genCodeForBitCast(GenTreeOp* treeNode)
{
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
GenTree* op1 = treeNode->gtGetOp1();
genConsumeRegs(op1);
if (op1->isContained())
{
assert(op1->IsLocal() || op1->isIndir());
if (genIsRegCandidateLocal(op1))
{
unsigned lclNum = op1->AsLclVar()->GetLclNum();
GetEmitter()->emitIns_R_S(ins_Load(treeNode->TypeGet(), compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(treeNode), targetReg, lclNum, 0);
}
else
{
op1->gtType = treeNode->TypeGet();
op1->SetRegNum(targetReg);
op1->ClearContained();
JITDUMP("Changing type of BITCAST source to load directly.\n");
genCodeForTreeNode(op1);
}
}
else
{
#ifdef TARGET_ARM
if (compiler->opts.compUseSoftFP && (targetType == TYP_LONG))
{
            // This is a special arm-softFP case when a TYP_LONG node was introduced during lowering
            // for a call argument, so it was not handled by the decomposelongs phase like all other TYP_LONG nodes.
            // Example: foo(double LclVar V01); LclVar V01 has to be passed in general registers r0, r1,
            // so lowering will add `BITCAST long(LclVar double V01)` and codegen has to support it here.
const regNumber srcReg = op1->GetRegNum();
const regNumber otherReg = treeNode->AsMultiRegOp()->gtOtherReg;
assert(otherReg != REG_NA);
inst_RV_RV_RV(INS_vmov_d2i, targetReg, otherReg, srcReg, EA_8BYTE);
}
else
#endif // TARGET_ARM
{
genBitCast(targetType, targetReg, op1->TypeGet(), op1->GetRegNum());
}
}
genProduceReg(treeNode);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Code Generator Common: XX
XX Methods common to all architectures and register allocation strategies XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// TODO-Cleanup: There are additional methods in CodeGen*.cpp that are almost
// identical, and which should probably be moved here.
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "codegen.h"
#include "gcinfo.h"
#include "emit.h"
#ifndef JIT32_GCENCODER
#include "gcinfoencoder.h"
#endif
#include "patchpointinfo.h"
/*****************************************************************************/
void CodeGenInterface::setFramePointerRequiredEH(bool value)
{
m_cgFramePointerRequired = value;
#ifndef JIT32_GCENCODER
if (value)
{
// EnumGcRefs will only enumerate slots in aborted frames
// if they are fully-interruptible. So if we have a catch
// or finally that will keep frame-vars alive, we need to
// force fully-interruptible.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
{
printf("Method has EH, marking method as fully interruptible\n");
}
#endif
m_cgInterruptible = true;
}
#endif // JIT32_GCENCODER
}
/*****************************************************************************/
CodeGenInterface* getCodeGenerator(Compiler* comp)
{
return new (comp, CMK_Codegen) CodeGen(comp);
}
// CodeGen constructor
CodeGenInterface::CodeGenInterface(Compiler* theCompiler)
: gcInfo(theCompiler), regSet(theCompiler, gcInfo), compiler(theCompiler), treeLifeUpdater(nullptr)
{
}
/*****************************************************************************/
CodeGen::CodeGen(Compiler* theCompiler) : CodeGenInterface(theCompiler)
{
#if defined(TARGET_XARCH)
negBitmaskFlt = nullptr;
negBitmaskDbl = nullptr;
absBitmaskFlt = nullptr;
absBitmaskDbl = nullptr;
u8ToDblBitmask = nullptr;
#endif // defined(TARGET_XARCH)
#if defined(FEATURE_PUT_STRUCT_ARG_STK) && !defined(TARGET_X86)
m_stkArgVarNum = BAD_VAR_NUM;
#endif
#if defined(UNIX_X86_ABI)
curNestedAlignment = 0;
maxNestedAlignment = 0;
#endif
gcInfo.regSet = ®Set;
m_cgEmitter = new (compiler->getAllocator()) emitter();
m_cgEmitter->codeGen = this;
m_cgEmitter->gcInfo = &gcInfo;
#ifdef DEBUG
setVerbose(compiler->verbose);
#endif // DEBUG
regSet.tmpInit();
#ifdef LATE_DISASM
getDisAssembler().disInit(compiler);
#endif
#ifdef DEBUG
genTempLiveChg = true;
genTrnslLocalVarCount = 0;
// Shouldn't be used before it is set in genFnProlog()
compiler->compCalleeRegsPushed = UninitializedWord<unsigned>(compiler);
#if defined(TARGET_XARCH)
// Shouldn't be used before it is set in genFnProlog()
compiler->compCalleeFPRegsSavedMask = (regMaskTP)-1;
#endif // defined(TARGET_XARCH)
#endif // DEBUG
#ifdef TARGET_AMD64
// This will be set before final frame layout.
compiler->compVSQuirkStackPaddingNeeded = 0;
#endif // TARGET_AMD64
compiler->genCallSite2DebugInfoMap = nullptr;
    /* Assume that we are not fully interruptible */
SetInterruptible(false);
#ifdef TARGET_ARMARCH
SetHasTailCalls(false);
#endif // TARGET_ARMARCH
#ifdef DEBUG
genInterruptibleUsed = false;
genCurDispOffset = (unsigned)-1;
#endif
#ifdef TARGET_ARM64
genSaveFpLrWithAllCalleeSavedRegisters = false;
#endif // TARGET_ARM64
}
void CodeGenInterface::genMarkTreeInReg(GenTree* tree, regNumber reg)
{
tree->SetRegNum(reg);
}
#if defined(TARGET_X86) || defined(TARGET_ARM)
//---------------------------------------------------------------------
// genTotalFrameSize - return the "total" size of the stack frame, including local size
// and callee-saved register size. There are a few things "missing" depending on the
// platform. The function genCallerSPtoInitialSPdelta() includes those things.
//
// For ARM, this doesn't include the prespilled registers.
//
// For x86, this doesn't include the frame pointer if codeGen->isFramePointerUsed() is true.
// It also doesn't include the pushed return address.
//
// Return value:
// Frame size
int CodeGenInterface::genTotalFrameSize() const
{
assert(!IsUninitialized(compiler->compCalleeRegsPushed));
int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
assert(totalFrameSize >= 0);
return totalFrameSize;
}
//---------------------------------------------------------------------
// genSPtoFPdelta - return the offset from SP to the frame pointer.
// This number is going to be positive, since SP must be at the lowest
// address.
//
// There must be a frame pointer to call this function!
int CodeGenInterface::genSPtoFPdelta() const
{
assert(isFramePointerUsed());
int delta;
delta = -genCallerSPtoInitialSPdelta() + genCallerSPtoFPdelta();
assert(delta >= 0);
return delta;
}
//---------------------------------------------------------------------
// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
// This number is going to be negative, since the Caller-SP is at a higher
// address than the frame pointer.
//
// There must be a frame pointer to call this function!
int CodeGenInterface::genCallerSPtoFPdelta() const
{
assert(isFramePointerUsed());
int callerSPtoFPdelta = 0;
#if defined(TARGET_ARM)
// On ARM, we first push the prespill registers, then store LR, then R11 (FP), and point R11 at the saved R11.
callerSPtoFPdelta -= genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
callerSPtoFPdelta -= 2 * REGSIZE_BYTES;
#elif defined(TARGET_X86)
// Thanks to ebp chaining, the difference between ebp-based addresses
// and caller-SP-relative addresses is just the 2 pointers:
// return address
// pushed ebp
callerSPtoFPdelta -= 2 * REGSIZE_BYTES;
#else
#error "Unknown TARGET"
#endif // TARGET*
assert(callerSPtoFPdelta <= 0);
return callerSPtoFPdelta;
}
//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
// This number will be negative.
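//
//  Worked example (hypothetical x86 frame with a frame pointer): with 2 callee-saved registers
//  pushed and compLclFrameSize == 24, genTotalFrameSize() == 2*4 + 24 == 32; this function then
//  returns -(32 + 4 [return address] + 4 [pushed EBP]) == -40, genCallerSPtoFPdelta() == -8,
//  and genSPtoFPdelta() == 40 - 8 == 32.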
int CodeGenInterface::genCallerSPtoInitialSPdelta() const
{
int callerSPtoSPdelta = 0;
#if defined(TARGET_ARM)
callerSPtoSPdelta -= genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
callerSPtoSPdelta -= genTotalFrameSize();
#elif defined(TARGET_X86)
callerSPtoSPdelta -= genTotalFrameSize();
callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
// compCalleeRegsPushed does not account for the frame pointer
// TODO-Cleanup: shouldn't this be part of genTotalFrameSize?
if (isFramePointerUsed())
{
callerSPtoSPdelta -= REGSIZE_BYTES;
}
#else
#error "Unknown TARGET"
#endif // TARGET*
assert(callerSPtoSPdelta <= 0);
return callerSPtoSPdelta;
}
#endif // defined(TARGET_X86) || defined(TARGET_ARM)
/*****************************************************************************
 * Should we round simple operations (assignments, arithmetic operations, etc.)?
*/
// inline
// static
bool CodeGen::genShouldRoundFP()
{
RoundLevel roundLevel = getRoundFloatLevel();
switch (roundLevel)
{
case ROUND_NEVER:
case ROUND_CMP_CONST:
case ROUND_CMP:
return false;
default:
assert(roundLevel == ROUND_ALWAYS);
return true;
}
}
/*****************************************************************************
*
* Initialize some global variables.
*/
void CodeGen::genPrepForCompiler()
{
treeLifeUpdater = new (compiler, CMK_bitset) TreeLifeUpdater<true>(compiler);
/* Figure out which non-register variables hold pointers */
VarSetOps::AssignNoCopy(compiler, gcInfo.gcTrkStkPtrLcls, VarSetOps::MakeEmpty(compiler));
// Also, initialize gcTrkStkPtrLcls to include all tracked variables that do not fully live
// in a register (i.e. they live on the stack for all or part of their lifetime).
// Note that lvRegister indicates that a lclVar is in a register for its entire lifetime.
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
if (varDsc->lvTracked || varDsc->lvIsRegCandidate())
{
if (!varDsc->lvRegister && compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::AddElemD(compiler, gcInfo.gcTrkStkPtrLcls, varDsc->lvVarIndex);
}
}
}
VarSetOps::AssignNoCopy(compiler, genLastLiveSet, VarSetOps::MakeEmpty(compiler));
genLastLiveMask = RBM_NONE;
#ifdef DEBUG
compiler->fgBBcountAtCodegen = compiler->fgBBcount;
#endif
}
//------------------------------------------------------------------------
// genMarkLabelsForCodegen: Mark labels required for codegen.
//
// Mark all blocks that require a label with BBF_HAS_LABEL. These are either blocks that are:
// 1. the target of jumps (fall-through flow doesn't require a label),
// 2. referenced labels such as for "switch" codegen,
// 3. needed to denote the range of EH regions to the VM.
// 4. needed to denote the range of code for alignment processing.
//
// No labels will be in the IR before now, but future codegen might annotate additional blocks
// with this flag, such as "switch" codegen, or codegen-created blocks from genCreateTempLabel().
// Also, the alignment processing code marks BBJ_COND fall-through labels elsewhere.
//
// To report exception handling information to the VM, we need the size of the exception
// handling regions. To compute that, we need to emit labels for the beginning block of
// an EH region, and the block that immediately follows a region. Go through the EH
// table and mark all these blocks with BBF_HAS_LABEL to make this happen.
//
// This code is closely coupled with genReportEH() in the sense that any block
// this procedure determines needs a label has to be selected using the same
// logic both here and in genReportEH(); so any time we change the way we handle
// EH reporting, we have to keep the logic of these two methods in sync.
//
// No blocks should be added or removed after this.
//
void CodeGen::genMarkLabelsForCodegen()
{
assert(!compiler->fgSafeBasicBlockCreation);
JITDUMP("Mark labels for codegen\n");
#ifdef DEBUG
// No label flags should be set before this.
for (BasicBlock* const block : compiler->Blocks())
{
assert((block->bbFlags & BBF_HAS_LABEL) == 0);
}
#endif // DEBUG
// The first block is special; it always needs a label. This is to properly set up GC info.
JITDUMP(" " FMT_BB " : first block\n", compiler->fgFirstBB->bbNum);
compiler->fgFirstBB->bbFlags |= BBF_HAS_LABEL;
// The current implementation of switch tables requires the first block to have a label so it
// can generate offsets to the switch label targets.
    // (This is redundant with the fact that we always mark the first block with a label above.)
// TODO-CQ: remove this when switches have been re-implemented to not use this.
if (compiler->fgHasSwitch)
{
JITDUMP(" " FMT_BB " : function has switch; mark first block\n", compiler->fgFirstBB->bbNum);
compiler->fgFirstBB->bbFlags |= BBF_HAS_LABEL;
}
for (BasicBlock* const block : compiler->Blocks())
{
switch (block->bbJumpKind)
{
case BBJ_ALWAYS: // This will also handle the BBJ_ALWAYS of a BBJ_CALLFINALLY/BBJ_ALWAYS pair.
case BBJ_COND:
case BBJ_EHCATCHRET:
JITDUMP(" " FMT_BB " : branch target\n", block->bbJumpDest->bbNum);
block->bbJumpDest->bbFlags |= BBF_HAS_LABEL;
break;
case BBJ_SWITCH:
for (BasicBlock* const bTarget : block->SwitchTargets())
{
JITDUMP(" " FMT_BB " : branch target\n", bTarget->bbNum);
bTarget->bbFlags |= BBF_HAS_LABEL;
}
break;
case BBJ_CALLFINALLY:
// The finally target itself will get marked by walking the EH table, below, and marking
// all handler begins.
CLANG_FORMAT_COMMENT_ANCHOR;
#if FEATURE_EH_CALLFINALLY_THUNKS
{
// For callfinally thunks, we need to mark the block following the callfinally/always pair,
// as that's needed for identifying the range of the "duplicate finally" region in EH data.
BasicBlock* bbToLabel = block->bbNext;
if (block->isBBCallAlwaysPair())
{
bbToLabel = bbToLabel->bbNext; // skip the BBJ_ALWAYS
}
if (bbToLabel != nullptr)
{
JITDUMP(" " FMT_BB " : callfinally thunk region end\n", bbToLabel->bbNum);
bbToLabel->bbFlags |= BBF_HAS_LABEL;
}
}
#endif // FEATURE_EH_CALLFINALLY_THUNKS
break;
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
case BBJ_RETURN:
case BBJ_THROW:
case BBJ_NONE:
break;
default:
noway_assert(!"Unexpected bbJumpKind");
break;
}
}
// Walk all the exceptional code blocks and mark them, since they don't appear in the normal flow graph.
for (Compiler::AddCodeDsc* add = compiler->fgAddCodeList; add; add = add->acdNext)
{
JITDUMP(" " FMT_BB " : throw helper block\n", add->acdDstBlk->bbNum);
add->acdDstBlk->bbFlags |= BBF_HAS_LABEL;
}
for (EHblkDsc* const HBtab : EHClauses(compiler))
{
HBtab->ebdTryBeg->bbFlags |= BBF_HAS_LABEL;
HBtab->ebdHndBeg->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : try begin\n", HBtab->ebdTryBeg->bbNum);
JITDUMP(" " FMT_BB " : hnd begin\n", HBtab->ebdHndBeg->bbNum);
if (HBtab->ebdTryLast->bbNext != nullptr)
{
HBtab->ebdTryLast->bbNext->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->bbNext->bbNum);
}
if (HBtab->ebdHndLast->bbNext != nullptr)
{
HBtab->ebdHndLast->bbNext->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->bbNext->bbNum);
}
if (HBtab->HasFilter())
{
HBtab->ebdFilter->bbFlags |= BBF_HAS_LABEL;
JITDUMP(" " FMT_BB " : filter begin\n", HBtab->ebdFilter->bbNum);
}
}
#ifdef DEBUG
if (compiler->verbose)
{
printf("*************** After genMarkLabelsForCodegen()\n");
compiler->fgDispBasicBlocks();
}
#endif // DEBUG
}
void CodeGenInterface::genUpdateLife(GenTree* tree)
{
treeLifeUpdater->UpdateLife(tree);
}
void CodeGenInterface::genUpdateLife(VARSET_VALARG_TP newLife)
{
compiler->compUpdateLife</*ForCodeGen*/ true>(newLife);
}
// Return the register mask for the given register variable
// inline
regMaskTP CodeGenInterface::genGetRegMask(const LclVarDsc* varDsc)
{
regMaskTP regMask = RBM_NONE;
assert(varDsc->lvIsInReg());
regNumber reg = varDsc->GetRegNum();
if (genIsValidFloatReg(reg))
{
regMask = genRegMaskFloat(reg, varDsc->GetRegisterType());
}
else
{
regMask = genRegMask(reg);
}
return regMask;
}
// Return the register mask for the given lclVar or regVar tree node
// inline
regMaskTP CodeGenInterface::genGetRegMask(GenTree* tree)
{
assert(tree->gtOper == GT_LCL_VAR);
regMaskTP regMask = RBM_NONE;
const LclVarDsc* varDsc = compiler->lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->lvPromoted)
{
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
const LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(i);
noway_assert(fieldVarDsc->lvIsStructField);
if (fieldVarDsc->lvIsInReg())
{
regMask |= genGetRegMask(fieldVarDsc);
}
}
}
else if (varDsc->lvIsInReg())
{
regMask = genGetRegMask(varDsc);
}
return regMask;
}
// The given lclVar is either going live (being born) or dying.
// It might be both going live and dying (that is, it is a dead store) under MinOpts.
// Update regSet.GetMaskVars() accordingly.
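// For illustration: if V03 lives in ECX and is dying, RBM_ECX is removed from
// regSet.GetMaskVars(); if it is being born, RBM_ECX is added (EH and "spill at single-def"
// variables may already be treated as live in memory, as the assert below allows).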
// inline
void CodeGenInterface::genUpdateRegLife(const LclVarDsc* varDsc, bool isBorn, bool isDying DEBUGARG(GenTree* tree))
{
regMaskTP regMask = genGetRegMask(varDsc);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tV%02u in reg ", compiler->lvaGetLclNum(varDsc));
varDsc->PrintVarReg();
printf(" is becoming %s ", (isDying) ? "dead" : "live");
Compiler::printTreeID(tree);
printf("\n");
}
#endif // DEBUG
if (isDying)
{
// We'd like to be able to assert the following, however if we are walking
// through a qmark/colon tree, we may encounter multiple last-use nodes.
// assert((regSet.GetMaskVars() & regMask) == regMask);
regSet.RemoveMaskVars(regMask);
}
else
{
// If this is going live, the register must not have a variable in it, except
// in the case of an exception or "spill at single-def" variable, which may be already treated
// as live in the register.
assert(varDsc->IsAlwaysAliveInMemory() || ((regSet.GetMaskVars() & regMask) == 0));
regSet.AddMaskVars(regMask);
}
}
//----------------------------------------------------------------------
// compHelperCallKillSet: Gets a register mask that represents the kill set for a helper call.
// Not all JIT Helper calls follow the standard ABI on the target architecture.
//
// TODO-CQ: Currently this list is incomplete (not all helpers calls are
// enumerated) and not 100% accurate (some killsets are bigger than
// what they really are).
// There's some work to be done in several places in the JIT to
// accurately track the registers that are getting killed by
// helper calls:
//              a) LSRA needs several changes to accommodate more precise killsets
// for every helper call it sees (both explicitly [easy] and
// implicitly [hard])
// b) Currently for AMD64, when we generate code for a helper call
// we're independently over-pessimizing the killsets of the call
// (independently from LSRA) and this needs changes
// both in CodeGenAmd64.cpp and emitx86.cpp.
//
// The best solution for this problem would be to try to centralize
// the killset information in a single place but then make the
// corresponding changes so every code generation phase is in sync
// about this.
//
// The interim solution is to only add known helper calls that don't
// follow the AMD64 ABI and actually trash registers that are supposed to be non-volatile.
//
// Arguments:
// helper - The helper being inquired about
//
// Return Value:
// Mask of register kills -- registers whose values are no longer guaranteed to be the same.
//
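// Notes:
//    For illustration (values taken from the cases below): compHelperCallKillSet(CORINFO_HELP_ASSIGN_BYREF)
//    yields RBM_ESI | RBM_EDI | RBM_ECX on x86, while any helper that is not special-cased here
//    conservatively reports RBM_CALLEE_TRASH.
//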
regMaskTP Compiler::compHelperCallKillSet(CorInfoHelpFunc helper)
{
switch (helper)
{
case CORINFO_HELP_ASSIGN_BYREF:
#if defined(TARGET_AMD64)
return RBM_RSI | RBM_RDI | RBM_CALLEE_TRASH_NOGC;
#elif defined(TARGET_ARMARCH)
return RBM_CALLEE_TRASH_WRITEBARRIER_BYREF;
#elif defined(TARGET_X86)
return RBM_ESI | RBM_EDI | RBM_ECX;
#else
NYI("Model kill set for CORINFO_HELP_ASSIGN_BYREF on target arch");
return RBM_CALLEE_TRASH;
#endif
#if defined(TARGET_ARMARCH)
case CORINFO_HELP_ASSIGN_REF:
case CORINFO_HELP_CHECKED_ASSIGN_REF:
return RBM_CALLEE_TRASH_WRITEBARRIER;
#endif
case CORINFO_HELP_PROF_FCN_ENTER:
#ifdef RBM_PROFILER_ENTER_TRASH
return RBM_PROFILER_ENTER_TRASH;
#else
NYI("Model kill set for CORINFO_HELP_PROF_FCN_ENTER on target arch");
#endif
case CORINFO_HELP_PROF_FCN_LEAVE:
#ifdef RBM_PROFILER_LEAVE_TRASH
return RBM_PROFILER_LEAVE_TRASH;
#else
NYI("Model kill set for CORINFO_HELP_PROF_FCN_LEAVE on target arch");
#endif
case CORINFO_HELP_PROF_FCN_TAILCALL:
#ifdef RBM_PROFILER_TAILCALL_TRASH
return RBM_PROFILER_TAILCALL_TRASH;
#else
NYI("Model kill set for CORINFO_HELP_PROF_FCN_TAILCALL on target arch");
#endif
#ifdef TARGET_X86
case CORINFO_HELP_ASSIGN_REF_EAX:
case CORINFO_HELP_ASSIGN_REF_ECX:
case CORINFO_HELP_ASSIGN_REF_EBX:
case CORINFO_HELP_ASSIGN_REF_EBP:
case CORINFO_HELP_ASSIGN_REF_ESI:
case CORINFO_HELP_ASSIGN_REF_EDI:
case CORINFO_HELP_CHECKED_ASSIGN_REF_EAX:
case CORINFO_HELP_CHECKED_ASSIGN_REF_ECX:
case CORINFO_HELP_CHECKED_ASSIGN_REF_EBX:
case CORINFO_HELP_CHECKED_ASSIGN_REF_EBP:
case CORINFO_HELP_CHECKED_ASSIGN_REF_ESI:
case CORINFO_HELP_CHECKED_ASSIGN_REF_EDI:
return RBM_EDX;
#ifdef FEATURE_USE_ASM_GC_WRITE_BARRIERS
case CORINFO_HELP_ASSIGN_REF:
case CORINFO_HELP_CHECKED_ASSIGN_REF:
return RBM_EAX | RBM_EDX;
#endif // FEATURE_USE_ASM_GC_WRITE_BARRIERS
#endif
case CORINFO_HELP_STOP_FOR_GC:
return RBM_STOP_FOR_GC_TRASH;
case CORINFO_HELP_INIT_PINVOKE_FRAME:
return RBM_INIT_PINVOKE_FRAME_TRASH;
case CORINFO_HELP_VALIDATE_INDIRECT_CALL:
return RBM_VALIDATE_INDIRECT_CALL_TRASH;
default:
return RBM_CALLEE_TRASH;
}
}
//------------------------------------------------------------------------
// compChangeLife: Compare the given "newLife" with last set of live variables and update
// codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness.
//
// Arguments:
// newLife - the new set of variables that are alive.
//
// Assumptions:
//    The set of live variables reflects only the code emitted so far; it should not account for variables becoming
//    live or dead at instructions that have not been emitted yet. This is used to ensure half-open [)
//    "VariableLiveRange" intervals when calling "siStartVariableLiveRange" and "siEndVariableLiveRange".
//
// Notes:
// If "ForCodeGen" is false, only "compCurLife" set (and no mask) will be setted.
//
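//    For illustration (hypothetical tracked variables): if compCurLife = {V01, V02} and
//    newLife = {V02, V03}, then deadSet = {V01} and bornSet = {V03}; the dying V01 is
//    processed first so that any register it frees can be reused when V03 becomes live.
//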
template <bool ForCodeGen>
void Compiler::compChangeLife(VARSET_VALARG_TP newLife)
{
#ifdef DEBUG
if (verbose)
{
printf("Change life %s ", VarSetOps::ToString(this, compCurLife));
dumpConvertedVarSet(this, compCurLife);
printf(" -> %s ", VarSetOps::ToString(this, newLife));
dumpConvertedVarSet(this, newLife);
printf("\n");
}
#endif // DEBUG
/* We should only be called when the live set has actually changed */
noway_assert(!VarSetOps::Equal(this, compCurLife, newLife));
if (!ForCodeGen)
{
VarSetOps::Assign(this, compCurLife, newLife);
return;
}
/* Figure out which variables are becoming live/dead at this point */
// deadSet = compCurLife - newLife
VARSET_TP deadSet(VarSetOps::Diff(this, compCurLife, newLife));
// bornSet = newLife - compCurLife
VARSET_TP bornSet(VarSetOps::Diff(this, newLife, compCurLife));
/* Can't simultaneously become live and dead at the same time */
// (deadSet UNION bornSet) != EMPTY
noway_assert(!VarSetOps::IsEmptyUnion(this, deadSet, bornSet));
// (deadSet INTERSECTION bornSet) == EMPTY
noway_assert(VarSetOps::IsEmptyIntersection(this, deadSet, bornSet));
VarSetOps::Assign(this, compCurLife, newLife);
// Handle the dying vars first, then the newly live vars.
// This is because, in the RyuJIT backend case, they may occupy registers that
// will be occupied by another var that is newly live.
VarSetOps::Iter deadIter(this, deadSet);
unsigned deadVarIndex = 0;
while (deadIter.NextElem(&deadVarIndex))
{
unsigned varNum = lvaTrackedIndexToLclNum(deadVarIndex);
LclVarDsc* varDsc = lvaGetDesc(varNum);
bool isGCRef = (varDsc->TypeGet() == TYP_REF);
bool isByRef = (varDsc->TypeGet() == TYP_BYREF);
bool isInReg = varDsc->lvIsInReg();
bool isInMemory = !isInReg || varDsc->IsAlwaysAliveInMemory();
if (isInReg)
{
// TODO-Cleanup: Move the code from compUpdateLifeVar to genUpdateRegLife that updates the
// gc sets
regMaskTP regMask = varDsc->lvRegMask();
if (isGCRef)
{
codeGen->gcInfo.gcRegGCrefSetCur &= ~regMask;
}
else if (isByRef)
{
codeGen->gcInfo.gcRegByrefSetCur &= ~regMask;
}
codeGen->genUpdateRegLife(varDsc, false /*isBorn*/, true /*isDying*/ DEBUGARG(nullptr));
}
// Update the gcVarPtrSetCur if it is in memory.
if (isInMemory && (isGCRef || isByRef))
{
VarSetOps::RemoveElemD(this, codeGen->gcInfo.gcVarPtrSetCur, deadVarIndex);
JITDUMP("\t\t\t\t\t\t\tV%02u becoming dead\n", varNum);
}
#ifdef USING_VARIABLE_LIVE_RANGE
codeGen->getVariableLiveKeeper()->siEndVariableLiveRange(varNum);
#endif // USING_VARIABLE_LIVE_RANGE
}
VarSetOps::Iter bornIter(this, bornSet);
unsigned bornVarIndex = 0;
while (bornIter.NextElem(&bornVarIndex))
{
unsigned varNum = lvaTrackedIndexToLclNum(bornVarIndex);
LclVarDsc* varDsc = lvaGetDesc(varNum);
bool isGCRef = (varDsc->TypeGet() == TYP_REF);
bool isByRef = (varDsc->TypeGet() == TYP_BYREF);
if (varDsc->lvIsInReg())
{
// If this variable is going live in a register, it is no longer live on the stack,
// unless it is an EH/"spill at single-def" var, which always remains live on the stack.
if (!varDsc->IsAlwaysAliveInMemory())
{
#ifdef DEBUG
if (VarSetOps::IsMember(this, codeGen->gcInfo.gcVarPtrSetCur, bornVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tRemoving V%02u from gcVarPtrSetCur\n", varNum);
}
#endif // DEBUG
VarSetOps::RemoveElemD(this, codeGen->gcInfo.gcVarPtrSetCur, bornVarIndex);
}
codeGen->genUpdateRegLife(varDsc, true /*isBorn*/, false /*isDying*/ DEBUGARG(nullptr));
regMaskTP regMask = varDsc->lvRegMask();
if (isGCRef)
{
codeGen->gcInfo.gcRegGCrefSetCur |= regMask;
}
else if (isByRef)
{
codeGen->gcInfo.gcRegByrefSetCur |= regMask;
}
}
else if (lvaIsGCTracked(varDsc))
{
// This isn't in a register, so update the gcVarPtrSetCur to show that it's live on the stack.
VarSetOps::AddElemD(this, codeGen->gcInfo.gcVarPtrSetCur, bornVarIndex);
JITDUMP("\t\t\t\t\t\t\tV%02u becoming live\n", varNum);
}
#ifdef USING_VARIABLE_LIVE_RANGE
codeGen->getVariableLiveKeeper()->siStartVariableLiveRange(varDsc, varNum);
#endif // USING_VARIABLE_LIVE_RANGE
}
#ifdef USING_SCOPE_INFO
codeGen->siUpdate();
#endif // USING_SCOPE_INFO
}
// Need an explicit instantiation.
template void Compiler::compChangeLife<true>(VARSET_VALARG_TP newLife);
/*****************************************************************************
*
* Generate a spill.
*/
void CodeGenInterface::spillReg(var_types type, TempDsc* tmp, regNumber reg)
{
GetEmitter()->emitIns_S_R(ins_Store(type), emitActualTypeSize(type), reg, tmp->tdTempNum(), 0);
}
/*****************************************************************************
*
* Generate a reload.
*/
void CodeGenInterface::reloadReg(var_types type, TempDsc* tmp, regNumber reg)
{
GetEmitter()->emitIns_R_S(ins_Load(type), emitActualTypeSize(type), reg, tmp->tdTempNum(), 0);
}
// inline
regNumber CodeGenInterface::genGetThisArgReg(GenTreeCall* call) const
{
return REG_ARG_0;
}
//----------------------------------------------------------------------
// getSpillTempDsc: get the TempDsc corresponding to a spilled tree.
//
// Arguments:
// tree - spilled GenTree node
//
// Return Value:
// TempDsc corresponding to tree
TempDsc* CodeGenInterface::getSpillTempDsc(GenTree* tree)
{
// tree must be in spilled state.
assert((tree->gtFlags & GTF_SPILLED) != 0);
// Get the tree's SpillDsc.
RegSet::SpillDsc* prevDsc;
RegSet::SpillDsc* spillDsc = regSet.rsGetSpillInfo(tree, tree->GetRegNum(), &prevDsc);
assert(spillDsc != nullptr);
// Get the temp desc.
TempDsc* temp = regSet.rsGetSpillTempWord(tree->GetRegNum(), spillDsc, prevDsc);
return temp;
}
/*****************************************************************************
*
* The following can be used to create basic blocks that serve as labels for
* the emitter. Use with caution - these are not real basic blocks!
*
*/
// inline
BasicBlock* CodeGen::genCreateTempLabel()
{
#ifdef DEBUG
// These blocks don't affect FP
compiler->fgSafeBasicBlockCreation = true;
#endif
BasicBlock* block = compiler->bbNewBasicBlock(BBJ_NONE);
#ifdef DEBUG
compiler->fgSafeBasicBlockCreation = false;
#endif
JITDUMP("Mark " FMT_BB " as label: codegen temp block\n", block->bbNum);
block->bbFlags |= BBF_HAS_LABEL;
// Use coldness of current block, as this label will
// be contained in it.
block->bbFlags |= (compiler->compCurBB->bbFlags & BBF_COLD);
#ifdef DEBUG
#ifdef UNIX_X86_ABI
block->bbTgtStkDepth = (genStackLevel - curNestedAlignment) / sizeof(int);
#else
block->bbTgtStkDepth = genStackLevel / sizeof(int);
#endif
#endif
return block;
}
void CodeGen::genLogLabel(BasicBlock* bb)
{
#ifdef DEBUG
if (compiler->opts.dspCode)
{
printf("\n L_M%03u_" FMT_BB ":\n", compiler->compMethodID, bb->bbNum);
}
#endif
}
// genDefineTempLabel: Define a label based on the current GC info tracked by
// the code generator.
//
// Arguments:
// label - A label represented as a basic block. These are created with
// genCreateTempLabel and are not normal basic blocks.
//
// Notes:
// The label will be defined with the current GC info tracked by the code
// generator. When the emitter sees this label it will thus remove any temporary
// GC refs it is tracking in registers. For example, a call might produce a ref
// in RAX which the emitter would track but which would not be tracked in
// codegen's GC info since codegen would immediately copy it from RAX into its
// home.
//
void CodeGen::genDefineTempLabel(BasicBlock* label)
{
genLogLabel(label);
label->bbEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, false DEBUG_ARG(label));
}
// genDefineInlineTempLabel: Define an inline label that does not affect the GC
// info.
//
// Arguments:
// label - A label represented as a basic block. These are created with
// genCreateTempLabel and are not normal basic blocks.
//
// Notes:
// The emitter will continue to track GC info as if there was no label.
//
void CodeGen::genDefineInlineTempLabel(BasicBlock* label)
{
genLogLabel(label);
label->bbEmitCookie = GetEmitter()->emitAddInlineLabel();
}
//------------------------------------------------------------------------
// genAdjustStackLevel: Adjust the stack level, if required, for a throw helper block
//
// Arguments:
// block - The BasicBlock for which we are about to generate code.
//
// Assumptions:
// Must be called just prior to generating code for 'block'.
//
// Notes:
// This only makes an adjustment if !FEATURE_FIXED_OUT_ARGS, if there is no frame pointer,
// and if 'block' is a throw helper block with a non-zero stack level.
void CodeGen::genAdjustStackLevel(BasicBlock* block)
{
#if !FEATURE_FIXED_OUT_ARGS
// Check for inserted throw blocks and adjust genStackLevel.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(UNIX_X86_ABI)
if (isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block))
{
// x86/Linux requires stack frames to be 16-byte aligned, but SP may be unaligned
        // at this point if a jump to this block is made in the middle of pushing arguments.
//
// Here we restore SP to prevent potential stack alignment issues.
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -genSPtoFPdelta());
}
#endif
if (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block))
{
noway_assert(block->bbFlags & BBF_HAS_LABEL);
SetStackLevel(compiler->fgThrowHlpBlkStkLevel(block) * sizeof(int));
if (genStackLevel != 0)
{
#ifdef TARGET_X86
GetEmitter()->emitMarkStackLvl(genStackLevel);
inst_RV_IV(INS_add, REG_SPBASE, genStackLevel, EA_PTRSIZE);
SetStackLevel(0);
#else // TARGET_X86
NYI("Need emitMarkStackLvl()");
#endif // TARGET_X86
}
}
#endif // !FEATURE_FIXED_OUT_ARGS
}
/*****************************************************************************
*
* Take an address expression and try to find the best set of components to
* form an address mode; returns non-zero if this is successful.
*
* TODO-Cleanup: The RyuJIT backend never uses this to actually generate code.
* Refactor this code so that the underlying analysis can be used in
* the RyuJIT Backend to do lowering, instead of having to call this method with the
* option to not generate the code.
*
* 'fold' specifies if it is OK to fold the array index which hangs off
* a GT_NOP node.
*
* If successful, the parameters will be set to the following values:
*
* *rv1Ptr ... base operand
* *rv2Ptr ... optional operand
* *revPtr ... true if rv2 is before rv1 in the evaluation order
* *mulPtr ... optional multiplier (2/4/8) for rv2
* Note that for [reg1 + reg2] and [reg1 + reg2 + icon], *mulPtr == 0.
* *cnsPtr ... integer constant [optional]
*
* IMPORTANT NOTE: This routine doesn't generate any code, it merely
* identifies the components that might be used to
* form an address mode later on.
*/
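/* Illustrative example (hypothetical tree): for an address computed as
   "base + 8*index + 40" on x86/x64, a successful call sets *rv1Ptr = base,
   *rv2Ptr = index, *mulPtr = 8 and *cnsPtr = 40, with *revPtr reporting
   whether "index" is evaluated before "base". */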
bool CodeGen::genCreateAddrMode(
GenTree* addr, bool fold, bool* revPtr, GenTree** rv1Ptr, GenTree** rv2Ptr, unsigned* mulPtr, ssize_t* cnsPtr)
{
/*
The following indirections are valid address modes on x86/x64:
[ icon] * not handled here
[reg ]
[reg + icon]
[reg1 + reg2 ]
[reg1 + reg2 + icon]
[reg1 + 2 * reg2 ]
[reg1 + 4 * reg2 ]
[reg1 + 8 * reg2 ]
[ 2 * reg2 + icon]
[ 4 * reg2 + icon]
[ 8 * reg2 + icon]
[reg1 + 2 * reg2 + icon]
[reg1 + 4 * reg2 + icon]
[reg1 + 8 * reg2 + icon]
The following indirections are valid address modes on arm64:
[reg]
[reg + icon]
[reg1 + reg2]
[reg1 + reg2 * natural-scale]
*/
/* All indirect address modes require the address to be an addition */
if (addr->gtOper != GT_ADD)
{
return false;
}
// Can't use indirect addressing mode as we need to check for overflow.
// Also, can't use 'lea' as it doesn't set the flags.
if (addr->gtOverflow())
{
return false;
}
GenTree* rv1 = nullptr;
GenTree* rv2 = nullptr;
GenTree* op1;
GenTree* op2;
ssize_t cns;
unsigned mul;
GenTree* tmp;
/* What order are the sub-operands to be evaluated */
if (addr->gtFlags & GTF_REVERSE_OPS)
{
op1 = addr->AsOp()->gtOp2;
op2 = addr->AsOp()->gtOp1;
}
else
{
op1 = addr->AsOp()->gtOp1;
op2 = addr->AsOp()->gtOp2;
}
bool rev = false; // Is op2 first in the evaluation order?
/*
A complex address mode can combine the following operands:
op1 ... base address
op2 ... optional scaled index
mul ... optional multiplier (2/4/8) for op2
cns ... optional displacement
Here we try to find such a set of operands and arrange for these
to sit in registers.
*/
cns = 0;
mul = 0;
AGAIN:
/* We come back to 'AGAIN' if we have an add of a constant, and we are folding that
constant, or we have gone through a GT_NOP or GT_COMMA node. We never come back
here if we find a scaled index.
*/
CLANG_FORMAT_COMMENT_ANCHOR;
assert(mul == 0);
    /* Special case: keep constants as 'op2', but don't do this for constant handles
       because they don't fit in the I32 check that we're going to do below anyway. */
if (op1->IsCnsIntOrI() && !op1->IsIconHandle())
{
// Presumably op2 is assumed to not be a constant (shouldn't happen if we've done constant folding)?
tmp = op1;
op1 = op2;
op2 = tmp;
}
/* Check for an addition of a constant */
if (op2->IsIntCnsFitsInI32() && (op2->gtType != TYP_REF) && FitsIn<INT32>(cns + op2->AsIntConCommon()->IconValue()))
{
// We should not be building address modes out of non-foldable constants
assert(op2->AsIntConCommon()->ImmedValCanBeFolded(compiler, addr->OperGet()));
/* We're adding a constant */
cns += op2->AsIntConCommon()->IconValue();
#if defined(TARGET_ARMARCH)
if (cns == 0)
#endif
{
/* Inspect the operand the constant is being added to */
switch (op1->gtOper)
{
case GT_ADD:
if (op1->gtOverflow())
{
break;
}
op2 = op1->AsOp()->gtOp2;
op1 = op1->AsOp()->gtOp1;
goto AGAIN;
#if !defined(TARGET_ARMARCH)
// TODO-ARM64-CQ, TODO-ARM-CQ: For now we don't try to create a scaled index.
case GT_MUL:
if (op1->gtOverflow())
{
return false; // Need overflow check
}
FALLTHROUGH;
case GT_LSH:
mul = op1->GetScaledIndex();
if (mul)
{
/* We can use "[mul*rv2 + icon]" */
rv1 = nullptr;
rv2 = op1->AsOp()->gtOp1;
goto FOUND_AM;
}
break;
#endif // !defined(TARGET_ARMARCH)
default:
break;
}
}
/* The best we can do is "[rv1 + icon]" */
rv1 = op1;
rv2 = nullptr;
goto FOUND_AM;
}
// op2 is not a constant. So keep on trying.
/* Neither op1 nor op2 are sitting in a register right now */
switch (op1->gtOper)
{
#if !defined(TARGET_ARMARCH)
// TODO-ARM64-CQ, TODO-ARM-CQ: For now we don't try to create a scaled index.
case GT_ADD:
if (op1->gtOverflow())
{
break;
}
if (op1->AsOp()->gtOp2->IsIntCnsFitsInI32() &&
FitsIn<INT32>(cns + op1->AsOp()->gtOp2->AsIntCon()->gtIconVal))
{
cns += op1->AsOp()->gtOp2->AsIntCon()->gtIconVal;
op1 = op1->AsOp()->gtOp1;
goto AGAIN;
}
break;
case GT_MUL:
if (op1->gtOverflow())
{
break;
}
FALLTHROUGH;
case GT_LSH:
mul = op1->GetScaledIndex();
if (mul)
{
/* 'op1' is a scaled value */
rv1 = op2;
rv2 = op1->AsOp()->gtOp1;
int argScale;
while ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (argScale = rv2->GetScaledIndex()) != 0)
{
if (jitIsScaleIndexMul(argScale * mul))
{
mul = mul * argScale;
rv2 = rv2->AsOp()->gtOp1;
}
else
{
break;
}
}
noway_assert(rev == false);
rev = true;
goto FOUND_AM;
}
break;
#endif // !TARGET_ARMARCH
case GT_NOP:
op1 = op1->AsOp()->gtOp1;
goto AGAIN;
case GT_COMMA:
op1 = op1->AsOp()->gtOp2;
goto AGAIN;
default:
break;
}
noway_assert(op2);
switch (op2->gtOper)
{
#if !defined(TARGET_ARMARCH)
// TODO-ARM64-CQ, TODO-ARM-CQ: For now we only handle MUL and LSH because
        // arm doesn't support both scale and offset at the same time. Offset is handled
// at the emitter as a peephole optimization.
case GT_ADD:
if (op2->gtOverflow())
{
break;
}
if (op2->AsOp()->gtOp2->IsIntCnsFitsInI32() &&
FitsIn<INT32>(cns + op2->AsOp()->gtOp2->AsIntCon()->gtIconVal))
{
cns += op2->AsOp()->gtOp2->AsIntCon()->gtIconVal;
op2 = op2->AsOp()->gtOp1;
goto AGAIN;
}
break;
case GT_MUL:
if (op2->gtOverflow())
{
break;
}
FALLTHROUGH;
case GT_LSH:
mul = op2->GetScaledIndex();
if (mul)
{
                // 'op2' is a scaled value... is its argument also scaled?
int argScale;
rv2 = op2->AsOp()->gtOp1;
while ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (argScale = rv2->GetScaledIndex()) != 0)
{
if (jitIsScaleIndexMul(argScale * mul))
{
mul = mul * argScale;
rv2 = rv2->AsOp()->gtOp1;
}
else
{
break;
}
}
rv1 = op1;
goto FOUND_AM;
}
break;
#endif // TARGET_ARMARCH
case GT_NOP:
op2 = op2->AsOp()->gtOp1;
goto AGAIN;
case GT_COMMA:
op2 = op2->AsOp()->gtOp2;
goto AGAIN;
default:
break;
}
/* The best we can do "[rv1 + rv2]" or "[rv1 + rv2 + cns]" */
rv1 = op1;
rv2 = op2;
#ifdef TARGET_ARM64
assert(cns == 0);
#endif
FOUND_AM:
if (rv2)
{
/* Make sure a GC address doesn't end up in 'rv2' */
if (varTypeIsGC(rv2->TypeGet()))
{
noway_assert(rv1 && !varTypeIsGC(rv1->TypeGet()));
tmp = rv1;
rv1 = rv2;
rv2 = tmp;
rev = !rev;
}
/* Special case: constant array index (that is range-checked) */
if (fold)
{
ssize_t tmpMul;
GenTree* index;
if ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (rv2->AsOp()->gtOp2->IsCnsIntOrI()))
{
/* For valuetype arrays where we can't use the scaled address
mode, rv2 will point to the scaled index. So we have to do
more work */
tmpMul = compiler->optGetArrayRefScaleAndIndex(rv2, &index DEBUGARG(false));
if (mul)
{
tmpMul *= mul;
}
}
else
{
                /* May be a simple array. rv2 will point to the actual index */
index = rv2;
tmpMul = mul;
}
/* Get hold of the array index and see if it's a constant */
if (index->IsIntCnsFitsInI32())
{
/* Get hold of the index value */
ssize_t ixv = index->AsIntConCommon()->IconValue();
/* Scale the index if necessary */
if (tmpMul)
{
ixv *= tmpMul;
}
if (FitsIn<INT32>(cns + ixv))
{
/* Add the scaled index to the offset value */
cns += ixv;
/* There is no scaled operand any more */
mul = 0;
rv2 = nullptr;
}
}
}
}
// We shouldn't have [rv2*1 + cns] - this is equivalent to [rv1 + cns]
noway_assert(rv1 || mul != 1);
noway_assert(FitsIn<INT32>(cns));
if (rv1 == nullptr && rv2 == nullptr)
{
return false;
}
/* Success - return the various components to the caller */
*revPtr = rev;
*rv1Ptr = rv1;
*rv2Ptr = rv2;
*mulPtr = mul;
*cnsPtr = cns;
return true;
}
/*****************************************************************************
*
* Generate an exit sequence for a return from a method (note: when compiling
* for speed there might be multiple exit points).
*/
void CodeGen::genExitCode(BasicBlock* block)
{
    /* Just wrote the first instruction of the epilog - inform the debugger.
       Note that this may result in a duplicate IPmapping entry, and
       that this is ok */
// For non-optimized debuggable code, there is only one epilog.
genIPmappingAdd(IPmappingDscKind::Epilog, DebugInfo(), true);
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
if (compiler->getNeedsGSSecurityCookie())
{
genEmitGSCookieCheck(jmpEpilog);
if (jmpEpilog)
{
// Dev10 642944 -
// The GS cookie check created a temp label that has no live
            //      incoming GC registers; we need to fix that
unsigned varNum;
LclVarDsc* varDsc;
/* Figure out which register parameters hold pointers */
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount && varDsc->lvIsRegArg;
varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
gcInfo.gcMarkRegPtrVal(varDsc->GetArgReg(), varDsc->TypeGet());
}
GetEmitter()->emitThisGCrefRegs = GetEmitter()->emitInitGCrefRegs = gcInfo.gcRegGCrefSetCur;
GetEmitter()->emitThisByrefRegs = GetEmitter()->emitInitByrefRegs = gcInfo.gcRegByrefSetCur;
}
}
genReserveEpilog(block);
}
//------------------------------------------------------------------------
// genJumpToThrowHlpBlk: Generate code for an out-of-line exception.
//
// Notes:
// For code that uses throw helper blocks, we share the helper blocks created by fgAddCodeRef().
// Otherwise, we generate the 'throw' inline.
//
// Arguments:
// jumpKind - jump kind to generate;
// codeKind - the special throw-helper kind;
// failBlk - optional fail target block, if it is already known;
//
void CodeGen::genJumpToThrowHlpBlk(emitJumpKind jumpKind, SpecialCodeKind codeKind, BasicBlock* failBlk)
{
bool useThrowHlpBlk = compiler->fgUseThrowHelperBlocks();
#if defined(UNIX_X86_ABI) && defined(FEATURE_EH_FUNCLETS)
// Inline exception-throwing code in funclet to make it possible to unwind funclet frames.
useThrowHlpBlk = useThrowHlpBlk && (compiler->funCurrentFunc()->funKind == FUNC_ROOT);
#endif // UNIX_X86_ABI && FEATURE_EH_FUNCLETS
if (useThrowHlpBlk)
{
// For code with throw helper blocks, find and use the helper block for
// raising the exception. The block may be shared by other trees too.
BasicBlock* excpRaisingBlock;
if (failBlk != nullptr)
{
// We already know which block to jump to. Use that.
excpRaisingBlock = failBlk;
#ifdef DEBUG
Compiler::AddCodeDsc* add =
compiler->fgFindExcptnTarget(codeKind, compiler->bbThrowIndex(compiler->compCurBB));
assert(excpRaisingBlock == add->acdDstBlk);
#if !FEATURE_FIXED_OUT_ARGS
assert(add->acdStkLvlInit || isFramePointerUsed());
#endif // !FEATURE_FIXED_OUT_ARGS
#endif // DEBUG
}
else
{
// Find the helper-block which raises the exception.
Compiler::AddCodeDsc* add =
compiler->fgFindExcptnTarget(codeKind, compiler->bbThrowIndex(compiler->compCurBB));
PREFIX_ASSUME_MSG((add != nullptr), ("ERROR: failed to find exception throw block"));
excpRaisingBlock = add->acdDstBlk;
#if !FEATURE_FIXED_OUT_ARGS
assert(add->acdStkLvlInit || isFramePointerUsed());
#endif // !FEATURE_FIXED_OUT_ARGS
}
noway_assert(excpRaisingBlock != nullptr);
// Jump to the exception-throwing block on error.
inst_JMP(jumpKind, excpRaisingBlock);
}
else
{
// The code to throw the exception will be generated inline, and
// we will jump around it in the normal non-exception case.
BasicBlock* tgtBlk = nullptr;
emitJumpKind reverseJumpKind = emitter::emitReverseJumpKind(jumpKind);
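        // If the jump kind is reversible, branch around the inline throw code on the normal
        // path; an unconditional jump kind cannot be reversed, so in that case the throw
        // code is emitted with no label to skip it.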
if (reverseJumpKind != jumpKind)
{
tgtBlk = genCreateTempLabel();
inst_JMP(reverseJumpKind, tgtBlk);
}
genEmitHelperCall(compiler->acdHelper(codeKind), 0, EA_UNKNOWN);
// Define the spot for the normal non-exception case to jump to.
if (tgtBlk != nullptr)
{
assert(reverseJumpKind != jumpKind);
genDefineTempLabel(tgtBlk);
}
}
}
/*****************************************************************************
*
* The last operation done was generating code for "tree" and that would
* have set the flags. Check if the operation caused an overflow.
*/
// inline
void CodeGen::genCheckOverflow(GenTree* tree)
{
// Overflow-check should be asked for this tree
noway_assert(tree->gtOverflow());
const var_types type = tree->TypeGet();
// Overflow checks can only occur for the non-small types: (i.e. TYP_INT,TYP_LONG)
noway_assert(!varTypeIsSmall(type));
emitJumpKind jumpKind;
#ifdef TARGET_ARM64
if (tree->OperGet() == GT_MUL)
{
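        // The multiply overflow check on ARM64 is materialized as an explicit compare of the
        // high bits of the result, so the throw path is taken on "not equal".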
jumpKind = EJ_ne;
}
else
#endif
{
bool isUnsignedOverflow = ((tree->gtFlags & GTF_UNSIGNED) != 0);
#if defined(TARGET_XARCH)
jumpKind = isUnsignedOverflow ? EJ_jb : EJ_jo;
#elif defined(TARGET_ARMARCH)
jumpKind = isUnsignedOverflow ? EJ_lo : EJ_vs;
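        // On ARM an unsigned subtract borrows when the carry flag is clear (EJ_lo), whereas
        // an unsigned add overflows when the carry flag is set (EJ_hs).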
if (jumpKind == EJ_lo)
{
if (tree->OperGet() != GT_SUB)
{
jumpKind = EJ_hs;
}
}
#endif // defined(TARGET_ARMARCH)
}
    // Jump to the block which will throw the exception
genJumpToThrowHlpBlk(jumpKind, SCK_OVERFLOW);
}
#if defined(FEATURE_EH_FUNCLETS)
/*****************************************************************************
*
* Update the current funclet as needed by calling genUpdateCurrentFunclet().
* For non-BBF_FUNCLET_BEG blocks, it asserts that the current funclet
* is up-to-date.
*
*/
void CodeGen::genUpdateCurrentFunclet(BasicBlock* block)
{
if (block->bbFlags & BBF_FUNCLET_BEG)
{
compiler->funSetCurrentFunc(compiler->funGetFuncIdx(block));
if (compiler->funCurrentFunc()->funKind == FUNC_FILTER)
{
assert(compiler->ehGetDsc(compiler->funCurrentFunc()->funEHIndex)->ebdFilter == block);
}
else
{
// We shouldn't see FUNC_ROOT
assert(compiler->funCurrentFunc()->funKind == FUNC_HANDLER);
assert(compiler->ehGetDsc(compiler->funCurrentFunc()->funEHIndex)->ebdHndBeg == block);
}
}
else
{
assert(compiler->compCurrFuncIdx <= compiler->compFuncInfoCount);
if (compiler->funCurrentFunc()->funKind == FUNC_FILTER)
{
assert(compiler->ehGetDsc(compiler->funCurrentFunc()->funEHIndex)->InFilterRegionBBRange(block));
}
else if (compiler->funCurrentFunc()->funKind == FUNC_ROOT)
{
assert(!block->hasHndIndex());
}
else
{
assert(compiler->funCurrentFunc()->funKind == FUNC_HANDLER);
assert(compiler->ehGetDsc(compiler->funCurrentFunc()->funEHIndex)->InHndRegionBBRange(block));
}
}
}
#endif // FEATURE_EH_FUNCLETS
//----------------------------------------------------------------------
// genGenerateCode: Generate code for the function.
//
// Arguments:
// codePtr [OUT] - address of generated code
// nativeSizeOfCode [OUT] - length of generated code in bytes
//
void CodeGen::genGenerateCode(void** codePtr, uint32_t* nativeSizeOfCode)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genGenerateCode()\n");
compiler->fgDispBasicBlocks(compiler->verboseTrees);
}
#endif
this->codePtr = codePtr;
this->nativeSizeOfCode = nativeSizeOfCode;
DoPhase(this, PHASE_GENERATE_CODE, &CodeGen::genGenerateMachineCode);
DoPhase(this, PHASE_EMIT_CODE, &CodeGen::genEmitMachineCode);
DoPhase(this, PHASE_EMIT_GCEH, &CodeGen::genEmitUnwindDebugGCandEH);
}
//----------------------------------------------------------------------
// genGenerateMachineCode -- determine which machine instructions to emit
//
void CodeGen::genGenerateMachineCode()
{
#ifdef DEBUG
genInterruptibleUsed = true;
compiler->fgDebugCheckBBlist();
#endif // DEBUG
/* This is the real thing */
genPrepForCompiler();
/* Prepare the emitter */
GetEmitter()->Init();
#ifdef DEBUG
VarSetOps::AssignNoCopy(compiler, genTempOldLife, VarSetOps::MakeEmpty(compiler));
#endif
#ifdef DEBUG
if (compiler->opts.disAsmSpilled && regSet.rsNeededSpillReg)
{
compiler->opts.disAsm = true;
}
if (compiler->opts.disAsm)
{
printf("; Assembly listing for method %s\n", compiler->info.compFullName);
printf("; Emitting ");
if (compiler->compCodeOpt() == Compiler::SMALL_CODE)
{
printf("SMALL_CODE");
}
else if (compiler->compCodeOpt() == Compiler::FAST_CODE)
{
printf("FAST_CODE");
}
else
{
printf("BLENDED_CODE");
}
printf(" for ");
if (compiler->info.genCPU == CPU_X86)
{
printf("generic X86 CPU");
}
else if (compiler->info.genCPU == CPU_X86_PENTIUM_4)
{
printf("Pentium 4");
}
else if (compiler->info.genCPU == CPU_X64)
{
if (compiler->canUseVexEncoding())
{
printf("X64 CPU with AVX");
}
else
{
printf("X64 CPU with SSE2");
}
}
else if (compiler->info.genCPU == CPU_ARM)
{
printf("generic ARM CPU");
}
else if (compiler->info.genCPU == CPU_ARM64)
{
printf("generic ARM64 CPU");
}
else
{
printf("unknown architecture");
}
if (TargetOS::IsWindows)
{
printf(" - Windows");
}
else if (TargetOS::IsMacOS)
{
printf(" - MacOS");
}
else if (TargetOS::IsUnix)
{
printf(" - Unix");
}
printf("\n");
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0))
{
printf("; Tier-0 compilation\n");
}
else if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1))
{
printf("; Tier-1 compilation\n");
}
else if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN))
{
printf("; ReadyToRun compilation\n");
}
if (compiler->opts.IsOSR())
{
printf("; OSR variant for entry point 0x%x\n", compiler->info.compILEntry);
}
if ((compiler->opts.compFlags & CLFLG_MAXOPT) == CLFLG_MAXOPT)
{
printf("; optimized code\n");
}
else if (compiler->opts.compDbgCode)
{
printf("; debuggable code\n");
}
else if (compiler->opts.MinOpts())
{
printf("; MinOpts code\n");
}
else
{
printf("; unknown optimization flags\n");
}
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR))
{
printf("; instrumented for collecting profile data\n");
}
else if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) && compiler->fgHaveProfileData())
{
printf("; optimized using profile data\n");
}
#if DOUBLE_ALIGN
if (compiler->genDoubleAlign())
printf("; double-aligned frame\n");
else
#endif
printf("; %s based frame\n", isFramePointerUsed() ? STR_FPBASE : STR_SPBASE);
if (GetInterruptible())
{
printf("; fully interruptible\n");
}
else
{
printf("; partially interruptible\n");
}
if (compiler->fgHaveProfileData())
{
printf("; with PGO: edge weights are %s, and fgCalledCount is " FMT_WT "\n",
compiler->fgHaveValidEdgeWeights ? "valid" : "invalid", compiler->fgCalledCount);
}
if (compiler->fgPgoFailReason != nullptr)
{
printf("; %s\n", compiler->fgPgoFailReason);
}
if ((compiler->fgPgoInlineePgo + compiler->fgPgoInlineeNoPgo + compiler->fgPgoInlineeNoPgoSingleBlock) > 0)
{
printf("; %u inlinees with PGO data; %u single block inlinees; %u inlinees without PGO data\n",
compiler->fgPgoInlineePgo, compiler->fgPgoInlineeNoPgoSingleBlock, compiler->fgPgoInlineeNoPgo);
}
if (compiler->opts.IsCFGEnabled())
{
printf("; control-flow guard enabled\n");
}
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT))
{
printf("; invoked as altjit\n");
}
}
#endif // DEBUG
// We compute the final frame layout before code generation. This is because LSRA
// has already computed exactly the maximum concurrent number of spill temps of each type that are
// required during code generation. So, there is nothing left to estimate: we can be precise in the frame
// layout. This helps us generate smaller code, and allocate, after code generation, a smaller amount of
// memory from the VM.
genFinalizeFrame();
unsigned maxTmpSize = regSet.tmpGetTotalSize(); // This is precise after LSRA has pre-allocated the temps.
GetEmitter()->emitBegFN(isFramePointerUsed()
#if defined(DEBUG)
,
(compiler->compCodeOpt() != Compiler::SMALL_CODE) &&
!compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)
#endif
,
maxTmpSize);
/* Now generate code for the function */
genCodeForBBlist();
#ifdef DEBUG
// After code generation, dump the frame layout again. It should be the same as before code generation, if code
// generation hasn't touched it (it shouldn't!).
if (verbose)
{
compiler->lvaTableDump();
}
#endif // DEBUG
/* We can now generate the function prolog and epilog */
genGeneratePrologsAndEpilogs();
/* Bind jump distances */
GetEmitter()->emitJumpDistBind();
#if FEATURE_LOOP_ALIGN
/* Perform alignment adjustments */
GetEmitter()->emitLoopAlignAdjustments();
#endif
/* The code is now complete and final; it should not change after this. */
}
//----------------------------------------------------------------------
// genEmitMachineCode -- emit the actual machine instruction code
//
void CodeGen::genEmitMachineCode()
{
/* Compute the size of the code sections that we are going to ask the VM
to allocate. Note that this might not be precisely the size of the
code we emit, though it's fatal if we emit more code than the size we
compute here.
(Note: an example of a case where we emit less code would be useful.)
*/
GetEmitter()->emitComputeCodeSizes();
#ifdef DEBUG
unsigned instrCount;
// Code to test or stress our ability to run a fallback compile.
// We trigger the fallback here, before asking the VM for any memory,
// because if not, we will leak mem, as the current codebase can't free
// the mem after the emitter asks the VM for it. As this is only a stress
// mode, we only want the functionality, and don't care about the relative
// ugliness of having the failure here.
if (!compiler->jitFallbackCompile)
{
// Use COMPlus_JitNoForceFallback=1 to prevent NOWAY assert testing from happening,
// especially that caused by enabling JIT stress.
if (!JitConfig.JitNoForceFallback())
{
if (JitConfig.JitForceFallback() || compiler->compStressCompile(Compiler::STRESS_GENERIC_VARN, 5))
{
JITDUMP("\n\n*** forcing no-way fallback -- current jit request will be abandoned ***\n\n");
NO_WAY_NOASSERT("Stress failure");
}
}
}
#endif // DEBUG
/* We've finished collecting all the unwind information for the function. Now reserve
space for it from the VM.
*/
compiler->unwindReserve();
bool trackedStackPtrsContig; // are tracked stk-ptrs contiguous ?
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
trackedStackPtrsContig = false;
#elif defined(TARGET_ARM)
// On arm due to prespilling of arguments, tracked stk-ptrs may not be contiguous
trackedStackPtrsContig = !compiler->opts.compDbgEnC && !compiler->compIsProfilerHookNeeded();
#else
trackedStackPtrsContig = !compiler->opts.compDbgEnC;
#endif
codeSize = GetEmitter()->emitEndCodeGen(compiler, trackedStackPtrsContig, GetInterruptible(),
IsFullPtrRegMapRequired(), compiler->compHndBBtabCount, &prologSize,
&epilogSize, codePtr, &coldCodePtr, &consPtr DEBUGARG(&instrCount));
#ifdef DEBUG
assert(compiler->compCodeGenDone == false);
/* We're done generating code for this function */
compiler->compCodeGenDone = true;
#endif
#if defined(DEBUG) || defined(LATE_DISASM)
// Add code size information into the Perf Score
// All compPerfScore calculations must be performed using doubles
compiler->info.compPerfScore += ((double)compiler->info.compTotalHotCodeSize * (double)PERFSCORE_CODESIZE_COST_HOT);
compiler->info.compPerfScore +=
((double)compiler->info.compTotalColdCodeSize * (double)PERFSCORE_CODESIZE_COST_COLD);
#endif // DEBUG || LATE_DISASM
#ifdef DEBUG
if (compiler->opts.disAsm || verbose)
{
printf("\n; Total bytes of code %d, prolog size %d, PerfScore %.2f, instruction count %d, allocated bytes for "
"code %d",
codeSize, prologSize, compiler->info.compPerfScore, instrCount,
GetEmitter()->emitTotalHotCodeSize + GetEmitter()->emitTotalColdCodeSize);
#if TRACK_LSRA_STATS
if (JitConfig.DisplayLsraStats() == 3)
{
compiler->m_pLinearScan->dumpLsraStatsSummary(jitstdout);
}
#endif // TRACK_LSRA_STATS
printf(" (MethodHash=%08x) for method %s\n", compiler->info.compMethodHash(), compiler->info.compFullName);
printf("; ============================================================\n\n");
printf(""); // in our logic this causes a flush
}
if (verbose)
{
printf("*************** After end code gen, before unwindEmit()\n");
GetEmitter()->emitDispIGlist(true);
}
#endif
#if EMIT_TRACK_STACK_DEPTH && defined(DEBUG_ARG_SLOTS)
// Check our max stack level. Needed for fgAddCodeRef().
// We need to relax the assert as our estimation won't include code-gen
// stack changes (which we know don't affect fgAddCodeRef()).
// NOTE: after emitEndCodeGen (including here), emitMaxStackDepth is a
// count of DWORD-sized arguments, NOT argument size in bytes.
{
unsigned maxAllowedStackDepth = compiler->fgGetPtrArgCntMax() + // Max number of pointer-sized stack arguments.
compiler->compHndBBtabCount + // Return address for locally-called finallys
genTypeStSz(TYP_LONG) + // longs/doubles may be transferred via stack, etc
(compiler->compTailCallUsed ? 4 : 0); // CORINFO_HELP_TAILCALL args
#if defined(UNIX_X86_ABI)
// Convert maxNestedAlignment to DWORD count before adding to maxAllowedStackDepth.
assert(maxNestedAlignment % sizeof(int) == 0);
maxAllowedStackDepth += maxNestedAlignment / sizeof(int);
#endif
assert(GetEmitter()->emitMaxStackDepth <= maxAllowedStackDepth);
}
#endif // EMIT_TRACK_STACK_DEPTH && DEBUG
*nativeSizeOfCode = codeSize;
compiler->info.compNativeCodeSize = (UNATIVE_OFFSET)codeSize;
// printf("%6u bytes of code generated for %s.%s\n", codeSize, compiler->info.compFullName);
// Make sure that the x86 alignment and cache prefetch optimization rules
// were obeyed.
// Don't start a method in the last 7 bytes of a 16-byte alignment area
// unless we are generating SMALL_CODE
// noway_assert( (((unsigned)(*codePtr) % 16) <= 8) || (compiler->compCodeOpt() == SMALL_CODE));
}
//----------------------------------------------------------------------
// genEmitUnwindDebugGCandEH: emit unwind, debug, gc, and EH info
//
void CodeGen::genEmitUnwindDebugGCandEH()
{
/* Now that the code is issued, we can finalize and emit the unwind data */
compiler->unwindEmit(*codePtr, coldCodePtr);
/* Finalize the line # tracking logic after we know the exact block sizes/offsets */
genIPmappingGen();
INDEBUG(genDumpPreciseDebugInfo());
/* Finalize the Local Var info in terms of generated code */
genSetScopeInfo();
#if defined(USING_VARIABLE_LIVE_RANGE) && defined(DEBUG)
if (compiler->verbose)
{
varLiveKeeper->dumpLvaVariableLiveRanges();
}
#endif // defined(USING_VARIABLE_LIVE_RANGE) && defined(DEBUG)
#ifdef LATE_DISASM
unsigned finalHotCodeSize;
unsigned finalColdCodeSize;
if (compiler->fgFirstColdBlock != nullptr)
{
// We did some hot/cold splitting. The hot section is always padded out to the
// size we thought it would be, but the cold section is not.
assert(codeSize <= compiler->info.compTotalHotCodeSize + compiler->info.compTotalColdCodeSize);
assert(compiler->info.compTotalHotCodeSize > 0);
assert(compiler->info.compTotalColdCodeSize > 0);
finalHotCodeSize = compiler->info.compTotalHotCodeSize;
finalColdCodeSize = codeSize - finalHotCodeSize;
}
else
{
// No hot/cold splitting
assert(codeSize <= compiler->info.compTotalHotCodeSize);
assert(compiler->info.compTotalHotCodeSize > 0);
assert(compiler->info.compTotalColdCodeSize == 0);
finalHotCodeSize = codeSize;
finalColdCodeSize = 0;
}
getDisAssembler().disAsmCode((BYTE*)*codePtr, finalHotCodeSize, (BYTE*)coldCodePtr, finalColdCodeSize);
#endif // LATE_DISASM
/* Report any exception handlers to the VM */
genReportEH();
#ifdef JIT32_GCENCODER
#ifdef DEBUG
void* infoPtr =
#endif // DEBUG
#endif
// Create and store the GC info for this method.
genCreateAndStoreGCInfo(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
#ifdef DEBUG
FILE* dmpf = jitstdout;
compiler->opts.dmpHex = false;
    if (!strcmp(compiler->info.compMethodName, "<name of method you want the hex dump for>"))
{
FILE* codf;
errno_t ec = fopen_s(&codf, "C:\\JIT.COD", "at"); // NOTE: file append mode
        if (ec == 0) // fopen_s returns zero on success
{
assert(codf);
dmpf = codf;
compiler->opts.dmpHex = true;
}
}
if (compiler->opts.dmpHex)
{
size_t consSize = GetEmitter()->emitDataSize();
fprintf(dmpf, "Generated code for %s:\n", compiler->info.compFullName);
fprintf(dmpf, "\n");
if (codeSize)
{
fprintf(dmpf, " Code at %p [%04X bytes]\n", dspPtr(*codePtr), codeSize);
}
if (consSize)
{
fprintf(dmpf, " Const at %p [%04X bytes]\n", dspPtr(consPtr), consSize);
}
#ifdef JIT32_GCENCODER
size_t infoSize = compiler->compInfoBlkSize;
if (infoSize)
fprintf(dmpf, " Info at %p [%04X bytes]\n", dspPtr(infoPtr), infoSize);
#endif // JIT32_GCENCODER
fprintf(dmpf, "\n");
if (codeSize)
{
hexDump(dmpf, "Code", (BYTE*)*codePtr, codeSize);
}
if (consSize)
{
hexDump(dmpf, "Const", (BYTE*)consPtr, consSize);
}
#ifdef JIT32_GCENCODER
if (infoSize)
hexDump(dmpf, "Info", (BYTE*)infoPtr, infoSize);
#endif // JIT32_GCENCODER
fflush(dmpf);
}
if (dmpf != jitstdout)
{
fclose(dmpf);
}
#endif // DEBUG
/* Tell the emitter that we're done with this function */
GetEmitter()->emitEndFN();
/* Shut down the spill logic */
regSet.rsSpillDone();
/* Shut down the temp logic */
regSet.tmpDone();
#if DISPLAY_SIZES
size_t dataSize = GetEmitter()->emitDataSize();
grossVMsize += compiler->info.compILCodeSize;
totalNCsize += codeSize + dataSize + compiler->compInfoBlkSize;
grossNCsize += codeSize + dataSize;
#endif // DISPLAY_SIZES
}
/*****************************************************************************
*
* Report EH clauses to the VM
*/
void CodeGen::genReportEH()
{
if (compiler->compHndBBtabCount == 0)
{
return;
}
#ifdef DEBUG
if (compiler->opts.dspEHTable)
{
printf("*************** EH table for %s\n", compiler->info.compFullName);
}
#endif // DEBUG
unsigned XTnum;
bool isCoreRTABI = compiler->IsTargetAbi(CORINFO_CORERT_ABI);
unsigned EHCount = compiler->compHndBBtabCount;
#if defined(FEATURE_EH_FUNCLETS)
// Count duplicated clauses. This uses the same logic as below, where we actually generate them for reporting to the
// VM.
unsigned duplicateClauseCount = 0;
unsigned enclosingTryIndex;
// Duplicate clauses are not used by CoreRT ABI
if (!isCoreRTABI)
{
for (XTnum = 0; XTnum < compiler->compHndBBtabCount; XTnum++)
{
for (enclosingTryIndex = compiler->ehTrueEnclosingTryIndexIL(XTnum); // find the true enclosing try index,
// ignoring 'mutual protect' trys
enclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX;
enclosingTryIndex = compiler->ehGetEnclosingTryIndex(enclosingTryIndex))
{
++duplicateClauseCount;
}
}
EHCount += duplicateClauseCount;
}
#if FEATURE_EH_CALLFINALLY_THUNKS
unsigned clonedFinallyCount = 0;
// Duplicate clauses are not used by CoreRT ABI
if (!isCoreRTABI)
{
        // We don't keep track of how many cloned finallys there are. So, go through and count.
// We do a quick pass first through the EH table to see if there are any try/finally
// clauses. If there aren't, we don't need to look for BBJ_CALLFINALLY.
bool anyFinallys = false;
for (EHblkDsc* const HBtab : EHClauses(compiler))
{
if (HBtab->HasFinallyHandler())
{
anyFinallys = true;
break;
}
}
if (anyFinallys)
{
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
++clonedFinallyCount;
}
}
EHCount += clonedFinallyCount;
}
}
#endif // FEATURE_EH_CALLFINALLY_THUNKS
#endif // FEATURE_EH_FUNCLETS
#ifdef DEBUG
if (compiler->opts.dspEHTable)
{
#if defined(FEATURE_EH_FUNCLETS)
#if FEATURE_EH_CALLFINALLY_THUNKS
printf("%d EH table entries, %d duplicate clauses, %d cloned finallys, %d total EH entries reported to VM\n",
compiler->compHndBBtabCount, duplicateClauseCount, clonedFinallyCount, EHCount);
assert(compiler->compHndBBtabCount + duplicateClauseCount + clonedFinallyCount == EHCount);
#else // !FEATURE_EH_CALLFINALLY_THUNKS
printf("%d EH table entries, %d duplicate clauses, %d total EH entries reported to VM\n",
compiler->compHndBBtabCount, duplicateClauseCount, EHCount);
assert(compiler->compHndBBtabCount + duplicateClauseCount == EHCount);
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
#else // !FEATURE_EH_FUNCLETS
printf("%d EH table entries, %d total EH entries reported to VM\n", compiler->compHndBBtabCount, EHCount);
assert(compiler->compHndBBtabCount == EHCount);
#endif // !FEATURE_EH_FUNCLETS
}
#endif // DEBUG
// Tell the VM how many EH clauses to expect.
compiler->eeSetEHcount(EHCount);
XTnum = 0; // This is the index we pass to the VM
for (EHblkDsc* const HBtab : EHClauses(compiler))
{
UNATIVE_OFFSET tryBeg, tryEnd, hndBeg, hndEnd, hndTyp;
tryBeg = compiler->ehCodeOffset(HBtab->ebdTryBeg);
hndBeg = compiler->ehCodeOffset(HBtab->ebdHndBeg);
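        // The 'end' offsets are exclusive: use the code offset of the block that follows the
        // region, or the end of the method if the region extends to the last basic block.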
tryEnd = (HBtab->ebdTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(HBtab->ebdTryLast->bbNext);
hndEnd = (HBtab->ebdHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(HBtab->ebdHndLast->bbNext);
if (HBtab->HasFilter())
{
hndTyp = compiler->ehCodeOffset(HBtab->ebdFilter);
}
else
{
hndTyp = HBtab->ebdTyp;
}
CORINFO_EH_CLAUSE_FLAGS flags = ToCORINFO_EH_CLAUSE_FLAGS(HBtab->ebdHandlerType);
if (isCoreRTABI && (XTnum > 0))
{
            // For CoreRT, the CORINFO_EH_CLAUSE_SAMETRY flag means that the current clause covers the same
            // try block as the previous one. The runtime cannot reliably infer this information from
            // native code offsets because different try blocks can have the same offsets. An alternative
            // solution to this problem would be inserting extra nops to ensure that different try
            // blocks have different offsets.
if (EHblkDsc::ebdIsSameTry(HBtab, HBtab - 1))
{
// The SAMETRY bit should only be set on catch clauses. This is ensured in IL, where only 'catch' is
// allowed to be mutually-protect. E.g., the C# "try {} catch {} catch {} finally {}" actually exists in
// IL as "try { try {} catch {} catch {} } finally {}".
assert(HBtab->HasCatchHandler());
flags = (CORINFO_EH_CLAUSE_FLAGS)(flags | CORINFO_EH_CLAUSE_SAMETRY);
}
}
// Note that we reuse the CORINFO_EH_CLAUSE type, even though the names of
// the fields aren't accurate.
CORINFO_EH_CLAUSE clause;
clause.ClassToken = hndTyp; /* filter offset is passed back here for filter-based exception handlers */
clause.Flags = flags;
clause.TryOffset = tryBeg;
clause.TryLength = tryEnd;
clause.HandlerOffset = hndBeg;
clause.HandlerLength = hndEnd;
assert(XTnum < EHCount);
// Tell the VM about this EH clause.
compiler->eeSetEHinfo(XTnum, &clause);
++XTnum;
}
#if defined(FEATURE_EH_FUNCLETS)
// Now output duplicated clauses.
//
// If a funclet has been created by moving a handler out of a try region that it was originally nested
// within, then we need to report a "duplicate" clause representing the fact that an exception in that
// handler can be caught by the 'try' it has been moved out of. This is because the original 'try' region
// descriptor can only specify a single, contiguous protected range, but the funclet we've moved out is
// no longer contiguous with the original 'try' region. The new EH descriptor will have the same handler
// region as the enclosing try region's handler region. This is the sense in which it is duplicated:
// there is now a "duplicate" clause with the same handler region as another, but a different 'try'
// region.
//
// For example, consider this (capital letters represent an unknown code sequence, numbers identify a
// try or handler region):
//
// A
// try (1) {
// B
// try (2) {
// C
// } catch (3) {
// D
// } catch (4) {
// E
// }
// F
// } catch (5) {
// G
// }
// H
//
// Here, we have try region (1) BCDEF protected by catch (5) G, and region (2) C protected
// by catch (3) D and catch (4) E. Note that catch (4) E does *NOT* protect the code "D".
// This is an example of 'mutually protect' regions. First, we move handlers (3) and (4)
// to the end of the code. However, (3) and (4) are nested inside, and protected by, try (1). Again
// note that (3) is not nested inside (4), despite ebdEnclosingTryIndex indicating that.
// The code "D" and "E" won't be contiguous with the protected region for try (1) (which
// will, after moving catch (3) AND (4), be BCF). Thus, we need to add a new EH descriptor
// representing try (1) protecting the new funclets catch (3) and (4).
// The code will be generated as follows:
//
// ABCFH // "main" code
// D // funclet
// E // funclet
// G // funclet
//
// The EH regions are:
//
// C -> D
// C -> E
// BCF -> G
// D -> G // "duplicate" clause
// E -> G // "duplicate" clause
//
// Note that we actually need to generate one of these additional "duplicate" clauses for every
// region the funclet is nested in. Take this example:
//
// A
// try (1) {
// B
// try (2,3) {
// C
// try (4) {
// D
// try (5,6) {
// E
// } catch {
// F
// } catch {
// G
// }
// H
// } catch {
// I
// }
// J
// } catch {
// K
// } catch {
// L
// }
// M
// } catch {
// N
// }
// O
//
// When we pull out funclets, we get the following generated code:
//
// ABCDEHJMO // "main" function
// F // funclet
// G // funclet
// I // funclet
// K // funclet
// L // funclet
// N // funclet
//
// And the EH regions we report to the VM are (in order; main clauses
// first in most-to-least nested order, funclets ("duplicated clauses")
// last, in most-to-least nested) are:
//
// E -> F
// E -> G
// DEH -> I
// CDEHJ -> K
// CDEHJ -> L
// BCDEHJM -> N
// F -> I // funclet clause #1 for F
// F -> K // funclet clause #2 for F
// F -> L // funclet clause #3 for F
// F -> N // funclet clause #4 for F
// G -> I // funclet clause #1 for G
// G -> K // funclet clause #2 for G
// G -> L // funclet clause #3 for G
// G -> N // funclet clause #4 for G
// I -> K // funclet clause #1 for I
// I -> L // funclet clause #2 for I
// I -> N // funclet clause #3 for I
// K -> N // funclet clause #1 for K
// L -> N // funclet clause #1 for L
//
// So whereas the IL had 6 EH clauses, we need to report 19 EH clauses to the VM.
// Note that due to the nature of 'mutually protect' clauses, it would be incorrect
// to add a clause "F -> G" because F is NOT protected by G, but we still have
// both "F -> K" and "F -> L" because F IS protected by both of those handlers.
//
// The overall ordering of the clauses is still the same most-to-least nesting
// after front-to-back start offset. Because we place the funclets at the end
// these new clauses should also go at the end by this ordering.
//
if (duplicateClauseCount > 0)
{
unsigned reportedDuplicateClauseCount = 0; // How many duplicated clauses have we reported?
unsigned XTnum2;
EHblkDsc* HBtab;
for (XTnum2 = 0, HBtab = compiler->compHndBBtab; XTnum2 < compiler->compHndBBtabCount; XTnum2++, HBtab++)
{
unsigned enclosingTryIndex;
EHblkDsc* fletTab = compiler->ehGetDsc(XTnum2);
for (enclosingTryIndex = compiler->ehTrueEnclosingTryIndexIL(XTnum2); // find the true enclosing try index,
// ignoring 'mutual protect' trys
enclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX;
enclosingTryIndex = compiler->ehGetEnclosingTryIndex(enclosingTryIndex))
{
// The funclet we moved out is nested in a try region, so create a new EH descriptor for the funclet
// that will have the enclosing try protecting the funclet.
noway_assert(XTnum2 < enclosingTryIndex); // the enclosing region must be less nested, and hence have a
// greater EH table index
EHblkDsc* encTab = compiler->ehGetDsc(enclosingTryIndex);
// The try region is the handler of the funclet. Note that for filters, we don't protect the
// filter region, only the filter handler region. This is because exceptions in filters never
// escape; the VM swallows them.
BasicBlock* bbTryBeg = fletTab->ebdHndBeg;
BasicBlock* bbTryLast = fletTab->ebdHndLast;
BasicBlock* bbHndBeg = encTab->ebdHndBeg; // The handler region is the same as the enclosing try
BasicBlock* bbHndLast = encTab->ebdHndLast;
UNATIVE_OFFSET tryBeg, tryEnd, hndBeg, hndEnd, hndTyp;
tryBeg = compiler->ehCodeOffset(bbTryBeg);
hndBeg = compiler->ehCodeOffset(bbHndBeg);
tryEnd = (bbTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(bbTryLast->bbNext);
hndEnd = (bbHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
: compiler->ehCodeOffset(bbHndLast->bbNext);
if (encTab->HasFilter())
{
hndTyp = compiler->ehCodeOffset(encTab->ebdFilter);
}
else
{
hndTyp = encTab->ebdTyp;
}
CORINFO_EH_CLAUSE_FLAGS flags = ToCORINFO_EH_CLAUSE_FLAGS(encTab->ebdHandlerType);
// Tell the VM this is an extra clause caused by moving funclets out of line.
flags = (CORINFO_EH_CLAUSE_FLAGS)(flags | CORINFO_EH_CLAUSE_DUPLICATE);
// Note that the JIT-EE interface reuses the CORINFO_EH_CLAUSE type, even though the names of
// the fields aren't really accurate. For example, we set "TryLength" to the offset of the
// instruction immediately after the 'try' body. So, it really could be more accurately named
// "TryEndOffset".
CORINFO_EH_CLAUSE clause;
clause.ClassToken = hndTyp; /* filter offset is passed back here for filter-based exception handlers */
clause.Flags = flags;
clause.TryOffset = tryBeg;
clause.TryLength = tryEnd;
clause.HandlerOffset = hndBeg;
clause.HandlerLength = hndEnd;
assert(XTnum < EHCount);
// Tell the VM about this EH clause (a duplicated clause).
compiler->eeSetEHinfo(XTnum, &clause);
++XTnum;
++reportedDuplicateClauseCount;
#ifndef DEBUG
if (duplicateClauseCount == reportedDuplicateClauseCount)
{
break; // we've reported all of them; no need to continue looking
}
#endif // !DEBUG
} // for each 'true' enclosing 'try'
} // for each EH table entry
assert(duplicateClauseCount == reportedDuplicateClauseCount);
} // if (duplicateClauseCount > 0)
#if FEATURE_EH_CALLFINALLY_THUNKS
if (clonedFinallyCount > 0)
{
unsigned reportedClonedFinallyCount = 0;
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
UNATIVE_OFFSET hndBeg, hndEnd;
hndBeg = compiler->ehCodeOffset(block);
// How big is it? The BBJ_ALWAYS has a null bbEmitCookie! Look for the block after, which must be
// a label or jump target, since the BBJ_CALLFINALLY doesn't fall through.
BasicBlock* bbLabel = block->bbNext;
if (block->isBBCallAlwaysPair())
{
bbLabel = bbLabel->bbNext; // skip the BBJ_ALWAYS
}
if (bbLabel == nullptr)
{
hndEnd = compiler->info.compNativeCodeSize;
}
else
{
assert(bbLabel->bbEmitCookie != nullptr);
hndEnd = compiler->ehCodeOffset(bbLabel);
}
CORINFO_EH_CLAUSE clause;
clause.ClassToken = 0; // unused
clause.Flags = (CORINFO_EH_CLAUSE_FLAGS)(CORINFO_EH_CLAUSE_FINALLY | CORINFO_EH_CLAUSE_DUPLICATE);
clause.TryOffset = hndBeg;
clause.TryLength = hndBeg;
clause.HandlerOffset = hndBeg;
clause.HandlerLength = hndEnd;
assert(XTnum < EHCount);
// Tell the VM about this EH clause (a cloned finally clause).
compiler->eeSetEHinfo(XTnum, &clause);
++XTnum;
++reportedClonedFinallyCount;
#ifndef DEBUG
if (clonedFinallyCount == reportedClonedFinallyCount)
{
break; // we're done; no need to keep looking
}
#endif // !DEBUG
} // block is BBJ_CALLFINALLY
} // for each block
assert(clonedFinallyCount == reportedClonedFinallyCount);
} // if (clonedFinallyCount > 0)
#endif // FEATURE_EH_CALLFINALLY_THUNKS
#endif // FEATURE_EH_FUNCLETS
assert(XTnum == EHCount);
}
//----------------------------------------------------------------------
// genUseOptimizedWriteBarriers: Determine if an optimized write barrier
// helper should be used.
//
// Arguments:
// wbf - The WriteBarrierForm of the write (GT_STOREIND) that is happening.
//
// Return Value:
// true if an optimized write barrier helper should be used, false otherwise.
// Note: only x86 implements register-specific source optimized write
// barriers currently.
//
bool CodeGenInterface::genUseOptimizedWriteBarriers(GCInfo::WriteBarrierForm wbf)
{
#if defined(TARGET_X86) && NOGC_WRITE_BARRIERS
#ifdef DEBUG
return (wbf != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug); // This one is always a call to a C++ method.
#else
return true;
#endif
#else
return false;
#endif
}
//----------------------------------------------------------------------
// genUseOptimizedWriteBarriers: Determine if an optimized write barrier
// helper should be used.
//
// This has the same functionality as the version of
// genUseOptimizedWriteBarriers that takes a WriteBarrierForm, but avoids
// determining what the required write barrier form is, if possible.
//
// Arguments:
// tgt - target tree of write (e.g., GT_STOREIND)
// assignVal - tree with value to write
//
// Return Value:
// true if an optimized write barrier helper should be used, false otherwise.
// Note: only x86 implements register-specific source optimized write
// barriers currently.
//
bool CodeGenInterface::genUseOptimizedWriteBarriers(GenTree* tgt, GenTree* assignVal)
{
#if defined(TARGET_X86) && NOGC_WRITE_BARRIERS
#ifdef DEBUG
GCInfo::WriteBarrierForm wbf = compiler->codeGen->gcInfo.gcIsWriteBarrierCandidate(tgt, assignVal);
return (wbf != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug); // This one is always a call to a C++ method.
#else
return true;
#endif
#else
return false;
#endif
}
//----------------------------------------------------------------------
// genWriteBarrierHelperForWriteBarrierForm: Given a write node requiring a write
// barrier, and the write barrier form required, determine the helper to call.
//
// Arguments:
// tgt - target tree of write (e.g., GT_STOREIND)
// wbf - already computed write barrier form to use
//
// Return Value:
// Write barrier helper to use.
//
// Note: do not call this function to get an optimized write barrier helper (e.g.,
// for x86).
//
CorInfoHelpFunc CodeGenInterface::genWriteBarrierHelperForWriteBarrierForm(GenTree* tgt, GCInfo::WriteBarrierForm wbf)
{
noway_assert(tgt->gtOper == GT_STOREIND);
CorInfoHelpFunc helper = CORINFO_HELP_ASSIGN_REF;
#ifdef DEBUG
if (wbf == GCInfo::WBF_NoBarrier_CheckNotHeapInDebug)
{
helper = CORINFO_HELP_ASSIGN_REF_ENSURE_NONHEAP;
}
else
#endif
if (tgt->gtOper != GT_CLS_VAR)
{
if (wbf != GCInfo::WBF_BarrierUnchecked) // This overrides the tests below.
{
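            // The destination may not point into the GC heap (e.g. a byref that could refer to
            // the stack), so use the checked barrier, which validates the target address before
            // updating any GC bookkeeping.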
if (tgt->gtFlags & GTF_IND_TGTANYWHERE)
{
helper = CORINFO_HELP_CHECKED_ASSIGN_REF;
}
else if (tgt->AsOp()->gtOp1->TypeGet() == TYP_I_IMPL)
{
helper = CORINFO_HELP_CHECKED_ASSIGN_REF;
}
}
}
assert(((helper == CORINFO_HELP_ASSIGN_REF_ENSURE_NONHEAP) && (wbf == GCInfo::WBF_NoBarrier_CheckNotHeapInDebug)) ||
((helper == CORINFO_HELP_CHECKED_ASSIGN_REF) &&
(wbf == GCInfo::WBF_BarrierChecked || wbf == GCInfo::WBF_BarrierUnknown)) ||
((helper == CORINFO_HELP_ASSIGN_REF) &&
(wbf == GCInfo::WBF_BarrierUnchecked || wbf == GCInfo::WBF_BarrierUnknown)));
return helper;
}
//----------------------------------------------------------------------
// genGCWriteBarrier: Generate a write barrier for a node.
//
// Arguments:
// tgt - target tree of write (e.g., GT_STOREIND)
// wbf - already computed write barrier form to use
//
void CodeGen::genGCWriteBarrier(GenTree* tgt, GCInfo::WriteBarrierForm wbf)
{
CorInfoHelpFunc helper = genWriteBarrierHelperForWriteBarrierForm(tgt, wbf);
#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
// We classify the "tgt" trees as follows:
// If "tgt" is of the form (where [ x ] indicates an optional x, and { x1, ..., xn } means "one of the x_i forms"):
// IND [-> ADDR -> IND] -> { GT_LCL_VAR, ADD({GT_LCL_VAR}, X), ADD(X, (GT_LCL_VAR)) }
// then let "v" be the GT_LCL_VAR.
// * If "v" is the return buffer argument, classify as CWBKind_RetBuf.
// * If "v" is another by-ref argument, classify as CWBKind_ByRefArg.
// * Otherwise, classify as CWBKind_OtherByRefLocal.
// If "tgt" is of the form IND -> ADDR -> GT_LCL_VAR, clasify as CWBKind_AddrOfLocal.
// Otherwise, classify as CWBKind_Unclassified.
CheckedWriteBarrierKinds wbKind = CWBKind_Unclassified;
if (tgt->gtOper == GT_IND)
{
GenTree* lcl = NULL;
GenTree* indArg = tgt->AsOp()->gtOp1;
if (indArg->gtOper == GT_ADDR && indArg->AsOp()->gtOp1->gtOper == GT_IND)
{
indArg = indArg->AsOp()->gtOp1->AsOp()->gtOp1;
}
if (indArg->gtOper == GT_LCL_VAR)
{
lcl = indArg;
}
else if (indArg->gtOper == GT_ADD)
{
if (indArg->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
lcl = indArg->AsOp()->gtOp1;
}
else if (indArg->AsOp()->gtOp2->gtOper == GT_LCL_VAR)
{
lcl = indArg->AsOp()->gtOp2;
}
}
if (lcl != NULL)
{
wbKind = CWBKind_OtherByRefLocal; // Unclassified local variable.
unsigned lclNum = lcl->AsLclVar()->GetLclNum();
if (lclNum == compiler->info.compRetBuffArg)
{
wbKind = CWBKind_RetBuf; // Ret buff. Can happen if the struct exceeds the size limit.
}
else
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
if (varDsc->lvIsParam && varDsc->lvType == TYP_BYREF)
{
wbKind = CWBKind_ByRefArg; // Out (or in/out) arg
}
}
}
else
{
// We should have eliminated the barrier for this case.
assert(!(indArg->gtOper == GT_ADDR && indArg->AsOp()->gtOp1->gtOper == GT_LCL_VAR));
}
}
if (helper == CORINFO_HELP_CHECKED_ASSIGN_REF)
{
#if 0
#ifdef DEBUG
// Enable this to sample the unclassified trees.
static int unclassifiedBarrierSite = 0;
if (wbKind == CWBKind_Unclassified)
{
unclassifiedBarrierSite++;
printf("unclassifiedBarrierSite = %d:\n", unclassifiedBarrierSite); compiler->gtDispTree(tgt); printf(""); printf("\n");
}
#endif // DEBUG
#endif // 0
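        // Push the barrier classification as an extra argument so the checked helper can
        // attribute this call when counting write barriers.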
AddStackLevel(4);
inst_IV(INS_push, wbKind);
genEmitHelperCall(helper,
4, // argSize
EA_PTRSIZE); // retSize
SubtractStackLevel(4);
}
else
{
genEmitHelperCall(helper,
0, // argSize
EA_PTRSIZE); // retSize
}
#else // !FEATURE_COUNT_GC_WRITE_BARRIERS
genEmitHelperCall(helper,
0, // argSize
EA_PTRSIZE); // retSize
#endif // !FEATURE_COUNT_GC_WRITE_BARRIERS
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Prolog / Epilog XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************
*
* Generates code for moving incoming register arguments to their
* assigned location, in the function prolog.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbered, RegState* regState)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFnPrologCalleeRegArgs() for %s regs\n", regState->rsIsFloat ? "float" : "int");
}
#endif
unsigned argMax; // maximum argNum value plus 1, (including the RetBuffArg)
unsigned argNum; // current argNum, always in [0..argMax-1]
unsigned fixedRetBufIndex; // argNum value used by the fixed return buffer argument (ARM64)
unsigned regArgNum; // index into the regArgTab[] table
regMaskTP regArgMaskLive = regState->rsCalleeRegArgMaskLiveIn;
bool doingFloat = regState->rsIsFloat;
// We should be generating the prolog block when we are called
assert(compiler->compGeneratingProlog);
    // We expect to have some registers of the type we are doing that are LiveIn; otherwise we don't need to be called.
noway_assert(regArgMaskLive != 0);
// If a method has 3 args (and no fixed return buffer) then argMax is 3 and valid indexes are 0,1,2
// If a method has a fixed return buffer (on ARM64) then argMax gets set to 9 and valid index are 0-8
//
// The regArgTab can always have unused entries,
    // for example, if an architecture always increments the arg register number but uses either
    // an integer register or a floating point register to hold the next argument,
    // then with a mix of float and integer args you could have:
//
// sampleMethod(int i, float x, int j, float y, int k, float z);
// r0, r2 and r4 as valid integer arguments with argMax as 5
    //      and f1, f3 and f5 as valid floating point arguments with argMax as 6
// The first one is doingFloat==false and the second one is doingFloat==true
//
// If a fixed return buffer (in r8) was also present then the first one would become:
// r0, r2, r4 and r8 as valid integer arguments with argMax as 9
//
argMax = regState->rsCalleeRegArgCount;
fixedRetBufIndex = (unsigned)-1; // Invalid value
// If necessary we will select a correct xtraReg for circular floating point args later.
if (doingFloat)
{
xtraReg = REG_NA;
noway_assert(argMax <= MAX_FLOAT_REG_ARG);
}
else // we are doing the integer registers
{
noway_assert(argMax <= MAX_REG_ARG);
if (hasFixedRetBuffReg())
{
fixedRetBufIndex = theFixedRetBuffArgNum();
// We have an additional integer register argument when hasFixedRetBuffReg() is true
argMax = fixedRetBufIndex + 1;
assert(argMax == (MAX_REG_ARG + 1));
}
}
//
// Construct a table with the register arguments, for detecting circular and
// non-circular dependencies between the register arguments. A dependency is when
// an argument register Rn needs to be moved to register Rm that is also an argument
// register. The table is constructed in the order the arguments are passed in
// registers: the first register argument is in regArgTab[0], the second in
// regArgTab[1], etc. Note that on ARM, a TYP_DOUBLE takes two entries, starting
// at an even index. The regArgTab is indexed from 0 to argMax - 1.
    // Note that due to an extra argument register for ARM64 (i.e., theFixedRetBuffReg())
// we have increased the allocated size of the regArgTab[] by one.
//
struct regArgElem
{
unsigned varNum; // index into compiler->lvaTable[] for this register argument
#if defined(UNIX_AMD64_ABI)
var_types type; // the Jit type of this regArgTab entry
#endif // defined(UNIX_AMD64_ABI)
unsigned trashBy; // index into this regArgTab[] table of the register that will be copied to this register.
// That is, for regArgTab[x].trashBy = y, argument register number 'y' will be copied to
// argument register number 'x'. Only used when circular = true.
char slot; // 0 means the register is not used for a register argument
// 1 means the first part of a register argument
                   // 2, 3 or 4 means the second, third or fourth part of a multireg argument
bool stackArg; // true if the argument gets homed to the stack
bool writeThru; // true if the argument gets homed to both stack and register
bool processed; // true after we've processed the argument (and it is in its final location)
bool circular; // true if this register participates in a circular dependency loop.
#ifdef UNIX_AMD64_ABI
// For UNIX AMD64 struct passing, the type of the register argument slot can differ from
// the type of the lclVar in ways that are not ascertainable from lvType.
// So, for that case we retain the type of the register in the regArgTab.
var_types getRegType(Compiler* compiler)
{
return type; // UNIX_AMD64 implementation
}
#else // !UNIX_AMD64_ABI
// In other cases, we simply use the type of the lclVar to determine the type of the register.
var_types getRegType(Compiler* compiler)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
// Check if this is an HFA register arg and return the HFA type
if (varDsc->lvIsHfaRegArg())
{
// Cannot have hfa types on windows arm targets
// in vararg methods.
assert(!TargetOS::IsWindows || !compiler->info.compIsVarArgs);
return varDsc->GetHfaType();
}
return compiler->mangleVarArgsType(varDsc->lvType);
}
#endif // !UNIX_AMD64_ABI
} regArgTab[max(MAX_REG_ARG + 1, MAX_FLOAT_REG_ARG)] = {};
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0; varNum < compiler->lvaCount; ++varNum)
{
varDsc = compiler->lvaGetDesc(varNum);
// Is this variable a register arg?
if (!varDsc->lvIsParam)
{
continue;
}
if (!varDsc->lvIsRegArg)
{
continue;
}
// When we have a promoted struct we have two possible LclVars that can represent the incoming argument
// in the regArgTab[], either the original TYP_STRUCT argument or the introduced lvStructField.
        // We will use the lvStructField if we have a TYPE_INDEPENDENT promoted struct field, otherwise
        // use the original TYP_STRUCT argument.
//
if (varDsc->lvPromoted || varDsc->lvIsStructField)
{
LclVarDsc* parentVarDsc = varDsc;
if (varDsc->lvIsStructField)
{
assert(!varDsc->lvPromoted);
parentVarDsc = compiler->lvaGetDesc(varDsc->lvParentLcl);
}
Compiler::lvaPromotionType promotionType = compiler->lvaGetPromotionType(parentVarDsc);
if (promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT)
{
// For register arguments that are independent promoted structs we put the promoted field varNum in the
// regArgTab[]
if (varDsc->lvPromoted)
{
continue;
}
}
else
{
// For register arguments that are not independent promoted structs we put the parent struct varNum in
// the regArgTab[]
if (varDsc->lvIsStructField)
{
continue;
}
}
}
var_types regType = compiler->mangleVarArgsType(varDsc->TypeGet());
// Change regType to the HFA type when we have a HFA argument
if (varDsc->lvIsHfaRegArg())
{
#if defined(TARGET_ARM64)
if (TargetOS::IsWindows && compiler->info.compIsVarArgs)
{
assert(!"Illegal incoming HFA arg encountered in Vararg method.");
}
#endif // defined(TARGET_ARM64)
regType = varDsc->GetHfaType();
}
#if defined(UNIX_AMD64_ABI)
if (!varTypeIsStruct(regType))
#endif // defined(UNIX_AMD64_ABI)
{
// A struct might be passed partially in XMM register for System V calls.
// So a single arg might use both register files.
if (emitter::isFloatReg(varDsc->GetArgReg()) != doingFloat)
{
continue;
}
}
int slots = 0;
#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(varDsc))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
assert(typeHnd != nullptr);
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc);
if (!structDesc.passedInRegisters)
{
// The var is not passed in registers.
continue;
}
unsigned firstRegSlot = 0;
for (unsigned slotCounter = 0; slotCounter < structDesc.eightByteCount; slotCounter++)
{
regNumber regNum = varDsc->lvRegNumForSlot(slotCounter);
var_types regType;
#ifdef FEATURE_SIMD
// Assumption 1:
// RyuJit backend depends on the assumption that on 64-Bit targets Vector3 size is rounded off
// to TARGET_POINTER_SIZE and hence Vector3 locals on stack can be treated as TYP_SIMD16 for
// reading and writing purposes. Hence while homing a Vector3 type arg on stack we should
// home entire 16-bytes so that the upper-most 4-bytes will be zeroed when written to stack.
//
// Assumption 2:
// RyuJit backend is making another implicit assumption that Vector3 type args when passed in
// registers or on stack, the upper most 4-bytes will be zero.
//
                // For P/Invoke return and Reverse P/Invoke argument passing, the native compiler doesn't
                // guarantee that the upper 4-bytes of a Vector3 type struct are zero initialized, and hence
                // assumption 2 is invalid.
//
// RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12
// bytes. In case of Vector3 returns, Caller allocates a zero initialized Vector3 local and
// passes it retBuf arg and Callee method writes only 12 bytes to retBuf. For this reason,
// there is no need to clear upper 4-bytes of Vector3 type args.
//
                // RyuJIT x64 Unix: arguments are treated as passed by value and read/written as if TYP_SIMD16.
                // Vector3 return values are returned in two return registers and the Caller assembles them into a
                // single xmm reg. Hence RyuJIT explicitly generates code to clear the upper 4-bytes of Vector3
                // type args in the prolog and of the Vector3 type return value of a call.
if (varDsc->lvType == TYP_SIMD12)
{
regType = TYP_DOUBLE;
}
else
#endif
{
regType = compiler->GetEightByteType(structDesc, slotCounter);
}
regArgNum = genMapRegNumToRegArgNum(regNum, regType);
if ((!doingFloat && (structDesc.IsIntegralSlot(slotCounter))) ||
(doingFloat && (structDesc.IsSseSlot(slotCounter))))
{
// Store the reg for the first slot.
if (slots == 0)
{
firstRegSlot = regArgNum;
}
// Bingo - add it to our table
noway_assert(regArgNum < argMax);
noway_assert(regArgTab[regArgNum].slot == 0); // we better not have added it already (there better
// not be multiple vars representing this argument
// register)
regArgTab[regArgNum].varNum = varNum;
regArgTab[regArgNum].slot = (char)(slotCounter + 1);
regArgTab[regArgNum].type = regType;
slots++;
}
}
if (slots == 0)
{
continue; // Nothing to do for this regState set.
}
regArgNum = firstRegSlot;
}
else
#endif // defined(UNIX_AMD64_ABI)
{
// Bingo - add it to our table
regArgNum = genMapRegNumToRegArgNum(varDsc->GetArgReg(), regType);
noway_assert(regArgNum < argMax);
// We better not have added it already (there better not be multiple vars representing this argument
// register)
noway_assert(regArgTab[regArgNum].slot == 0);
#if defined(UNIX_AMD64_ABI)
// Set the register type.
regArgTab[regArgNum].type = regType;
#endif // defined(UNIX_AMD64_ABI)
regArgTab[regArgNum].varNum = varNum;
regArgTab[regArgNum].slot = 1;
slots = 1;
#if FEATURE_MULTIREG_ARGS
if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
{
if (varDsc->lvIsHfaRegArg())
{
// We have an HFA argument, set slots to the number of registers used
slots = varDsc->lvHfaSlots();
}
else
{
// Currently all non-HFA multireg structs are two registers in size (i.e. two slots)
assert(varDsc->lvSize() == (2 * TARGET_POINTER_SIZE));
// We have a non-HFA multireg argument, set slots to two
slots = 2;
}
// Note that regArgNum+1 represents an argument index not an actual argument register.
// see genMapRegArgNumToRegNum(unsigned argNum, var_types type)
// This is the setup for the rest of a multireg struct arg
for (int i = 1; i < slots; i++)
{
noway_assert((regArgNum + i) < argMax);
// We better not have added it already (there better not be multiple vars representing this argument
// register)
noway_assert(regArgTab[regArgNum + i].slot == 0);
regArgTab[regArgNum + i].varNum = varNum;
regArgTab[regArgNum + i].slot = (char)(i + 1);
}
}
#endif // FEATURE_MULTIREG_ARGS
}
#ifdef TARGET_ARM
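        // A local that is larger than a single register (e.g. a struct passed in registers)
        // occupies consecutive argument registers on ARM; recompute how many register slots it
        // uses, clipped to the number of argument registers actually available.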
int lclSize = compiler->lvaLclSize(varNum);
if (lclSize > REGSIZE_BYTES)
{
unsigned maxRegArgNum = doingFloat ? MAX_FLOAT_REG_ARG : MAX_REG_ARG;
slots = lclSize / REGSIZE_BYTES;
if (regArgNum + slots > maxRegArgNum)
{
slots = maxRegArgNum - regArgNum;
}
}
C_ASSERT((char)MAX_REG_ARG == MAX_REG_ARG);
assert(slots < INT8_MAX);
for (char i = 1; i < slots; i++)
{
regArgTab[regArgNum + i].varNum = varNum;
regArgTab[regArgNum + i].slot = i + 1;
}
#endif // TARGET_ARM
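        // Walk each register slot occupied by this argument and record whether it is live on
        // entry, homed to the stack, and/or part of a circular register dependency.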
for (int i = 0; i < slots; i++)
{
regType = regArgTab[regArgNum + i].getRegType(compiler);
regNumber regNum = genMapRegArgNumToRegNum(regArgNum + i, regType);
#if !defined(UNIX_AMD64_ABI)
assert((i > 0) || (regNum == varDsc->GetArgReg()));
#endif // defined(UNIX_AMD64_ABI)
// Is the arg dead on entry to the method ?
if ((regArgMaskLive & genRegMask(regNum)) == 0)
{
if (varDsc->lvTrackedNonStruct())
{
// We may now see some tracked locals with zero refs.
// See Lowering::DoPhase. Tolerate these.
if (varDsc->lvRefCnt() > 0)
{
noway_assert(!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex));
}
}
else
{
#ifdef TARGET_X86
noway_assert(varDsc->lvType == TYP_STRUCT);
#else // !TARGET_X86
// For LSRA, it may not be in regArgMaskLive if it has a zero
// refcnt. This is in contrast with the non-LSRA case in which all
// non-tracked args are assumed live on entry.
noway_assert((varDsc->lvRefCnt() == 0) || (varDsc->lvType == TYP_STRUCT) ||
(varDsc->IsAddressExposed() && compiler->info.compIsVarArgs) ||
(varDsc->IsAddressExposed() && compiler->opts.compUseSoftFP));
#endif // !TARGET_X86
}
// Mark it as processed and be done with it
regArgTab[regArgNum + i].processed = true;
goto NON_DEP;
}
#ifdef TARGET_ARM
// On the ARM when the varDsc is a struct arg (or pre-spilled due to varargs) the initReg/xtraReg
// could be equal to GetArgReg(). The pre-spilled registers are also not considered live either since
// they've already been spilled.
//
if ((regSet.rsMaskPreSpillRegs(false) & genRegMask(regNum)) == 0)
#endif // TARGET_ARM
{
#if !defined(UNIX_AMD64_ABI)
noway_assert(xtraReg != (varDsc->GetArgReg() + i));
#endif
noway_assert(regArgMaskLive & genRegMask(regNum));
}
regArgTab[regArgNum + i].processed = false;
regArgTab[regArgNum + i].writeThru = (varDsc->lvIsInReg() && varDsc->lvLiveInOutOfHndlr);
/* mark stack arguments since we will take care of those first */
            regArgTab[regArgNum + i].stackArg = !varDsc->lvIsInReg();
/* If it goes on the stack or in a register that doesn't hold
* an argument anymore -> CANNOT form a circular dependency */
if (varDsc->lvIsInReg() && (genRegMask(regNum) & regArgMaskLive))
{
/* will trash another argument -> possible dependency
* We may need several passes after the table is constructed
* to decide on that */
/* Maybe the argument stays in the register (IDEAL) */
if ((i == 0) && (varDsc->GetRegNum() == regNum))
{
goto NON_DEP;
}
#if !defined(TARGET_64BIT)
if ((i == 1) && varTypeIsStruct(varDsc) && (varDsc->GetOtherReg() == regNum))
{
goto NON_DEP;
}
if ((i == 1) && (genActualType(varDsc->TypeGet()) == TYP_LONG) && (varDsc->GetOtherReg() == regNum))
{
goto NON_DEP;
}
if ((i == 1) && (genActualType(varDsc->TypeGet()) == TYP_DOUBLE) &&
(REG_NEXT(varDsc->GetRegNum()) == regNum))
{
goto NON_DEP;
}
#endif // !defined(TARGET_64BIT)
regArgTab[regArgNum + i].circular = true;
}
else
{
NON_DEP:
regArgTab[regArgNum + i].circular = false;
/* mark the argument register as free */
regArgMaskLive &= ~genRegMask(regNum);
}
}
}
/* Find the circular dependencies for the argument registers, if any.
* A circular dependency is a set of registers R1, R2, ..., Rn
* such that R1->R2 (that is, R1 needs to be moved to R2), R2->R3, ..., Rn->R1 */
bool change = true;
if (regArgMaskLive)
{
/* Possible circular dependencies still exist; the previous pass was not enough
* to filter them out. Use a "sieve" strategy to find all circular dependencies. */
while (change)
{
change = false;
for (argNum = 0; argNum < argMax; argNum++)
{
// If we already marked the argument as non-circular then continue
if (!regArgTab[argNum].circular)
{
continue;
}
if (regArgTab[argNum].slot == 0) // Not a register argument
{
continue;
}
varNum = regArgTab[argNum].varNum;
varDsc = compiler->lvaGetDesc(varNum);
const var_types varRegType = varDsc->GetRegisterType();
noway_assert(varDsc->lvIsParam && varDsc->lvIsRegArg);
/* cannot possibly have stack arguments */
noway_assert(varDsc->lvIsInReg());
noway_assert(!regArgTab[argNum].stackArg);
var_types regType = regArgTab[argNum].getRegType(compiler);
regNumber regNum = genMapRegArgNumToRegNum(argNum, regType);
regNumber destRegNum = REG_NA;
if (varTypeIsStruct(varDsc) &&
(compiler->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT))
{
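                    // For an independently promoted struct each register slot corresponds to its
                    // own promoted field local, so the destination is that field's assigned register.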
assert(regArgTab[argNum].slot <= varDsc->lvFieldCnt);
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + regArgTab[argNum].slot - 1);
destRegNum = fieldVarDsc->GetRegNum();
}
else if (regArgTab[argNum].slot == 1)
{
destRegNum = varDsc->GetRegNum();
}
#if defined(TARGET_ARM64) && defined(FEATURE_SIMD)
else if (varDsc->lvIsHfa())
{
// This must be a SIMD type that's fully enregistered, but is passed as an HFA.
// Each field will be inserted into the same destination register.
assert(varTypeIsSIMD(varDsc) &&
!compiler->isOpaqueSIMDType(varDsc->lvVerTypeInfo.GetClassHandle()));
assert(regArgTab[argNum].slot <= (int)varDsc->lvHfaSlots());
assert(argNum > 0);
assert(regArgTab[argNum - 1].varNum == varNum);
regArgMaskLive &= ~genRegMask(regNum);
regArgTab[argNum].circular = false;
change = true;
continue;
}
#elif defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
else
{
assert(regArgTab[argNum].slot == 2);
assert(argNum > 0);
assert(regArgTab[argNum - 1].slot == 1);
assert(regArgTab[argNum - 1].varNum == varNum);
assert((varRegType == TYP_SIMD12) || (varRegType == TYP_SIMD16));
regArgMaskLive &= ~genRegMask(regNum);
regArgTab[argNum].circular = false;
change = true;
continue;
}
#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#if !defined(TARGET_64BIT)
else if (regArgTab[argNum].slot == 2 && genActualType(varDsc->TypeGet()) == TYP_LONG)
{
destRegNum = varDsc->GetOtherReg();
}
else
{
assert(regArgTab[argNum].slot == 2);
assert(varDsc->TypeGet() == TYP_DOUBLE);
destRegNum = REG_NEXT(varDsc->GetRegNum());
}
#endif // !defined(TARGET_64BIT)
noway_assert(destRegNum != REG_NA);
if (genRegMask(destRegNum) & regArgMaskLive)
{
/* we are trashing a live argument register - record it */
unsigned destRegArgNum = genMapRegNumToRegArgNum(destRegNum, regType);
noway_assert(destRegArgNum < argMax);
regArgTab[destRegArgNum].trashBy = argNum;
}
else
{
/* argument goes to a free register */
regArgTab[argNum].circular = false;
change = true;
/* mark the argument register as free */
regArgMaskLive &= ~genRegMask(regNum);
}
}
}
}
/* At this point, everything that has the "circular" flag
* set to "true" forms a circular dependency */
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (regArgMaskLive)
{
if (verbose)
{
printf("Circular dependencies found while home-ing the incoming arguments.\n");
}
}
#endif
// LSRA allocates registers to incoming parameters in order and will not overwrite
// a register still holding a live parameter.
noway_assert(((regArgMaskLive & RBM_FLTARG_REGS) == 0) &&
"Homing of float argument registers with circular dependencies not implemented.");
// Now move the arguments to their locations.
// First consider ones that go on the stack since they may free some registers.
// Also home writeThru args, since they're also homed to the stack.
regArgMaskLive = regState->rsCalleeRegArgMaskLiveIn; // reset the live in to what it was at the start
for (argNum = 0; argNum < argMax; argNum++)
{
emitAttr size;
#if defined(UNIX_AMD64_ABI)
// If this is the wrong register file, just continue.
if (regArgTab[argNum].type == TYP_UNDEF)
{
// This could happen if the reg in regArgTab[argNum] is of the other register file -
// for System V register passed structs where the first reg is GPR and the second an XMM reg.
// The next register file processing will process it.
continue;
}
#endif // defined(UNIX_AMD64_ABI)
// If the arg is dead on entry to the method, skip it
if (regArgTab[argNum].processed)
{
continue;
}
if (regArgTab[argNum].slot == 0) // Not a register argument
{
continue;
}
varNum = regArgTab[argNum].varNum;
varDsc = compiler->lvaGetDesc(varNum);
#ifndef TARGET_64BIT
// If this arg is never on the stack, go to the next one.
if (varDsc->lvType == TYP_LONG)
{
if (regArgTab[argNum].slot == 1 && !regArgTab[argNum].stackArg && !regArgTab[argNum].writeThru)
{
continue;
}
else if (varDsc->GetOtherReg() != REG_STK)
{
continue;
}
}
else
#endif // !TARGET_64BIT
{
// If this arg is never on the stack, go to the next one.
if (!regArgTab[argNum].stackArg && !regArgTab[argNum].writeThru)
{
continue;
}
}
#if defined(TARGET_ARM)
if (varDsc->lvType == TYP_DOUBLE)
{
if (regArgTab[argNum].slot == 2)
{
// We handled the entire double when processing the first half (slot == 1)
continue;
}
}
#endif
noway_assert(regArgTab[argNum].circular == false);
noway_assert(varDsc->lvIsParam);
noway_assert(varDsc->lvIsRegArg);
noway_assert(varDsc->lvIsInReg() == false || varDsc->lvLiveInOutOfHndlr ||
(varDsc->lvType == TYP_LONG && varDsc->GetOtherReg() == REG_STK && regArgTab[argNum].slot == 2));
var_types storeType = TYP_UNDEF;
unsigned slotSize = TARGET_POINTER_SIZE;
if (varTypeIsStruct(varDsc))
{
storeType = TYP_I_IMPL; // Default store type for a struct type is a pointer sized integer
#if FEATURE_MULTIREG_ARGS
// Must be <= MAX_PASS_MULTIREG_BYTES or else it wouldn't be passed in registers
noway_assert(varDsc->lvSize() <= MAX_PASS_MULTIREG_BYTES);
#endif // FEATURE_MULTIREG_ARGS
#ifdef UNIX_AMD64_ABI
storeType = regArgTab[argNum].type;
#endif // UNIX_AMD64_ABI
if (varDsc->lvIsHfaRegArg())
{
#ifdef TARGET_ARM
// On ARM32 the storeType for HFA args is always TYP_FLOAT
storeType = TYP_FLOAT;
slotSize = (unsigned)emitActualTypeSize(storeType);
#else // TARGET_ARM64
storeType = genActualType(varDsc->GetHfaType());
slotSize = (unsigned)emitActualTypeSize(storeType);
#endif // TARGET_ARM64
}
}
else // Not a struct type
{
storeType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
}
size = emitActualTypeSize(storeType);
#ifdef TARGET_X86
noway_assert(genTypeSize(storeType) == TARGET_POINTER_SIZE);
#endif // TARGET_X86
regNumber srcRegNum = genMapRegArgNumToRegNum(argNum, storeType);
// Stack argument - if the ref count is 0 don't care about it
if (!varDsc->lvOnFrame)
{
noway_assert(varDsc->lvRefCnt() == 0);
}
else
{
// Since slot is typically 1, baseOffset is typically 0
int baseOffset = (regArgTab[argNum].slot - 1) * slotSize;
GetEmitter()->emitIns_S_R(ins_Store(storeType), size, srcRegNum, varNum, baseOffset);
#ifndef UNIX_AMD64_ABI
// Check if we are writing past the end of the struct
if (varTypeIsStruct(varDsc))
{
assert(varDsc->lvSize() >= baseOffset + (unsigned)size);
}
#endif // !UNIX_AMD64_ABI
#ifdef USING_SCOPE_INFO
if (regArgTab[argNum].slot == 1)
{
psiMoveToStack(varNum);
}
#endif // USING_SCOPE_INFO
}
// Mark the argument as processed, and set it as no longer live in srcRegNum,
// unless it is a writeThru var, in which case we home it to the stack, but
// don't mark it as processed until below.
if (!regArgTab[argNum].writeThru)
{
regArgTab[argNum].processed = true;
regArgMaskLive &= ~genRegMask(srcRegNum);
}
#if defined(TARGET_ARM)
if ((storeType == TYP_DOUBLE) && !regArgTab[argNum].writeThru)
{
regArgTab[argNum + 1].processed = true;
regArgMaskLive &= ~genRegMask(REG_NEXT(srcRegNum));
}
#endif
}
/* Process any circular dependencies */
if (regArgMaskLive)
{
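// Each remaining cycle is broken either with an 'xchg' (two-register cycles on xarch),
// or by parking the first register of the cycle in xtraReg, rotating the other values
// into their homes, and finally restoring the parked value from xtraReg.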
unsigned begReg, destReg, srcReg;
unsigned varNumDest, varNumSrc;
LclVarDsc* varDscDest;
LclVarDsc* varDscSrc;
instruction insCopy = INS_mov;
if (doingFloat)
{
#ifndef UNIX_AMD64_ABI
if (GlobalJitOptions::compFeatureHfa)
#endif // !UNIX_AMD64_ABI
{
insCopy = ins_Copy(TYP_DOUBLE);
// Compute xtraReg here when we have a float argument
assert(xtraReg == REG_NA);
regMaskTP fpAvailMask;
fpAvailMask = RBM_FLT_CALLEE_TRASH & ~regArgMaskLive;
if (GlobalJitOptions::compFeatureHfa)
{
fpAvailMask &= RBM_ALLDOUBLE;
}
if (fpAvailMask == RBM_NONE)
{
fpAvailMask = RBM_ALLFLOAT & ~regArgMaskLive;
if (GlobalJitOptions::compFeatureHfa)
{
fpAvailMask &= RBM_ALLDOUBLE;
}
}
assert(fpAvailMask != RBM_NONE);
// We pick the lowest avail register number
regMaskTP tempMask = genFindLowestBit(fpAvailMask);
xtraReg = genRegNumFromMask(tempMask);
}
#if defined(TARGET_X86)
// This case shouldn't occur on x86 since NYI gets converted to an assert
NYI("Homing circular FP registers via xtraReg");
#endif
}
for (argNum = 0; argNum < argMax; argNum++)
{
// If not a circular dependency then continue
if (!regArgTab[argNum].circular)
{
continue;
}
// If already processed the dependency then continue
if (regArgTab[argNum].processed)
{
continue;
}
if (regArgTab[argNum].slot == 0) // Not a register argument
{
continue;
}
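// regArgTab[x].trashBy identifies the argument whose home register is x's incoming
// argument register, so following the trashBy links from begReg walks the whole cycle.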
destReg = begReg = argNum;
srcReg = regArgTab[argNum].trashBy;
varNumDest = regArgTab[destReg].varNum;
varDscDest = compiler->lvaGetDesc(varNumDest);
noway_assert(varDscDest->lvIsParam && varDscDest->lvIsRegArg);
noway_assert(srcReg < argMax);
varNumSrc = regArgTab[srcReg].varNum;
varDscSrc = compiler->lvaGetDesc(varNumSrc);
noway_assert(varDscSrc->lvIsParam && varDscSrc->lvIsRegArg);
emitAttr size = EA_PTRSIZE;
#ifdef TARGET_XARCH
//
// The following code relies upon the target architecture having an
// 'xchg' instruction which directly swaps the values held in two registers.
// On the ARM architecture we do not have such an instruction.
//
if (destReg == regArgTab[srcReg].trashBy)
{
/* only 2 registers form the circular dependency - use "xchg" */
varNum = regArgTab[argNum].varNum;
varDsc = compiler->lvaGetDesc(varNum);
noway_assert(varDsc->lvIsParam && varDsc->lvIsRegArg);
noway_assert(genTypeSize(genActualType(varDscSrc->TypeGet())) <= REGSIZE_BYTES);
/* Set "size" to indicate GC if one and only one of
* the operands is a pointer
* RATIONALE: If both are pointers, nothing changes in
* the GC pointer tracking. If only one is a pointer we
* have to "swap" the registers in the GC reg pointer mask
*/
if (varTypeGCtype(varDscSrc->TypeGet()) != varTypeGCtype(varDscDest->TypeGet()))
{
size = EA_GCREF;
}
noway_assert(varDscDest->GetArgReg() == varDscSrc->GetRegNum());
GetEmitter()->emitIns_R_R(INS_xchg, size, varDscSrc->GetRegNum(), varDscSrc->GetArgReg());
regSet.verifyRegUsed(varDscSrc->GetRegNum());
regSet.verifyRegUsed(varDscSrc->GetArgReg());
/* mark both arguments as processed */
regArgTab[destReg].processed = true;
regArgTab[srcReg].processed = true;
regArgMaskLive &= ~genRegMask(varDscSrc->GetArgReg());
regArgMaskLive &= ~genRegMask(varDscDest->GetArgReg());
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNumSrc);
psiMoveToReg(varNumDest);
#endif // USING_SCOPE_INFO
}
else
#endif // TARGET_XARCH
{
var_types destMemType = varDscDest->TypeGet();
#ifdef TARGET_ARM
bool cycleAllDouble = true; // assume the best
unsigned iter = begReg;
do
{
if (compiler->lvaGetDesc(regArgTab[iter].varNum)->TypeGet() != TYP_DOUBLE)
{
cycleAllDouble = false;
break;
}
iter = regArgTab[iter].trashBy;
} while (iter != begReg);
// We may treat doubles as floats for ARM because we could have partial circular
// dependencies of a float with a lo/hi part of the double. We mark the
// trashBy values for each slot of the double, so let the circular dependency
// logic work its way out for floats rather than doubles. If a cycle has all
// doubles, then optimize so that instead of two vmov.f32's to move a double,
// we can use one vmov.f64.
//
if (!cycleAllDouble && destMemType == TYP_DOUBLE)
{
destMemType = TYP_FLOAT;
}
#endif // TARGET_ARM
if (destMemType == TYP_REF)
{
size = EA_GCREF;
}
else if (destMemType == TYP_BYREF)
{
size = EA_BYREF;
}
else if (destMemType == TYP_DOUBLE)
{
size = EA_8BYTE;
}
else if (destMemType == TYP_FLOAT)
{
size = EA_4BYTE;
}
/* move the dest reg (begReg) in the extra reg */
assert(xtraReg != REG_NA);
regNumber begRegNum = genMapRegArgNumToRegNum(begReg, destMemType);
GetEmitter()->emitIns_Mov(insCopy, size, xtraReg, begRegNum, /* canSkip */ false);
regSet.verifyRegUsed(xtraReg);
*pXtraRegClobbered = true;
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNumDest, xtraReg);
#endif // USING_SCOPE_INFO
/* start moving everything to its right place */
while (srcReg != begReg)
{
/* mov dest, src */
regNumber destRegNum = genMapRegArgNumToRegNum(destReg, destMemType);
regNumber srcRegNum = genMapRegArgNumToRegNum(srcReg, destMemType);
GetEmitter()->emitIns_Mov(insCopy, size, destRegNum, srcRegNum, /* canSkip */ false);
regSet.verifyRegUsed(destRegNum);
/* mark 'src' as processed */
noway_assert(srcReg < argMax);
regArgTab[srcReg].processed = true;
#ifdef TARGET_ARM
if (size == EA_8BYTE)
regArgTab[srcReg + 1].processed = true;
#endif
regArgMaskLive &= ~genMapArgNumToRegMask(srcReg, destMemType);
/* move to the next pair */
destReg = srcReg;
srcReg = regArgTab[srcReg].trashBy;
varDscDest = varDscSrc;
destMemType = varDscDest->TypeGet();
#ifdef TARGET_ARM
if (!cycleAllDouble && destMemType == TYP_DOUBLE)
{
destMemType = TYP_FLOAT;
}
#endif
varNumSrc = regArgTab[srcReg].varNum;
varDscSrc = compiler->lvaGetDesc(varNumSrc);
noway_assert(varDscSrc->lvIsParam && varDscSrc->lvIsRegArg);
if (destMemType == TYP_REF)
{
size = EA_GCREF;
}
else if (destMemType == TYP_DOUBLE)
{
size = EA_8BYTE;
}
else
{
size = EA_4BYTE;
}
}
/* take care of the beginning register */
noway_assert(srcReg == begReg);
/* move the dest reg (begReg) in the extra reg */
regNumber destRegNum = genMapRegArgNumToRegNum(destReg, destMemType);
GetEmitter()->emitIns_Mov(insCopy, size, destRegNum, xtraReg, /* canSkip */ false);
regSet.verifyRegUsed(destRegNum);
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNumSrc);
#endif // USING_SCOPE_INFO
/* mark the beginning register as processed */
regArgTab[srcReg].processed = true;
#ifdef TARGET_ARM
if (size == EA_8BYTE)
regArgTab[srcReg + 1].processed = true;
#endif
regArgMaskLive &= ~genMapArgNumToRegMask(srcReg, destMemType);
}
}
}
/* Finally take care of the remaining arguments that must be enregistered */
while (regArgMaskLive)
{
regMaskTP regArgMaskLiveSave = regArgMaskLive;
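// Every pass over the table must home at least one argument; the assert at the bottom
// of this loop catches the case where no progress was made.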
for (argNum = 0; argNum < argMax; argNum++)
{
/* If already processed go to the next one */
if (regArgTab[argNum].processed)
{
continue;
}
if (regArgTab[argNum].slot == 0) // Not a register argument
{
continue;
}
varNum = regArgTab[argNum].varNum;
varDsc = compiler->lvaGetDesc(varNum);
const var_types regType = regArgTab[argNum].getRegType(compiler);
const regNumber regNum = genMapRegArgNumToRegNum(argNum, regType);
const var_types varRegType = varDsc->GetRegisterType();
#if defined(UNIX_AMD64_ABI)
if (regType == TYP_UNDEF)
{
// This could happen if the reg in regArgTab[argNum] is of the other register file -
// for System V register passed structs where the first reg is GPR and the second an XMM reg.
// The next register file processing will process it.
regArgMaskLive &= ~genRegMask(regNum);
continue;
}
#endif // defined(UNIX_AMD64_ABI)
noway_assert(varDsc->lvIsParam && varDsc->lvIsRegArg);
#ifdef TARGET_X86
// On x86 we don't enregister args that are not pointer sized.
noway_assert(genTypeSize(varDsc->GetActualRegisterType()) == TARGET_POINTER_SIZE);
#endif // TARGET_X86
noway_assert(varDsc->lvIsInReg() && !regArgTab[argNum].circular);
/* Register argument - hopefully it stays in the same register */
regNumber destRegNum = REG_NA;
var_types destMemType = varDsc->GetRegisterType();
if (regArgTab[argNum].slot == 1)
{
destRegNum = varDsc->GetRegNum();
#ifdef TARGET_ARM
if (genActualType(destMemType) == TYP_DOUBLE && regArgTab[argNum + 1].processed)
{
// The second half of the double has already been processed! Treat this as a single.
destMemType = TYP_FLOAT;
}
#endif // TARGET_ARM
}
#ifndef TARGET_64BIT
else if (regArgTab[argNum].slot == 2 && genActualType(destMemType) == TYP_LONG)
{
assert(genActualType(varDsc->TypeGet()) == TYP_LONG || genActualType(varDsc->TypeGet()) == TYP_DOUBLE);
if (genActualType(varDsc->TypeGet()) == TYP_DOUBLE)
{
destRegNum = regNum;
}
else
{
destRegNum = varDsc->GetOtherReg();
}
assert(destRegNum != REG_STK);
}
else
{
assert(regArgTab[argNum].slot == 2);
assert(destMemType == TYP_DOUBLE);
// For doubles, we move the entire double using the argNum representing
// the first half of the double. There are two things we won't do:
// (1) move the double when the 1st half of the destination is free but the
// 2nd half is occupied, and (2) move the double when the 2nd half of the
// destination is free but the 1st half is occupied. Here we consider the
// case where the first half can't be moved initially because its target is
// still busy, but the second half can be moved. We wait until the entire
// double can be moved, if possible. For example, we have F0/F1 double moving to F2/F3,
// and F2 single moving to F16. When we process F0, its target F2 is busy,
// so we skip it on the first pass. When we process F1, its target F3 is
// available. However, we want to move F0/F1 all at once, so we skip it here.
// We process F2, which frees up F2. The next pass through, we process F0 and
// F2/F3 are empty, so we move it. Note that if half of a double is involved
// in a circularity with a single, then we will have already moved that half
// above, so we go ahead and move the remaining half as a single.
// Because there are no circularities left, we are guaranteed to terminate.
assert(argNum > 0);
assert(regArgTab[argNum - 1].slot == 1);
if (!regArgTab[argNum - 1].processed)
{
// The first half of the double hasn't been processed yet; skip it so that both halves can be processed together
continue;
}
// The first half of the double has been processed but the second half hasn't!
// This could happen for double F2/F3 moving to F0/F1, and single F0 moving to F2.
// In that case, there is a F0/F2 loop that is not a double-only loop. The circular
// dependency logic above will move them as singles, leaving just F3 to move. Treat
// it as a single to finish the shuffling.
destMemType = TYP_FLOAT;
destRegNum = REG_NEXT(varDsc->GetRegNum());
}
#endif // !TARGET_64BIT
#if (defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64)) && defined(FEATURE_SIMD)
else
{
assert(regArgTab[argNum].slot == 2);
assert(argNum > 0);
assert(regArgTab[argNum - 1].slot == 1);
assert((varRegType == TYP_SIMD12) || (varRegType == TYP_SIMD16));
destRegNum = varDsc->GetRegNum();
noway_assert(regNum != destRegNum);
continue;
}
#endif // (defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64)) && defined(FEATURE_SIMD)
noway_assert(destRegNum != REG_NA);
if (destRegNum != regNum)
{
/* Cannot trash a currently live register argument.
* Skip this one until its target will be free
* which is guaranteed to happen since we have no circular dependencies. */
regMaskTP destMask = genRegMask(destRegNum);
#ifdef TARGET_ARM
// Don't process the double until both halves of the destination are clear.
if (genActualType(destMemType) == TYP_DOUBLE)
{
assert((destMask & RBM_DBL_REGS) != 0);
destMask |= genRegMask(REG_NEXT(destRegNum));
}
#endif
if (destMask & regArgMaskLive)
{
continue;
}
/* Move it to the new register */
emitAttr size = emitActualTypeSize(destMemType);
#if defined(TARGET_ARM64)
if (varTypeIsSIMD(varDsc) && argNum < (argMax - 1) && regArgTab[argNum + 1].slot == 2)
{
// For a SIMD type that is passed in two integer registers,
// Limit the copy below to the first 8 bytes from the first integer register.
// Handle the remaining 8 bytes from the second slot in the code further below
assert(EA_SIZE(size) >= 8);
size = EA_8BYTE;
}
#endif
inst_Mov(destMemType, destRegNum, regNum, /* canSkip */ false, size);
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNum);
#endif // USING_SCOPE_INFO
}
/* mark the argument as processed */
assert(!regArgTab[argNum].processed);
regArgTab[argNum].processed = true;
regArgMaskLive &= ~genRegMask(regNum);
#if FEATURE_MULTIREG_ARGS
int argRegCount = 1;
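// argRegCount counts how many consecutive argument registers this argument occupies;
// any registers beyond the first are marked as processed at the bottom of this block.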
#ifdef TARGET_ARM
if (genActualType(destMemType) == TYP_DOUBLE)
{
argRegCount = 2;
}
#endif
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
if (varTypeIsStruct(varDsc) && argNum < (argMax - 1) && regArgTab[argNum + 1].slot == 2)
{
argRegCount = 2;
int nextArgNum = argNum + 1;
regNumber nextRegNum = genMapRegArgNumToRegNum(nextArgNum, regArgTab[nextArgNum].getRegType(compiler));
noway_assert(regArgTab[nextArgNum].varNum == varNum);
// Emit a shufpd with a 0 immediate, which preserves the 0th element of the dest reg
// and moves the 0th element of the src reg into the 1st element of the dest reg.
GetEmitter()->emitIns_R_R_I(INS_shufpd, emitActualTypeSize(varRegType), destRegNum, nextRegNum, 0);
// Set destRegNum to regNum so that we skip the setting of the register below,
// but mark argNum as processed and clear regNum from the live mask.
destRegNum = regNum;
}
#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#ifdef TARGET_ARMARCH
if (varDsc->lvIsHfa())
{
// This includes both fixed-size SIMD types that are independently promoted, as well
// as other HFA structs.
argRegCount = varDsc->lvHfaSlots();
if (argNum < (argMax - argRegCount + 1))
{
if (compiler->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT)
{
// For an HFA type that is passed in multiple registers and promoted, we copy each field to its
// destination register.
for (int i = 0; i < argRegCount; i++)
{
int nextArgNum = argNum + i;
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i);
regNumber nextRegNum =
genMapRegArgNumToRegNum(nextArgNum, regArgTab[nextArgNum].getRegType(compiler));
destRegNum = fieldVarDsc->GetRegNum();
noway_assert(regArgTab[nextArgNum].varNum == varNum);
noway_assert(genIsValidFloatReg(nextRegNum));
noway_assert(genIsValidFloatReg(destRegNum));
GetEmitter()->emitIns_Mov(INS_mov, EA_8BYTE, destRegNum, nextRegNum, /* canSkip */ false);
}
}
#if defined(TARGET_ARM64) && defined(FEATURE_SIMD)
else
{
// For a SIMD type that is passed in multiple registers but enregistered as a vector,
// the code above copies the first argument register into the lower 4 or 8 bytes
// of the target register. Here we must handle the subsequent fields by
// inserting them into the upper bytes of the target SIMD floating point register.
argRegCount = varDsc->lvHfaSlots();
for (int i = 1; i < argRegCount; i++)
{
int nextArgNum = argNum + i;
regArgElem* nextArgElem = ®ArgTab[nextArgNum];
var_types nextArgType = nextArgElem->getRegType(compiler);
regNumber nextRegNum = genMapRegArgNumToRegNum(nextArgNum, nextArgType);
noway_assert(nextArgElem->varNum == varNum);
noway_assert(genIsValidFloatReg(nextRegNum));
noway_assert(genIsValidFloatReg(destRegNum));
GetEmitter()->emitIns_R_R_I_I(INS_mov, EA_4BYTE, destRegNum, nextRegNum, i, 0);
}
}
#endif // defined(TARGET_ARM64) && defined(FEATURE_SIMD)
}
}
#endif // TARGET_ARMARCH
// Mark the rest of the argument registers corresponding to this multi-reg type as
// being processed and no longer live.
for (int regSlot = 1; regSlot < argRegCount; regSlot++)
{
int nextArgNum = argNum + regSlot;
assert(!regArgTab[nextArgNum].processed);
regArgTab[nextArgNum].processed = true;
regNumber nextRegNum = genMapRegArgNumToRegNum(nextArgNum, regArgTab[nextArgNum].getRegType(compiler));
regArgMaskLive &= ~genRegMask(nextRegNum);
}
#endif // FEATURE_MULTIREG_ARGS
}
noway_assert(regArgMaskLiveSave != regArgMaskLive); // if it doesn't change, we have an infinite loop
}
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
/*****************************************************************************
* If any incoming stack arguments live in registers, load them.
*/
void CodeGen::genEnregisterIncomingStackArgs()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genEnregisterIncomingStackArgs()\n");
}
#endif
// OSR handles this specially -- see genEnregisterOSRArgsAndLocals
//
assert(!compiler->opts.IsOSR());
assert(compiler->compGeneratingProlog);
unsigned varNum = 0;
for (LclVarDsc *varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
/* Is this variable a parameter? */
if (!varDsc->lvIsParam)
{
continue;
}
/* If it's a register argument then it's already been taken care of.
But, on Arm when under a profiler, we would have prespilled a register argument
and hence here we need to load it from its prespilled location.
*/
bool isPrespilledForProfiling = false;
#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED)
isPrespilledForProfiling =
compiler->compIsProfilerHookNeeded() && compiler->lvaIsPreSpilled(varNum, regSet.rsMaskPreSpillRegs(false));
#endif
if (varDsc->lvIsRegArg && !isPrespilledForProfiling)
{
continue;
}
/* Has the parameter been assigned to a register? */
if (!varDsc->lvIsInReg())
{
continue;
}
/* Is the variable dead on entry */
if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
{
continue;
}
/* Load the incoming parameter into the register */
/* Figure out the home offset of the incoming argument */
regNumber regNum = varDsc->GetArgInitReg();
assert(regNum != REG_STK);
var_types regType = varDsc->GetActualRegisterType();
GetEmitter()->emitIns_R_S(ins_Load(regType), emitTypeSize(regType), regNum, varNum, 0);
regSet.verifyRegUsed(regNum);
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNum);
#endif // USING_SCOPE_INFO
}
}
/*-------------------------------------------------------------------------
*
* We have to decide whether we're going to use block initialization
* in the prolog before we assign final stack offsets. This is because
* when using block initialization we may need additional callee-saved
* registers which need to be saved on the frame, thus increasing the
* frame size.
*
* We'll count the number of locals we have to initialize,
* and if there are lots of them we'll use block initialization.
* Thus, the local variable table must have accurate register location
* information for enregistered locals for their register state on entry
* to the function.
*
* At the same time we set lvMustInit for locals (enregistered or on stack)
* that must be initialized (e.g. when compInitMem is set, for untracked
* pointers, or when DFA is disabled)
*/
void CodeGen::genCheckUseBlockInit()
{
assert(!compiler->compGeneratingProlog);
unsigned initStkLclCnt = 0; // The number of int-sized stack local variables that need to be initialized (variables
// larger than int count for more than 1).
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
// The logic below is complex. Make sure we are not
// double-counting the initialization impact of any locals.
bool counted = false;
if (!varDsc->lvIsInReg() && !varDsc->lvOnFrame)
{
noway_assert(varDsc->lvRefCnt() == 0);
varDsc->lvMustInit = 0;
continue;
}
// Initialization of OSR locals must be handled specially
if (compiler->lvaIsOSRLocal(varNum))
{
varDsc->lvMustInit = 0;
continue;
}
if (compiler->fgVarIsNeverZeroInitializedInProlog(varNum))
{
varDsc->lvMustInit = 0;
continue;
}
if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
// For Compiler::PROMOTION_TYPE_DEPENDENT type of promotion, the whole struct should have been
// initialized by the parent struct. No need to set the lvMustInit bit in the
// field locals.
varDsc->lvMustInit = 0;
continue;
}
if (varDsc->lvHasExplicitInit)
{
varDsc->lvMustInit = 0;
continue;
}
const bool isTemp = varDsc->lvIsTemp;
const bool hasGCPtr = varDsc->HasGCPtr();
const bool isTracked = varDsc->lvTracked;
const bool isStruct = varTypeIsStruct(varDsc);
const bool compInitMem = compiler->info.compInitMem;
if (isTemp && !hasGCPtr)
{
varDsc->lvMustInit = 0;
continue;
}
if (compInitMem || hasGCPtr || varDsc->lvMustInit)
{
if (isTracked)
{
/* For uninitialized use of tracked variables, the liveness
* will bubble to the top (compiler->fgFirstBB) in fgInterBlockLocalVarLiveness()
*/
if (varDsc->lvMustInit ||
VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
{
/* This var must be initialized */
varDsc->lvMustInit = 1;
/* See if the variable that is on the stack will be initialized
* using rep stos - compute the total size to be zeroed */
if (varDsc->lvOnFrame)
{
if (!varDsc->lvRegister)
{
if (!varDsc->lvIsInReg() || varDsc->lvLiveInOutOfHndlr)
{
// Var is on the stack at entry.
initStkLclCnt +=
roundUp(compiler->lvaLclSize(varNum), TARGET_POINTER_SIZE) / sizeof(int);
counted = true;
}
}
else
{
// Var is partially enregistered
noway_assert(genTypeSize(varDsc->TypeGet()) > sizeof(int) &&
varDsc->GetOtherReg() == REG_STK);
initStkLclCnt += genTypeStSz(TYP_INT);
counted = true;
}
}
}
}
if (varDsc->lvOnFrame)
{
bool mustInitThisVar = false;
if (hasGCPtr && !isTracked)
{
JITDUMP("must init V%02u because it has a GC ref\n", varNum);
mustInitThisVar = true;
}
else if (hasGCPtr && isStruct)
{
// TODO-1stClassStructs: support precise liveness reporting for such structs.
JITDUMP("must init a tracked V%02u because it a struct with a GC ref\n", varNum);
mustInitThisVar = true;
}
else
{
// We are done with tracked or GC vars, now look at untracked vars without GC refs.
if (!isTracked)
{
assert(!hasGCPtr && !isTemp);
if (compInitMem)
{
JITDUMP("must init V%02u because compInitMem is set and it is not a temp\n", varNum);
mustInitThisVar = true;
}
}
}
if (mustInitThisVar)
{
varDsc->lvMustInit = true;
if (!counted)
{
initStkLclCnt += roundUp(compiler->lvaLclSize(varNum), TARGET_POINTER_SIZE) / sizeof(int);
counted = true;
}
}
}
}
}
/* Don't forget about spill temps that hold pointers */
assert(regSet.tmpAllFree());
for (TempDsc* tempThis = regSet.tmpListBeg(); tempThis != nullptr; tempThis = regSet.tmpListNxt(tempThis))
{
if (varTypeIsGC(tempThis->tdTempType()))
{
initStkLclCnt++;
}
}
// Record number of 4 byte slots that need zeroing.
genInitStkLclCnt = initStkLclCnt;
// Decide if we will do block initialization in the prolog, or use
// a series of individual stores.
//
// Primary factor is the number of slots that need zeroing. We've
// been counting by sizeof(int) above. We assume for now we can
// only zero register width bytes per store.
//
// Current heuristic is to use block init when more than 4 stores
// are required.
//
// TODO: Consider taking into account the presence of large structs that
// potentially only need some fields set to zero.
//
// Compiler::fgVarNeedsExplicitZeroInit relies on this logic to
// find structs that are guaranteed to be block initialized.
// If this logic changes, Compiler::fgVarNeedsExplicitZeroInit needs
// to be modified.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
#if defined(TARGET_AMD64)
// We can clear using aligned SIMD so the threshold is lower,
// and it clears in order, which is better for auto-prefetching
genUseBlockInit = (genInitStkLclCnt > 4);
#else // !defined(TARGET_AMD64)
genUseBlockInit = (genInitStkLclCnt > 8);
#endif
#else
genUseBlockInit = (genInitStkLclCnt > 4);
#endif // TARGET_64BIT
if (genUseBlockInit)
{
regMaskTP maskCalleeRegArgMask = intRegState.rsCalleeRegArgMaskLiveIn;
// If there is a secret stub param, don't count it, as it will no longer
// be live when we do block init.
if (compiler->info.compPublishStubParam)
{
maskCalleeRegArgMask &= ~RBM_SECRET_STUB_PARAM;
}
#ifdef TARGET_ARM
//
// On ARM, if we are using block init, then we must force spill R4/R5/R6
// so that we can use them during the zero-initialization process.
//
int forceSpillRegCount = genCountBits(maskCalleeRegArgMask & ~regSet.rsMaskPreSpillRegs(false)) - 1;
if (forceSpillRegCount > 0)
regSet.rsSetRegsModified(RBM_R4);
if (forceSpillRegCount > 1)
regSet.rsSetRegsModified(RBM_R5);
if (forceSpillRegCount > 2)
regSet.rsSetRegsModified(RBM_R6);
#endif // TARGET_ARM
}
}
/*****************************************************************************
*
* initFltRegs -- The mask of float regs to be zeroed.
* initDblRegs -- The mask of double regs to be zeroed.
* initReg -- A zero initialized integer reg to copy from.
*
* Makes a best effort to copy between VFP/xmm regs if one is already
* initialized to 0 (ARM only). Otherwise copies from the integer register,
* which is slower.
*/
void CodeGen::genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& initDblRegs, const regNumber& initReg)
{
assert(compiler->compGeneratingProlog);
// The first float/double reg that is initialized to 0. So they can be used to
// initialize the remaining registers.
regNumber fltInitReg = REG_NA;
regNumber dblInitReg = REG_NA;
// Iterate through float/double registers and initialize them to 0 or
// copy from already initialized register of the same type.
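// Note: regMask mirrors genRegMask(reg) as we walk the FP registers; shifting it left by
// one per REG_NEXT assumes the floating-point register masks occupy contiguous bits.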
regMaskTP regMask = genRegMask(REG_FP_FIRST);
for (regNumber reg = REG_FP_FIRST; reg <= REG_FP_LAST; reg = REG_NEXT(reg), regMask <<= 1)
{
if (regMask & initFltRegs)
{
// Do we have a float register already set to 0?
if (fltInitReg != REG_NA)
{
// Copy from float.
inst_Mov(TYP_FLOAT, reg, fltInitReg, /* canSkip */ false);
}
else
{
#ifdef TARGET_ARM
// Do we have a double register initialized to 0?
if (dblInitReg != REG_NA)
{
// Copy from double.
inst_RV_RV(INS_vcvt_d2f, reg, dblInitReg, TYP_FLOAT);
}
else
{
// Copy from int.
inst_Mov(TYP_FLOAT, reg, initReg, /* canSkip */ false);
}
#elif defined(TARGET_XARCH)
// XORPS is the fastest and smallest way to initialize a XMM register to zero.
inst_RV_RV(INS_xorps, reg, reg, TYP_DOUBLE);
dblInitReg = reg;
#elif defined(TARGET_ARM64)
// We will just zero out the entire vector register. This sets it to a double/float zero value
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B);
#else // TARGET*
#error Unsupported or unset target architecture
#endif
fltInitReg = reg;
}
}
else if (regMask & initDblRegs)
{
// Do we have a double register already set to 0?
if (dblInitReg != REG_NA)
{
// Copy from double.
inst_Mov(TYP_DOUBLE, reg, dblInitReg, /* canSkip */ false);
}
else
{
#ifdef TARGET_ARM
// Do we have a float register initialized to 0?
if (fltInitReg != REG_NA)
{
// Copy from float.
inst_RV_RV(INS_vcvt_f2d, reg, fltInitReg, TYP_DOUBLE);
}
else
{
// Copy from int.
inst_RV_RV_RV(INS_vmov_i2d, reg, initReg, initReg, EA_8BYTE);
}
#elif defined(TARGET_XARCH)
// XORPS is the fastest and smallest way to initialize a XMM register to zero.
inst_RV_RV(INS_xorps, reg, reg, TYP_DOUBLE);
fltInitReg = reg;
#elif defined(TARGET_ARM64)
// We will just zero out the entire vector register. This sets it to a double/float zero value
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B);
#else // TARGET*
#error Unsupported or unset target architecture
#endif
dblInitReg = reg;
}
}
}
}
// We need a register with value zero. Zero the initReg, if necessary, and set *pInitRegZeroed if so.
// Return the register to use. On ARM64, we never touch the initReg, and always just return REG_ZR.
regNumber CodeGen::genGetZeroReg(regNumber initReg, bool* pInitRegZeroed)
{
#ifdef TARGET_ARM64
return REG_ZR;
#else // !TARGET_ARM64
if (*pInitRegZeroed == false)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg);
*pInitRegZeroed = true;
}
return initReg;
#endif // !TARGET_ARM64
}
//-----------------------------------------------------------------------------
// genZeroInitFrame: Zero any untracked pointer locals and/or initialize memory for locspace
//
// Arguments:
// untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init
// code will end initializing memory (not inclusive).
// untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will
// start zero initializing memory.
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (genUseBlockInit)
{
genZeroInitFrameUsingBlockInit(untrLclHi, untrLclLo, initReg, pInitRegZeroed);
}
else if (genInitStkLclCnt > 0)
{
assert((genRegMask(initReg) & intRegState.rsCalleeRegArgMaskLiveIn) == 0); // initReg is not a live incoming
// argument reg
/* Initialize any lvMustInit vars on the stack */
LclVarDsc* varDsc;
unsigned varNum;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
if (!varDsc->lvMustInit)
{
continue;
}
// TODO-Review: I'm not sure that we're correctly handling the mustInit case for
// partially-enregistered vars in the case where we don't use a block init.
noway_assert(varDsc->lvIsInReg() || varDsc->lvOnFrame);
// lvMustInit can only be set for GC types or TYP_STRUCT types
// or when compInitMem is true
// or when in debug code
noway_assert(varTypeIsGC(varDsc->TypeGet()) || (varDsc->TypeGet() == TYP_STRUCT) ||
compiler->info.compInitMem || compiler->opts.compDbgCode);
if (!varDsc->lvOnFrame)
{
continue;
}
if ((varDsc->TypeGet() == TYP_STRUCT) && !compiler->info.compInitMem &&
(varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
// We only initialize the GC variables in the TYP_STRUCT
const unsigned slots = (unsigned)compiler->lvaLclSize(varNum) / REGSIZE_BYTES;
ClassLayout* layout = varDsc->GetLayout();
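// Walk the layout one pointer-sized slot at a time and zero only the slots that hold GC references.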
for (unsigned i = 0; i < slots; i++)
{
if (layout->IsGCPtr(i))
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE,
genGetZeroReg(initReg, pInitRegZeroed), varNum, i * REGSIZE_BYTES);
}
}
}
else
{
regNumber zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
// zero out the whole thing rounded up to a single stack slot size
unsigned lclSize = roundUp(compiler->lvaLclSize(varNum), (unsigned)sizeof(int));
unsigned i;
for (i = 0; i + REGSIZE_BYTES <= lclSize; i += REGSIZE_BYTES)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, varNum, i);
}
#ifdef TARGET_64BIT
assert(i == lclSize || (i + sizeof(int) == lclSize));
if (i != lclSize)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, varNum, i);
i += sizeof(int);
}
#endif // TARGET_64BIT
assert(i == lclSize);
}
}
assert(regSet.tmpAllFree());
for (TempDsc* tempThis = regSet.tmpListBeg(); tempThis != nullptr; tempThis = regSet.tmpListNxt(tempThis))
{
if (!varTypeIsGC(tempThis->tdTempType()))
{
continue;
}
// printf("initialize untracked spillTmp [EBP-%04X]\n", stkOffs);
inst_ST_RV(ins_Store(TYP_I_IMPL), tempThis, 0, genGetZeroReg(initReg, pInitRegZeroed), TYP_I_IMPL);
}
}
}
//-----------------------------------------------------------------------------
// genEnregisterOSRArgsAndLocals: Initialize any enregistered args or locals
// that get values from the tier0 frame.
//
// Arguments:
// initReg -- scratch register to use if needed
// pInitRegZeroed -- [IN,OUT] if init reg is zero (on entry/exit)
//
#if defined(TARGET_ARM64)
void CodeGen::genEnregisterOSRArgsAndLocals(regNumber initReg, bool* pInitRegZeroed)
#else
void CodeGen::genEnregisterOSRArgsAndLocals()
#endif
{
assert(compiler->opts.IsOSR());
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
// basic sanity checks (make sure we're OSRing the right method)
assert(patchpointInfo->NumberOfLocals() == compiler->info.compLocalsCount);
const int originalFrameSize = patchpointInfo->TotalFrameSize();
const unsigned patchpointInfoLen = patchpointInfo->NumberOfLocals();
for (unsigned varNum = 0; varNum < compiler->lvaCount; varNum++)
{
if (!compiler->lvaIsOSRLocal(varNum))
{
// This local was not part of the tier0 method's state.
// No work required.
//
continue;
}
LclVarDsc* const varDsc = compiler->lvaGetDesc(varNum);
if (!varDsc->lvIsInReg())
{
// For args/locals in memory, the OSR frame will continue to access
// that memory location. No work required.
//
JITDUMP("---OSR--- V%02u in memory\n", varNum);
continue;
}
// This local was part of the live tier0 state and is enregistered in the
// OSR method. Initialize the register from the right frame slot.
//
// If we ever enable promotion we'll need to generalize what follows to copy each
// field from the tier0 frame to its OSR home.
//
if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
{
// This arg or local is not live at entry to the OSR method.
// No work required.
//
JITDUMP("---OSR--- V%02u (reg) not live at entry\n", varNum);
continue;
}
int fieldOffset = 0;
unsigned lclNum = varNum;
if (varDsc->lvIsStructField)
{
lclNum = varDsc->lvParentLcl;
assert(lclNum < patchpointInfoLen);
fieldOffset = varDsc->lvFldOffset;
JITDUMP("---OSR--- V%02u is promoted field of V%02u at offset %d\n", varNum, lclNum, fieldOffset);
}
// Note we are always reading from the tier0 frame here
//
const var_types lclTyp = varDsc->GetActualRegisterType();
const emitAttr size = emitTypeSize(lclTyp);
const int stkOffs = patchpointInfo->Offset(lclNum) + fieldOffset;
#if defined(TARGET_AMD64)
// Original frames always use frame pointers, so
// stkOffs is the tier0 frame's frame-relative offset
// to the variable.
//
// We need to determine the stack or frame-pointer relative
// offset for this variable in the current frame.
//
// If current frame does not use a frame pointer, we need to
// add the SP-to-FP delta of this frame and the SP-to-FP delta
// of the original frame; that translates from this frame's
// stack pointer to the old frame's frame pointer.
//
// We then add the original frame's frame-pointer relative
// offset (note this offset is usually negative -- the stack
// grows down, so locals are below the frame pointer).
//
// /-----original frame-----/
// / return address /
// / saved RBP --+ / <--- Original frame ptr --+
// / ... | / |
// / ... (stkOffs) / |
// / ... | / |
// / variable --+ / |
// / ... / (original frame sp-fp delta)
// / ... / |
// /-----OSR frame ---------/ |
// / pseudo return address / --+
// / ... / |
// / ... / (this frame sp-fp delta)
// / ... / |
// /------------------------/ <--- Stack ptr --+
//
// If the current frame is using a frame pointer, we need to
// add the SP-to-FP delta of the original frame and then add
// the original frame's frame-pointer relative offset.
//
// /-----original frame-----/
// / return address /
// / saved RBP --+ / <--- Original frame ptr --+
// / ... | / |
// / ... (stkOffs) / |
// / ... | / |
// / variable --+ / |
// / ... / (original frame sp-fp delta)
// / ... / |
// /-----OSR frame ---------/ |
// / pseudo return address / --+
// / saved RBP / <--- Frame ptr --+
// / ... /
// / ... /
// / ... /
// /------------------------/
//
int offset = originalFrameSize + stkOffs;
if (isFramePointerUsed())
{
// also adjust for the saved RBP on this frame
offset += TARGET_POINTER_SIZE;
}
else
{
offset += genSPtoFPdelta();
}
JITDUMP("---OSR--- V%02u (reg) old rbp offset %d old frame %d this frame sp-fp %d new offset %d (%02xH)\n",
varNum, stkOffs, originalFrameSize, genSPtoFPdelta(), offset, offset);
GetEmitter()->emitIns_R_AR(ins_Load(lclTyp), size, varDsc->GetRegNum(), genFramePointerReg(), offset);
#elif defined(TARGET_ARM64)
// Patchpoint offset is from top of Tier0 frame
//
// We need to determine the frame-pointer relative
// offset for this variable in the osr frame.
//
// First add the Tier0 frame size
//
const int tier0FrameSize = compiler->info.compPatchpointInfo->TotalFrameSize();
// then add the OSR frame size
//
const int osrFrameSize = genTotalFrameSize();
// then subtract OSR SP-FP delta
//
const int osrSpToFpDelta = genSPtoFPdelta();
// | => tier0 top of frame relative
// | + => tier0 bottom of frame relative
// | | + => osr bottom of frame (sp) relative
// | | | - => osr fp relative
// | | | |
const int offset = stkOffs + tier0FrameSize + osrFrameSize - osrSpToFpDelta;
JITDUMP("---OSR--- V%02u (reg) Tier0 virtual offset %d OSR frame size %d OSR sp-fp "
"delta %d total offset %d (0x%x)\n",
varNum, stkOffs, osrFrameSize, osrSpToFpDelta, offset, offset);
genInstrWithConstant(ins_Load(lclTyp), size, varDsc->GetRegNum(), genFramePointerReg(), offset, initReg);
*pInitRegZeroed = false;
#endif
}
}
/*-----------------------------------------------------------------------------
*
* Save the generic context argument.
*
* We need to do this within the "prolog" in case anyone tries to inspect
* the param-type-arg/this (which can be done after the prolog) using
* ICodeManager::GetParamTypeArg().
*/
void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
const bool reportArg = compiler->lvaReportParamTypeArg();
if (compiler->opts.IsOSR())
{
PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
if (reportArg)
{
// OSR method will use Tier0 slot to report context arg.
//
assert(ppInfo->HasGenericContextArgOffset());
JITDUMP("OSR method will use Tier0 frame slot for generics context arg.\n");
}
else if (compiler->lvaKeepAliveAndReportThis())
{
// OSR method will use Tier0 slot to report `this` as context.
//
assert(ppInfo->HasKeptAliveThis());
JITDUMP("OSR method will use Tier0 frame slot for generics context `this`.\n");
}
return;
}
// We report either the generic context arg or "this", whichever the method uses as its generics context.
if (!reportArg)
{
#ifndef JIT32_GCENCODER
if (!compiler->lvaKeepAliveAndReportThis())
#endif
{
return;
}
}
// For JIT32_GCENCODER, we won't be here if reportArg is false.
unsigned contextArg = reportArg ? compiler->info.compTypeCtxtArg : compiler->info.compThisArg;
noway_assert(contextArg != BAD_VAR_NUM);
LclVarDsc* varDsc = compiler->lvaGetDesc(contextArg);
// We are still in the prolog and compiler->info.compTypeCtxtArg has not been
// moved to its final home location. So we need to use it from the
// incoming location.
regNumber reg;
bool isPrespilledForProfiling = false;
#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED)
isPrespilledForProfiling =
compiler->compIsProfilerHookNeeded() && compiler->lvaIsPreSpilled(contextArg, regSet.rsMaskPreSpillRegs(false));
#endif
// Load from the argument register only if it is not prespilled.
if (compiler->lvaIsRegArgument(contextArg) && !isPrespilledForProfiling)
{
reg = varDsc->GetArgReg();
}
else
{
if (isFramePointerUsed())
{
#if defined(TARGET_ARM)
// GetStackOffset() is always valid for incoming stack-arguments, even if the argument
// will become enregistered.
// On Arm compiler->compArgSize doesn't include r11 and lr sizes and hence we need to add 2*REGSIZE_BYTES
noway_assert((2 * REGSIZE_BYTES <= varDsc->GetStackOffset()) &&
(size_t(varDsc->GetStackOffset()) < compiler->compArgSize + 2 * REGSIZE_BYTES));
#else
// GetStackOffset() is always valid for incoming stack-arguments, even if the argument
// will become enregistered.
noway_assert((0 < varDsc->GetStackOffset()) && (size_t(varDsc->GetStackOffset()) < compiler->compArgSize));
#endif
}
// We will just use the initReg since it is an available register
// and we are probably done using it anyway...
reg = initReg;
*pInitRegZeroed = false;
// mov reg, [compiler->info.compTypeCtxtArg]
GetEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
varDsc->GetStackOffset());
regSet.verifyRegUsed(reg);
}
#if defined(TARGET_ARM64)
genInstrWithConstant(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
compiler->lvaCachedGenericContextArgOffset(), rsGetRsvdReg());
#elif defined(TARGET_ARM)
// ARM's emitIns_R_R_I automatically uses the reserved register if necessary.
GetEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
compiler->lvaCachedGenericContextArgOffset());
#else // !ARM64 !ARM
// mov [ebp-lvaCachedGenericContextArgOffset()], reg
GetEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
compiler->lvaCachedGenericContextArgOffset());
#endif // !ARM64 !ARM
}
/*****************************************************************************
Esp frames :
----------
These instructions are just a reordering of the instructions used today.
push ebp
push esi
push edi
push ebx
sub esp, LOCALS_SIZE / push dummyReg if LOCALS_SIZE=sizeof(void*)
...
add esp, LOCALS_SIZE / pop dummyReg
pop ebx
pop edi
pop esi
pop ebp
ret
Ebp frames :
----------
The epilog does "add esp, LOCALS_SIZE" instead of "mov ebp, esp".
Everything else is similar, though in a different order.
The security object will no longer be at a fixed offset. However, the
offset can still be determined by looking up the GC-info and determining
how many callee-saved registers are pushed.
push ebp
mov ebp, esp
push esi
push edi
push ebx
sub esp, LOCALS_SIZE / push dummyReg if LOCALS_SIZE=sizeof(void*)
...
add esp, LOCALS_SIZE / pop dummyReg
pop ebx
pop edi
pop esi
(mov esp, ebp if there are no callee-saved registers)
pop ebp
ret
Double-aligned frame :
--------------------
LOCALS_SIZE_ADJUSTED needs to include an unused DWORD if an odd number
of callee-saved registers are pushed on the stack so that the locals
themselves are qword-aligned. The instructions are the same as today,
just in a different order.
push ebp
mov ebp, esp
and esp, 0xFFFFFFFC
push esi
push edi
push ebx
sub esp, LOCALS_SIZE_ADJUSTED / push dummyReg if LOCALS_SIZE=sizeof(void*)
...
add esp, LOCALS_SIZE_ADJUSTED / pop dummyReg
pop ebx
pop edi
pop esi
pop ebp
mov esp, ebp
pop ebp
ret
localloc (with ebp) frames :
--------------------------
The instructions are the same as today, just in a different order.
Also, today the epilog does "lea esp, [ebp-LOCALS_SIZE-calleeSavedRegsPushedSize]"
which will change to "lea esp, [ebp-calleeSavedRegsPushedSize]".
push ebp
mov ebp, esp
push esi
push edi
push ebx
sub esp, LOCALS_SIZE / push dummyReg if LOCALS_SIZE=sizeof(void*)
...
lea esp, [ebp-calleeSavedRegsPushedSize]
pop ebx
pop edi
pop esi
(mov esp, ebp if there are no callee-saved registers)
pop ebp
ret
*****************************************************************************/
/*****************************************************************************
*
* Reserve space for a function prolog.
*/
void CodeGen::genReserveProlog(BasicBlock* block)
{
assert(block != nullptr);
JITDUMP("Reserving prolog IG for block " FMT_BB "\n", block->bbNum);
/* Nothing is live on entry to the prolog */
GetEmitter()->emitCreatePlaceholderIG(IGPT_PROLOG, block, VarSetOps::MakeEmpty(compiler), 0, 0, false);
}
/*****************************************************************************
*
* Reserve space for a function epilog.
*/
void CodeGen::genReserveEpilog(BasicBlock* block)
{
regMaskTP gcrefRegsArg = gcInfo.gcRegGCrefSetCur;
regMaskTP byrefRegsArg = gcInfo.gcRegByrefSetCur;
/* The return value is special-cased: make sure it goes live for the epilog */
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
if (IsFullPtrRegMapRequired() && !jmpEpilog)
{
if (varTypeIsGC(compiler->info.compRetNativeType))
{
noway_assert(genTypeStSz(compiler->info.compRetNativeType) == genTypeStSz(TYP_I_IMPL));
gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
switch (compiler->info.compRetNativeType)
{
case TYP_REF:
gcrefRegsArg |= RBM_INTRET;
break;
case TYP_BYREF:
byrefRegsArg |= RBM_INTRET;
break;
default:
break;
}
JITDUMP("Extending return value GC liveness to epilog\n");
}
}
JITDUMP("Reserving epilog IG for block " FMT_BB "\n", block->bbNum);
assert(block != nullptr);
const VARSET_TP& gcrefVarsArg(GetEmitter()->emitThisGCrefVars);
bool last = (block->bbNext == nullptr);
GetEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg, last);
}
#if defined(FEATURE_EH_FUNCLETS)
/*****************************************************************************
*
* Reserve space for a funclet prolog.
*/
void CodeGen::genReserveFuncletProlog(BasicBlock* block)
{
assert(block != nullptr);
/* Currently, no registers are live on entry to the prolog, except maybe
the exception object. There might be some live stack vars, but they
cannot be accessed until after the frame pointer is re-established.
In order to potentially prevent emitting a death before the prolog
and a birth right after it, we just report it as live during the
prolog, and rely on the prolog being non-interruptible. Trust
genCodeForBBlist to correctly initialize all the sets.
We might need to relax these asserts if the VM ever starts
restoring any registers, then we could have live-in reg vars...
*/
noway_assert((gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT) == gcInfo.gcRegGCrefSetCur);
noway_assert(gcInfo.gcRegByrefSetCur == 0);
JITDUMP("Reserving funclet prolog IG for block " FMT_BB "\n", block->bbNum);
GetEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_PROLOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, false);
}
/*****************************************************************************
*
* Reserve space for a funclet epilog.
*/
void CodeGen::genReserveFuncletEpilog(BasicBlock* block)
{
assert(block != nullptr);
JITDUMP("Reserving funclet epilog IG for block " FMT_BB "\n", block->bbNum);
bool last = (block->bbNext == nullptr);
GetEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_EPILOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, last);
}
#endif // FEATURE_EH_FUNCLETS
/*****************************************************************************
* Finalize the frame size and offset assignments.
*
* No changes can be made to the modified register set after this, since that can affect how many
* callee-saved registers get saved.
*/
void CodeGen::genFinalizeFrame()
{
JITDUMP("Finalizing stack frame\n");
// Initializations need to happen based on the var locations at the start
// of the first basic block, so load those up. In particular, the determination
// of whether or not to use block init in the prolog is dependent on the variable
// locations on entry to the function.
compiler->m_pLinearScan->recordVarLocationsAtStartOfBB(compiler->fgFirstBB);
genCheckUseBlockInit();
// Set various registers as "modified" for special code generation scenarios: Edit & Continue, P/Invoke calls, etc.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86)
if (compiler->compTailCallUsed)
{
// If we are generating a helper-based tailcall, we've set the tailcall helper "flags"
// argument to "1", indicating to the tailcall helper that we've saved the callee-saved
// registers (ebx, esi, edi). So, we need to make sure all the callee-saved registers
// actually get saved.
regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED);
}
#endif // TARGET_X86
#ifdef TARGET_ARM
// Make sure that callee-saved registers used by call to a stack probing helper generated are pushed on stack.
if (compiler->compLclFrameSize >= compiler->eeGetPageSize())
{
regSet.rsSetRegsModified(RBM_STACK_PROBE_HELPER_ARG | RBM_STACK_PROBE_HELPER_CALL_TARGET |
RBM_STACK_PROBE_HELPER_TRASH);
}
// If there are any reserved registers, add them to the modified set.
if (regSet.rsMaskResvd != RBM_NONE)
{
regSet.rsSetRegsModified(regSet.rsMaskResvd);
}
#endif // TARGET_ARM
#ifdef DEBUG
if (verbose)
{
printf("Modified regs: ");
dspRegMask(regSet.rsGetModifiedRegsMask());
printf("\n");
}
#endif // DEBUG
// Set various registers as "modified" for special code generation scenarios: Edit & Continue, P/Invoke calls, etc.
if (compiler->opts.compDbgEnC)
{
// We always save FP.
noway_assert(isFramePointerUsed());
#ifdef TARGET_AMD64
// On x64 we always save exactly RBP, RSI and RDI for EnC.
regMaskTP okRegs = (RBM_CALLEE_TRASH | RBM_FPBASE | RBM_RSI | RBM_RDI);
regSet.rsSetRegsModified(RBM_RSI | RBM_RDI);
noway_assert((regSet.rsGetModifiedRegsMask() & ~okRegs) == 0);
#else // !TARGET_AMD64
// On x86 we save all callee saved regs so the saved reg area size is consistent
regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED & ~RBM_FPBASE);
#endif // !TARGET_AMD64
}
/* If we have any pinvoke calls, we might potentially trash everything */
if (compiler->compMethodRequiresPInvokeFrame())
{
noway_assert(isFramePointerUsed()); // Setup of Pinvoke frame currently requires an EBP style frame
regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED & ~RBM_FPBASE);
}
#ifdef UNIX_AMD64_ABI
// On Unix x64 we also save R14 and R15 for ELT profiler hook generation.
if (compiler->compIsProfilerHookNeeded())
{
regSet.rsSetRegsModified(RBM_PROFILER_ENTER_ARG_0 | RBM_PROFILER_ENTER_ARG_1);
}
#endif
/* Count how many callee-saved registers will actually be saved (pushed) */
// EBP cannot be (directly) modified for EBP frames and double-aligned frames
noway_assert(!doubleAlignOrFramePointerUsed() || !regSet.rsRegsModified(RBM_FPBASE));
#if ETW_EBP_FRAMED
// EBP cannot be (directly) modified
noway_assert(!regSet.rsRegsModified(RBM_FPBASE));
#endif
regMaskTP maskCalleeRegsPushed = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED;
#ifdef TARGET_ARMARCH
if (isFramePointerUsed())
{
// For a FP based frame we have to push/pop the FP register
//
maskCalleeRegsPushed |= RBM_FPBASE;
// This assert checks that we are not using REG_FP
// as both the frame pointer and as a codegen register
//
assert(!regSet.rsRegsModified(RBM_FPBASE));
}
// we always push LR. See genPushCalleeSavedRegisters
//
maskCalleeRegsPushed |= RBM_LR;
#if defined(TARGET_ARM)
// TODO-ARM64-Bug?: enable some variant of this for FP on ARM64?
regMaskTP maskPushRegsFloat = maskCalleeRegsPushed & RBM_ALLFLOAT;
regMaskTP maskPushRegsInt = maskCalleeRegsPushed & ~maskPushRegsFloat;
if ((maskPushRegsFloat != RBM_NONE) ||
(compiler->opts.MinOpts() && (regSet.rsMaskResvd & maskCalleeRegsPushed & RBM_OPT_RSVD)))
{
// Here we try to keep the stack double-aligned before the vpush
if ((genCountBits(regSet.rsMaskPreSpillRegs(true) | maskPushRegsInt) % 2) != 0)
{
regNumber extraPushedReg = REG_R4;
while (maskPushRegsInt & genRegMask(extraPushedReg))
{
extraPushedReg = REG_NEXT(extraPushedReg);
}
if (extraPushedReg < REG_R11)
{
maskPushRegsInt |= genRegMask(extraPushedReg);
regSet.rsSetRegsModified(genRegMask(extraPushedReg));
}
}
maskCalleeRegsPushed = maskPushRegsInt | maskPushRegsFloat;
}
// We currently only expect to push/pop consecutive FP registers
// and these have to be double-sized registers as well.
// Here we will ensure that maskPushRegsFloat obeys these requirements.
//
if (maskPushRegsFloat != RBM_NONE)
{
regMaskTP contiguousMask = genRegMaskFloat(REG_F16, TYP_DOUBLE);
while (maskPushRegsFloat > contiguousMask)
{
contiguousMask <<= 2;
contiguousMask |= genRegMaskFloat(REG_F16, TYP_DOUBLE);
}
if (maskPushRegsFloat != contiguousMask)
{
regMaskTP maskExtraRegs = contiguousMask - maskPushRegsFloat;
maskPushRegsFloat |= maskExtraRegs;
regSet.rsSetRegsModified(maskExtraRegs);
maskCalleeRegsPushed |= maskExtraRegs;
}
}
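    // For example, if only d8 and d10 were modified, d9 is added above so that the contiguous
    // range d8-d10 is pushed (and later popped) as a unit.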
#endif // TARGET_ARM
#endif // TARGET_ARMARCH
#if defined(TARGET_XARCH)
// Compute the count of callee saved float regs saved on stack.
// On Amd64 we push only integer regs. Callee saved float (xmm6-xmm15)
// regs are stack allocated and preserved in their stack locations.
compiler->compCalleeFPRegsSavedMask = maskCalleeRegsPushed & RBM_FLT_CALLEE_SAVED;
maskCalleeRegsPushed &= ~RBM_FLT_CALLEE_SAVED;
#endif // defined(TARGET_XARCH)
compiler->compCalleeRegsPushed = genCountBits(maskCalleeRegsPushed);
#ifdef DEBUG
if (verbose)
{
printf("Callee-saved registers pushed: %d ", compiler->compCalleeRegsPushed);
dspRegMask(maskCalleeRegsPushed);
printf("\n");
}
#endif // DEBUG
/* Assign the final offsets to things living on the stack frame */
compiler->lvaAssignFrameOffsets(Compiler::FINAL_FRAME_LAYOUT);
    /* We want to make sure that the prolog size calculated here is accurate
       (that is, instructions will not shrink because of conservative stack
       frame approximations). We do this by filling in the correct size
       here, where we have committed to the final numbers for the frame offsets.
       This ensures that the prolog size is always correct.
    */
GetEmitter()->emitMaxTmpSize = regSet.tmpGetTotalSize();
#ifdef DEBUG
if (compiler->opts.dspCode || compiler->opts.disAsm || compiler->opts.disAsm2 || verbose)
{
compiler->lvaTableDump();
}
#endif
}
/*****************************************************************************
*
* Generates code for a function prolog.
*
* NOTE REGARDING CHANGES THAT IMPACT THE DEBUGGER:
*
* The debugger relies on decoding ARM instructions to be able to successfully step through code. It does not
* implement decoding all ARM instructions. It only implements decoding the instructions which the JIT emits, and
* only instructions which result in control not going to the next instruction. Basically, any time execution would
* not continue at the next instruction (such as B, BL, BX, BLX, POP{pc}, etc.), the debugger has to be able to
* decode that instruction. If any of this is changed on ARM, the debugger team needs to be notified so that it
* can ensure stepping isn't broken. This is also a requirement for x86 and amd64.
*
* If any changes are made in the prolog, epilog, calls, returns, and branches, it is a good idea to notify the
* debugger team to ensure that stepping still works.
*
* ARM stepping code is here: debug\ee\arm\armwalker.cpp, vm\arm\armsinglestepper.cpp.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
void CodeGen::genFnProlog()
{
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
compiler->funSetCurrentFunc(0);
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFnProlog()\n");
}
#endif
#ifdef DEBUG
genInterruptibleUsed = true;
#endif
assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT);
/* Ready to start on the prolog proper */
GetEmitter()->emitBegProlog();
compiler->unwindBegProlog();
// Do this so we can put the prolog instruction group ahead of
// other instruction groups
genIPmappingAddToFront(IPmappingDscKind::Prolog, DebugInfo(), true);
#ifdef DEBUG
if (compiler->opts.dspCode)
{
printf("\n__prolog:\n");
}
#endif
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
{
// Create new scopes for the method-parameters for the prolog-block.
psiBegProlog();
}
#if defined(TARGET_ARM64)
// For arm64 OSR, emit a "phantom prolog" to account for the actions taken
// in the tier0 frame that impact FP and SP on entry to the OSR method.
//
// x64 handles this differently; the phantom prolog unwind is emitted in
// genOSRRecordTier0CalleeSavedRegistersAndFrame.
//
if (compiler->opts.IsOSR())
{
PatchpointInfo* patchpointInfo = compiler->info.compPatchpointInfo;
const int tier0FrameSize = patchpointInfo->TotalFrameSize();
// SP is tier0 method's SP.
compiler->unwindAllocStack(tier0FrameSize);
}
#endif // defined(TARGET_ARM64)
#ifdef DEBUG
if (compiler->compJitHaltMethod())
{
/* put a nop first because the debugger and other tools are likely to
put an int3 at the beginning and we don't want to confuse them */
instGen(INS_nop);
instGen(INS_BREAKPOINT);
#ifdef TARGET_ARMARCH
// Avoid asserts in the unwind info because these instructions aren't accounted for.
compiler->unwindPadding();
#endif // TARGET_ARMARCH
}
#endif // DEBUG
#if defined(FEATURE_EH_FUNCLETS) && defined(DEBUG)
// We cannot force 0-initialization of the PSPSym
// as it will overwrite the real value
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(compiler->lvaPSPSym);
assert(!varDsc->lvMustInit);
}
#endif // FEATURE_EH_FUNCLETS && DEBUG
/*-------------------------------------------------------------------------
*
* Record the stack frame ranges that will cover all of the tracked
* and untracked pointer variables.
* Also find which registers will need to be zero-initialized.
*
* 'initRegs': - Generally, enregistered variables should not need to be
* zero-inited. They only need to be zero-inited when they
* have a possibly uninitialized read on some control
* flow path. Apparently some of the IL_STUBs that we
* generate have this property.
*/
int untrLclLo = +INT_MAX;
int untrLclHi = -INT_MAX;
// 'hasUntrLcl' is true if there are any stack locals which must be init'ed.
// Note that they may be tracked, but simply not allocated to a register.
bool hasUntrLcl = false;
int GCrefLo = +INT_MAX;
int GCrefHi = -INT_MAX;
bool hasGCRef = false;
regMaskTP initRegs = RBM_NONE; // Registers which must be init'ed.
regMaskTP initFltRegs = RBM_NONE; // FP registers which must be init'ed.
regMaskTP initDblRegs = RBM_NONE;
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
if (varDsc->lvIsParam && !varDsc->lvIsRegArg)
{
continue;
}
if (!varDsc->lvIsInReg() && !varDsc->lvOnFrame)
{
noway_assert(varDsc->lvRefCnt() == 0);
continue;
}
signed int loOffs = varDsc->GetStackOffset();
signed int hiOffs = varDsc->GetStackOffset() + compiler->lvaLclSize(varNum);
/* We need to know the offset range of tracked stack GC refs */
/* We assume that the GC reference can be anywhere in the TYP_STRUCT */
if (varDsc->HasGCPtr() && varDsc->lvTrackedNonStruct() && varDsc->lvOnFrame)
{
// For fields of PROMOTION_TYPE_DEPENDENT type of promotion, they should have been
// taken care of by the parent struct.
if (!compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
hasGCRef = true;
if (loOffs < GCrefLo)
{
GCrefLo = loOffs;
}
if (hiOffs > GCrefHi)
{
GCrefHi = hiOffs;
}
}
}
/* For lvMustInit vars, gather pertinent info */
if (!varDsc->lvMustInit)
{
continue;
}
bool isInReg = varDsc->lvIsInReg();
bool isInMemory = !isInReg || varDsc->lvLiveInOutOfHndlr;
// Note that 'lvIsInReg()' will only be accurate for variables that are actually live-in to
// the first block. This will include all possibly-uninitialized locals, whose liveness
// will naturally propagate up to the entry block. However, we also set 'lvMustInit' for
// locals that are live-in to a finally block, and those may not be live-in to the first
// block. For those, we don't want to initialize the register, as it will not actually be
// occupying it on entry.
if (isInReg)
{
if (compiler->lvaEnregEHVars && varDsc->lvLiveInOutOfHndlr)
{
isInReg = VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex);
}
else
{
assert(VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex));
}
}
if (isInReg)
{
regNumber regForVar = varDsc->GetRegNum();
regMaskTP regMask = genRegMask(regForVar);
if (!genIsValidFloatReg(regForVar))
{
initRegs |= regMask;
if (varTypeIsMultiReg(varDsc))
{
if (varDsc->GetOtherReg() != REG_STK)
{
initRegs |= genRegMask(varDsc->GetOtherReg());
}
else
{
/* Upper DWORD is on the stack, and needs to be inited */
loOffs += sizeof(int);
goto INIT_STK;
}
}
}
else if (varDsc->TypeGet() == TYP_DOUBLE)
{
initDblRegs |= regMask;
}
else
{
initFltRegs |= regMask;
}
}
if (isInMemory)
{
INIT_STK:
hasUntrLcl = true;
if (loOffs < untrLclLo)
{
untrLclLo = loOffs;
}
if (hiOffs > untrLclHi)
{
untrLclHi = hiOffs;
}
}
}
/* Don't forget about spill temps that hold pointers */
assert(regSet.tmpAllFree());
for (TempDsc* tempThis = regSet.tmpListBeg(); tempThis != nullptr; tempThis = regSet.tmpListNxt(tempThis))
{
if (!varTypeIsGC(tempThis->tdTempType()))
{
continue;
}
signed int loOffs = tempThis->tdTempOffs();
signed int hiOffs = loOffs + TARGET_POINTER_SIZE;
        // If there is a frame pointer used, due to frame pointer chaining it will point to the stored value of the
        // previous frame pointer. Thus, loOffs can't be zero.
CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_AMD64)
// However, on amd64 there is no requirement to chain frame pointers.
noway_assert(!isFramePointerUsed() || loOffs != 0);
#endif // !defined(TARGET_AMD64)
// printf(" Untracked tmp at [EBP-%04X]\n", -stkOffs);
hasUntrLcl = true;
if (loOffs < untrLclLo)
{
untrLclLo = loOffs;
}
if (hiOffs > untrLclHi)
{
untrLclHi = hiOffs;
}
}
// TODO-Cleanup: Add suitable assert for the OSR case.
assert(compiler->opts.IsOSR() || ((genInitStkLclCnt > 0) == hasUntrLcl));
#ifdef DEBUG
if (verbose)
{
if (genInitStkLclCnt > 0)
{
printf("Found %u lvMustInit int-sized stack slots, frame offsets %d through %d\n", genInitStkLclCnt,
-untrLclLo, -untrLclHi);
}
}
#endif
#ifdef TARGET_ARM
    // On ARM we will spill any incoming struct args in the first instruction of the prolog.
    // Ditto for all enregistered user arguments in a varargs method.
    // These registers will then be available to use for the initReg, so we just remove
    // all of these registers from rsCalleeRegArgMaskLiveIn.
//
intRegState.rsCalleeRegArgMaskLiveIn &= ~regSet.rsMaskPreSpillRegs(false);
#endif
/* Choose the register to use for zero initialization */
regNumber initReg = REG_SCRATCH; // Unless we find a better register below
    // Track whether initReg holds a non-zero value. Start conservatively and assume it does.
    // If initReg is ever set to zero, this variable is set to true and subsequent zero
    // initializations of initReg will be skipped.
bool initRegZeroed = false;
regMaskTP excludeMask = intRegState.rsCalleeRegArgMaskLiveIn;
regMaskTP tempMask;
// We should not use the special PINVOKE registers as the initReg
// since they are trashed by the jithelper call to setup the PINVOKE frame
if (compiler->compMethodRequiresPInvokeFrame())
{
excludeMask |= RBM_PINVOKE_FRAME;
assert((!compiler->opts.ShouldUsePInvokeHelpers()) || (compiler->info.compLvFrameListRoot == BAD_VAR_NUM));
if (!compiler->opts.ShouldUsePInvokeHelpers())
{
excludeMask |= (RBM_PINVOKE_TCB | RBM_PINVOKE_SCRATCH);
// We also must exclude the register used by compLvFrameListRoot when it is enregistered
//
const LclVarDsc* varDsc = compiler->lvaGetDesc(compiler->info.compLvFrameListRoot);
if (varDsc->lvRegister)
{
excludeMask |= genRegMask(varDsc->GetRegNum());
}
}
}
#ifdef TARGET_ARM
// If we have a variable sized frame (compLocallocUsed is true)
// then using REG_SAVED_LOCALLOC_SP in the prolog is not allowed
if (compiler->compLocallocUsed)
{
excludeMask |= RBM_SAVED_LOCALLOC_SP;
}
#endif // TARGET_ARM
const bool isRoot = (compiler->funCurrentFunc()->funKind == FuncKind::FUNC_ROOT);
#ifdef TARGET_AMD64
const bool isOSRx64Root = isRoot && compiler->opts.IsOSR();
#else
const bool isOSRx64Root = false;
#endif // TARGET_AMD64
tempMask = initRegs & ~excludeMask & ~regSet.rsMaskResvd;
if (tempMask != RBM_NONE)
{
// We will use one of the registers that we were planning to zero init anyway.
// We pick the lowest register number.
tempMask = genFindLowestBit(tempMask);
initReg = genRegNumFromMask(tempMask);
}
// Next we prefer to use one of the unused argument registers.
// If they aren't available we use one of the caller-saved integer registers.
else
{
tempMask = regSet.rsGetModifiedRegsMask() & RBM_ALLINT & ~excludeMask & ~regSet.rsMaskResvd;
if (tempMask != RBM_NONE)
{
// We pick the lowest register number
tempMask = genFindLowestBit(tempMask);
initReg = genRegNumFromMask(tempMask);
}
}
#if defined(TARGET_AMD64)
    // For x64 OSR root frames, we can't use a not-yet-saved callee-saved
    // register as initReg, as we defer saving these until later in
    // the prolog, and we don't have normal arg regs.
if (isOSRx64Root)
{
initReg = REG_SCRATCH; // REG_EAX
}
#elif defined(TARGET_ARM64)
// For arm64 OSR root frames, we may need a scratch register for large
// offset addresses. Use a register that won't be allocated.
//
if (isRoot && compiler->opts.IsOSR())
{
initReg = REG_IP1;
}
#endif
noway_assert(!compiler->compMethodRequiresPInvokeFrame() || (initReg != REG_PINVOKE_FRAME));
#if defined(TARGET_AMD64)
// If we are a varargs call, in order to set up the arguments correctly this
// must be done in a 2 step process. As per the x64 ABI:
// a) The caller sets up the argument shadow space (just before the return
// address, 4 pointer sized slots).
// b) The callee is responsible to home the arguments on the shadow space
// provided by the caller.
// This way, the varargs iterator will be able to retrieve the
// call arguments properly since both the arg regs and the stack allocated
// args will be contiguous.
//
    // OSR methods can skip this, as the setup is done by the original method.
if (compiler->info.compIsVarArgs && !compiler->opts.IsOSR())
{
GetEmitter()->spillIntArgRegsToShadowSlots();
}
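    // For illustration (Windows x64 ABI): on entry RSP points at the return address, so the
    // spilled registers land in the caller-allocated shadow area at [RSP+0x08]..[RSP+0x20]
    // (RCX, RDX, R8, R9 in order), contiguous with any stack-passed args from [RSP+0x28] up.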
#endif // TARGET_AMD64
#ifdef TARGET_ARM
/*-------------------------------------------------------------------------
*
* Now start emitting the part of the prolog which sets up the frame
*/
if (regSet.rsMaskPreSpillRegs(true) != RBM_NONE)
{
inst_IV(INS_push, (int)regSet.rsMaskPreSpillRegs(true));
compiler->unwindPushMaskInt(regSet.rsMaskPreSpillRegs(true));
}
#endif // TARGET_ARM
unsigned extraFrameSize = 0;
#ifdef TARGET_XARCH
#ifdef TARGET_AMD64
if (isOSRx64Root)
{
// Account for the Tier0 callee saves
//
genOSRRecordTier0CalleeSavedRegistersAndFrame();
// We don't actually push any callee saves on the OSR frame,
// but we still reserve space, so account for this when
// allocating the local frame.
//
extraFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
}
#endif // TARGET_AMD64
if (doubleAlignOrFramePointerUsed())
{
// OSR methods handle "saving" FP specially.
//
// For epilog and unwind, we restore the RBP saved by the
// Tier0 method. The save we do here is just to set up a
// proper RBP-based frame chain link.
//
if (isOSRx64Root && isFramePointerUsed())
{
GetEmitter()->emitIns_R_AR(INS_mov, EA_8BYTE, initReg, REG_FPBASE, 0);
inst_RV(INS_push, initReg, TYP_REF);
initRegZeroed = false;
// We account for the SP movement in unwind, but not for
// the "save" of RBP.
//
compiler->unwindAllocStack(REGSIZE_BYTES);
}
else
{
inst_RV(INS_push, REG_FPBASE, TYP_REF);
compiler->unwindPush(REG_FPBASE);
}
#ifdef USING_SCOPE_INFO
psiAdjustStackLevel(REGSIZE_BYTES);
#endif // USING_SCOPE_INFO
#ifndef TARGET_AMD64 // On AMD64, establish the frame pointer after the "sub rsp"
genEstablishFramePointer(0, /*reportUnwindData*/ true);
#endif // !TARGET_AMD64
#if DOUBLE_ALIGN
if (compiler->genDoubleAlign())
{
noway_assert(isFramePointerUsed() == false);
noway_assert(!regSet.rsRegsModified(RBM_FPBASE)); /* Trashing EBP is out. */
inst_RV_IV(INS_AND, REG_SPBASE, -8, EA_PTRSIZE);
}
#endif // DOUBLE_ALIGN
}
#endif // TARGET_XARCH
#ifdef TARGET_ARM64
genPushCalleeSavedRegisters(initReg, &initRegZeroed);
#else // !TARGET_ARM64
if (!isOSRx64Root)
{
genPushCalleeSavedRegisters();
}
#endif // !TARGET_ARM64
#ifdef TARGET_ARM
bool needToEstablishFP = false;
int afterLclFrameSPtoFPdelta = 0;
if (doubleAlignOrFramePointerUsed())
{
needToEstablishFP = true;
// If the local frame is small enough, we establish the frame pointer after the OS-reported prolog.
// This makes the prolog and epilog match, giving us smaller unwind data. If the frame size is
// too big, we go ahead and do it here.
int SPtoFPdelta = (compiler->compCalleeRegsPushed - 2) * REGSIZE_BYTES;
afterLclFrameSPtoFPdelta = SPtoFPdelta + compiler->compLclFrameSize;
if (!arm_Valid_Imm_For_Add_SP(afterLclFrameSPtoFPdelta))
{
// Oh well, it looks too big. Go ahead and establish the frame pointer here.
genEstablishFramePointer(SPtoFPdelta, /*reportUnwindData*/ true);
needToEstablishFP = false;
}
}
#endif // TARGET_ARM
//-------------------------------------------------------------------------
//
// Subtract the local frame size from SP.
//
//-------------------------------------------------------------------------
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_ARM64
regMaskTP maskStackAlloc = RBM_NONE;
#ifdef TARGET_ARM
maskStackAlloc = genStackAllocRegisterMask(compiler->compLclFrameSize + extraFrameSize,
regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED);
#endif // TARGET_ARM
if (maskStackAlloc == RBM_NONE)
{
genAllocLclFrame(compiler->compLclFrameSize + extraFrameSize, initReg, &initRegZeroed,
intRegState.rsCalleeRegArgMaskLiveIn);
}
#endif // !TARGET_ARM64
#ifdef TARGET_AMD64
// For x64 OSR we have to finish saving int callee saves.
//
if (isOSRx64Root)
{
genOSRSaveRemainingCalleeSavedRegisters();
}
#endif // TARGET_AMD64
//-------------------------------------------------------------------------
#ifdef TARGET_ARM
if (compiler->compLocallocUsed)
{
GetEmitter()->emitIns_Mov(INS_mov, EA_4BYTE, REG_SAVED_LOCALLOC_SP, REG_SPBASE, /* canSkip */ false);
regSet.verifyRegUsed(REG_SAVED_LOCALLOC_SP);
compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0);
}
#endif // TARGET_ARM
#if defined(TARGET_XARCH)
// Preserve callee saved float regs to stack.
genPreserveCalleeSavedFltRegs(compiler->compLclFrameSize);
#endif // defined(TARGET_XARCH)
#ifdef TARGET_AMD64
// Establish the AMD64 frame pointer after the OS-reported prolog.
if (doubleAlignOrFramePointerUsed())
{
const bool reportUnwindData = compiler->compLocallocUsed || compiler->opts.compDbgEnC;
genEstablishFramePointer(compiler->codeGen->genSPtoFPdelta(), reportUnwindData);
}
#endif // TARGET_AMD64
//-------------------------------------------------------------------------
//
// This is the end of the OS-reported prolog for purposes of unwinding
//
//-------------------------------------------------------------------------
#ifdef TARGET_ARM
if (needToEstablishFP)
{
genEstablishFramePointer(afterLclFrameSPtoFPdelta, /*reportUnwindData*/ false);
needToEstablishFP = false; // nobody uses this later, but set it anyway, just to be explicit
}
#endif // TARGET_ARM
if (compiler->info.compPublishStubParam)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SECRET_STUB_PARAM,
compiler->lvaStubArgumentVar, 0);
assert(intRegState.rsCalleeRegArgMaskLiveIn & RBM_SECRET_STUB_PARAM);
// It's no longer live; clear it out so it can be used after this in the prolog
intRegState.rsCalleeRegArgMaskLiveIn &= ~RBM_SECRET_STUB_PARAM;
}
//
// Zero out the frame as needed
//
genZeroInitFrame(untrLclHi, untrLclLo, initReg, &initRegZeroed);
#if defined(FEATURE_EH_FUNCLETS)
genSetPSPSym(initReg, &initRegZeroed);
#else // !FEATURE_EH_FUNCLETS
    // When compInitMem is true, genZeroInitFrame will zero out the shadow SP slots.
if (compiler->ehNeedsShadowSPslots() && !compiler->info.compInitMem)
{
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs = compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE;
// Zero out the slot for nesting level 0
unsigned firstSlotOffs = filterEndOffsetSlotOffs - TARGET_POINTER_SIZE;
if (!initRegZeroed)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg);
initRegZeroed = true;
}
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, initReg, compiler->lvaShadowSPslotsVar,
firstSlotOffs);
}
#endif // !FEATURE_EH_FUNCLETS
genReportGenericContextArg(initReg, &initRegZeroed);
#ifdef JIT32_GCENCODER
// Initialize the LocalAllocSP slot if there is localloc in the function.
if (compiler->lvaLocAllocSPvar != BAD_VAR_NUM)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
}
#endif // JIT32_GCENCODER
// Set up the GS security cookie
genSetGSSecurityCookie(initReg, &initRegZeroed);
#ifdef PROFILING_SUPPORTED
// Insert a function entry callback for profiling, if requested.
// OSR methods aren't called, so don't have enter hooks.
if (!compiler->opts.IsOSR())
{
genProfilingEnterCallback(initReg, &initRegZeroed);
}
#endif // PROFILING_SUPPORTED
// For OSR we may have a zero-length prolog. That's not supported
    // when the method must report a generics context, so add a nop if so.
//
if (compiler->opts.IsOSR() && (GetEmitter()->emitGetPrologOffsetEstimate() == 0) &&
(compiler->lvaReportParamTypeArg() || compiler->lvaKeepAliveAndReportThis()))
{
JITDUMP("OSR: prolog was zero length and has generic context to report: adding nop to pad prolog.\n");
instGen(INS_nop);
}
if (!GetInterruptible())
{
// The 'real' prolog ends here for non-interruptible methods.
// For fully-interruptible methods, we extend the prolog so that
        // we do not need to track GC information while shuffling the
// arguments.
GetEmitter()->emitMarkPrologEnd();
}
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
// The unused bits of Vector3 arguments must be cleared
    // since the native compiler doesn't initialize the upper bits to zero.
//
// TODO-Cleanup: This logic can be implemented in
// genFnPrologCalleeRegArgs() for argument registers and
// genEnregisterIncomingStackArgs() for stack arguments.
genClearStackVec3ArgUpperBits();
#endif // UNIX_AMD64_ABI && FEATURE_SIMD
/*-----------------------------------------------------------------------------
* Take care of register arguments first
*/
// Home incoming arguments and generate any required inits.
// OSR handles this by moving the values from the original frame.
//
// Update the arg initial register locations.
//
if (compiler->opts.IsOSR())
{
// For OSR we defer updating "initial reg" for args until
// we've set the live-in regs with values from the Tier0 frame.
//
// Otherwise we'll do some of these fetches twice.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM64)
genEnregisterOSRArgsAndLocals(initReg, &initRegZeroed);
#else
genEnregisterOSRArgsAndLocals();
#endif
compiler->lvaUpdateArgsWithInitialReg();
}
else
{
compiler->lvaUpdateArgsWithInitialReg();
auto assignIncomingRegisterArgs = [this, initReg, &initRegZeroed](RegState* regState) {
if (regState->rsCalleeRegArgMaskLiveIn)
{
// If we need an extra register to shuffle around the incoming registers
// we will use xtraReg (initReg) and set the xtraRegClobbered flag,
// if we don't need to use the xtraReg then this flag will stay false
//
regNumber xtraReg;
bool xtraRegClobbered = false;
if (genRegMask(initReg) & RBM_ARG_REGS)
{
xtraReg = initReg;
}
else
{
xtraReg = REG_SCRATCH;
initRegZeroed = false;
}
genFnPrologCalleeRegArgs(xtraReg, &xtraRegClobbered, regState);
if (xtraRegClobbered)
{
initRegZeroed = false;
}
}
};
#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM)
assignIncomingRegisterArgs(&intRegState);
assignIncomingRegisterArgs(&floatRegState);
#else
assignIncomingRegisterArgs(&intRegState);
#endif
// Home the incoming arguments.
genEnregisterIncomingStackArgs();
}
/* Initialize any must-init registers variables now */
if (initRegs)
{
regMaskTP regMask = 0x1;
for (regNumber reg = REG_INT_FIRST; reg <= REG_INT_LAST; reg = REG_NEXT(reg), regMask <<= 1)
{
if (regMask & initRegs)
{
// Check if we have already zeroed this register
if ((reg == initReg) && initRegZeroed)
{
continue;
}
else
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, reg);
if (reg == initReg)
{
initRegZeroed = true;
}
}
}
}
}
if (initFltRegs | initDblRegs)
{
// If initReg is not in initRegs then we will use REG_SCRATCH
if ((genRegMask(initReg) & initRegs) == 0)
{
initReg = REG_SCRATCH;
initRegZeroed = false;
}
#ifdef TARGET_ARM
// This is needed only for Arm since it can use a zero initialized int register
// to initialize vfp registers.
if (!initRegZeroed)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg);
initRegZeroed = true;
}
#endif // TARGET_ARM
genZeroInitFltRegs(initFltRegs, initDblRegs, initReg);
}
//-----------------------------------------------------------------------------
//
// Increase the prolog size here only if fully interruptible.
//
if (GetInterruptible())
{
GetEmitter()->emitMarkPrologEnd();
}
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
{
psiEndProlog();
}
if (hasGCRef)
{
GetEmitter()->emitSetFrameRangeGCRs(GCrefLo, GCrefHi);
}
else
{
noway_assert(GCrefLo == +INT_MAX);
noway_assert(GCrefHi == -INT_MAX);
}
#ifdef DEBUG
if (compiler->opts.dspCode)
{
printf("\n");
}
#endif
#ifdef TARGET_X86
    // On non-x86 platforms the VARARG cookie does not need any special treatment.
    // Load up the VARARG argument pointer register so it doesn't get clobbered.
    // Only do this if we actually access any statically declared args
    // (i.e., our argument pointer register has a refcount > 0).
unsigned argsStartVar = compiler->lvaVarargsBaseOfStkArgs;
if (compiler->info.compIsVarArgs && compiler->lvaGetDesc(argsStartVar)->lvRefCnt() > 0)
{
varDsc = compiler->lvaGetDesc(argsStartVar);
noway_assert(compiler->info.compArgsCount > 0);
// MOV EAX, <VARARGS HANDLE>
GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, compiler->info.compArgsCount - 1, 0);
regSet.verifyRegUsed(REG_EAX);
// MOV EAX, [EAX]
GetEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, REG_EAX, 0);
// EDX might actually be holding something here. So make sure to only use EAX for this code
// sequence.
const LclVarDsc* lastArg = compiler->lvaGetDesc(compiler->info.compArgsCount - 1);
noway_assert(!lastArg->lvRegister);
signed offset = lastArg->GetStackOffset();
assert(offset != BAD_STK_OFFS);
noway_assert(lastArg->lvFramePointerBased);
// LEA EAX, &<VARARGS HANDLE> + EAX
GetEmitter()->emitIns_R_ARR(INS_lea, EA_PTRSIZE, REG_EAX, genFramePointerReg(), REG_EAX, offset);
if (varDsc->lvIsInReg())
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, varDsc->GetRegNum(), REG_EAX, /* canSkip */ true);
regSet.verifyRegUsed(varDsc->GetRegNum());
}
else
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, argsStartVar, 0);
}
}
#endif // TARGET_X86
#if defined(DEBUG) && defined(TARGET_XARCH)
if (compiler->opts.compStackCheckOnRet)
{
noway_assert(compiler->lvaReturnSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvOnFrame);
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnSpCheck, 0);
}
#endif // defined(DEBUG) && defined(TARGET_XARCH)
GetEmitter()->emitEndProlog();
compiler->unwindEndProlog();
noway_assert(GetEmitter()->emitMaxTmpSize == regSet.tmpGetTotalSize());
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//------------------------------------------------------------------------
// getCallTarget - Get the node that evaluates to the call target
//
// Arguments:
// call - the GT_CALL node
//
// Returns:
// The node. Note that for direct calls this may still return non-null if the direct call
// requires a 'complex' tree to load the target (e.g. in R2R or because we go through a stub).
//
GenTree* CodeGen::getCallTarget(const GenTreeCall* call, CORINFO_METHOD_HANDLE* methHnd)
{
// all virtuals should have been expanded into a control expression by this point.
assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
if (call->gtCallType == CT_INDIRECT)
{
assert(call->gtControlExpr == nullptr);
if (methHnd != nullptr)
{
*methHnd = nullptr;
}
return call->gtCallAddr;
}
if (methHnd != nullptr)
{
*methHnd = call->gtCallMethHnd;
}
return call->gtControlExpr;
}
//------------------------------------------------------------------------
// getCallIndirectionCellReg - Get the register containing the indirection cell for a call
//
// Arguments:
// call - the node
//
// Returns:
// The register containing the indirection cell, or REG_NA if this call does not use an indirection cell argument.
//
// Notes:
// We currently use indirection cells for VSD on all platforms and for R2R calls on ARM architectures.
//
regNumber CodeGen::getCallIndirectionCellReg(const GenTreeCall* call)
{
regNumber result = REG_NA;
switch (call->GetIndirectionCellArgKind())
{
case NonStandardArgKind::None:
break;
case NonStandardArgKind::R2RIndirectionCell:
result = REG_R2R_INDIRECT_PARAM;
break;
case NonStandardArgKind::VirtualStubCell:
result = compiler->virtualStubParamInfo->GetReg();
break;
default:
unreached();
}
#ifdef DEBUG
regNumber foundReg = REG_NA;
unsigned argCount = call->fgArgInfo->ArgCount();
fgArgTabEntry** argTable = call->fgArgInfo->ArgTable();
for (unsigned i = 0; i < argCount; i++)
{
NonStandardArgKind kind = argTable[i]->nonStandardArgKind;
if ((kind == NonStandardArgKind::R2RIndirectionCell) || (kind == NonStandardArgKind::VirtualStubCell))
{
foundReg = argTable[i]->GetRegNum();
break;
}
}
assert(foundReg == result);
#endif
return result;
}
/*****************************************************************************
*
* Generates code for all the function and funclet prologs and epilogs.
*/
void CodeGen::genGeneratePrologsAndEpilogs()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** Before prolog / epilog generation\n");
GetEmitter()->emitDispIGlist(false);
}
#endif
// Before generating the prolog, we need to reset the variable locations to what they will be on entry.
// This affects our code that determines which untracked locals need to be zero initialized.
compiler->m_pLinearScan->recordVarLocationsAtStartOfBB(compiler->fgFirstBB);
// Tell the emitter we're done with main code generation, and are going to start prolog and epilog generation.
GetEmitter()->emitStartPrologEpilogGeneration();
gcInfo.gcResetForBB();
genFnProlog();
// Generate all the prologs and epilogs.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(FEATURE_EH_FUNCLETS)
// Capture the data we're going to use in the funclet prolog and epilog generation. This is
// information computed during codegen, or during function prolog generation, like
// frame offsets. It must run after main function prolog generation.
genCaptureFuncletPrologEpilogInfo();
#endif // FEATURE_EH_FUNCLETS
// Walk the list of prologs and epilogs and generate them.
// We maintain a list of prolog and epilog basic blocks in
// the insGroup structure in the emitter. This list was created
// during code generation by the genReserve*() functions.
//
// TODO: it seems like better design would be to create a list of prologs/epilogs
// in the code generator (not the emitter), and then walk that list. But we already
// have the insGroup list, which serves well, so we don't need the extra allocations
// for a prolog/epilog list in the code generator.
GetEmitter()->emitGeneratePrologEpilog();
// Tell the emitter we're done with all prolog and epilog generation.
GetEmitter()->emitFinishPrologEpilogGeneration();
#ifdef DEBUG
if (verbose)
{
printf("*************** After prolog / epilog generation\n");
GetEmitter()->emitDispIGlist(false);
}
#endif
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX End Prolog / Epilog XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
//-----------------------------------------------------------------------------------
// IsMultiRegReturnedType: Returns true if the type is returned in multiple registers
//
// Arguments:
// hClass - type handle
//
// Return Value:
// true if type is returned in multiple registers, false otherwise.
//
bool Compiler::IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv)
{
if (hClass == NO_CLASS_HANDLE)
{
return false;
}
structPassingKind howToReturnStruct;
var_types returnType = getReturnTypeForStruct(hClass, callConv, &howToReturnStruct);
#ifdef TARGET_ARM64
return (varTypeIsStruct(returnType) && (howToReturnStruct != SPK_PrimitiveType));
#else
return (varTypeIsStruct(returnType));
#endif
}
//----------------------------------------------
// Methods that support HFA's for ARM32/ARM64
//----------------------------------------------
bool Compiler::IsHfa(CORINFO_CLASS_HANDLE hClass)
{
return varTypeIsValidHfaType(GetHfaType(hClass));
}
bool Compiler::IsHfa(GenTree* tree)
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(gtGetStructHandleIfPresent(tree));
}
else
{
return false;
}
}
var_types Compiler::GetHfaType(GenTree* tree)
{
if (GlobalJitOptions::compFeatureHfa)
{
return GetHfaType(gtGetStructHandleIfPresent(tree));
}
else
{
return TYP_UNDEF;
}
}
unsigned Compiler::GetHfaCount(GenTree* tree)
{
return GetHfaCount(gtGetStructHandle(tree));
}
var_types Compiler::GetHfaType(CORINFO_CLASS_HANDLE hClass)
{
if (GlobalJitOptions::compFeatureHfa)
{
if (hClass != NO_CLASS_HANDLE)
{
CorInfoHFAElemType elemKind = info.compCompHnd->getHFAType(hClass);
if (elemKind != CORINFO_HFA_ELEM_NONE)
{
// This type may not appear elsewhere, but it will occupy a floating point register.
compFloatingPointUsed = true;
}
return HfaTypeFromElemKind(elemKind);
}
}
return TYP_UNDEF;
}
//------------------------------------------------------------------------
// GetHfaCount: Given a class handle for an HFA struct
// return the number of registers needed to hold the HFA
//
// Note that on ARM32 the single precision registers overlap with
// the double precision registers and for that reason each
// double register is considered to be two single registers.
// Thus for ARM32 an HFA of 4 doubles this function will return 8.
// On ARM64, given an HFA of 4 singles or 4 doubles, this function
// will return 4 for both.
// Arguments:
// hClass: the class handle of a HFA struct
//
unsigned Compiler::GetHfaCount(CORINFO_CLASS_HANDLE hClass)
{
assert(IsHfa(hClass));
#ifdef TARGET_ARM
    // An HFA of doubles is twice as large as an HFA of singles for ARM32
    // (i.e. it uses twice the number of single precision registers).
return info.compCompHnd->getClassSize(hClass) / REGSIZE_BYTES;
#else // TARGET_ARM64
var_types hfaType = GetHfaType(hClass);
unsigned classSize = info.compCompHnd->getClassSize(hClass);
    // Note that the retail build issues a warning about a potential division by zero without the Max function
unsigned elemSize = Max((unsigned)1, EA_SIZE_IN_BYTES(emitActualTypeSize(hfaType)));
return classSize / elemSize;
#endif // TARGET_ARM64
}
//------------------------------------------------------------------------
// getFirstArgWithStackSlot - returns the first argument with stack slot on the caller's frame.
//
// Return value:
// The number of the first argument with stack slot on the caller's frame.
//
// Note:
// On x64 Windows the caller always creates slots (homing space) in its frame for the
//    first 4 arguments of a callee (register-passed args). So, the variable number
// (lclNum) for the first argument with a stack slot is always 0.
// For System V systems or armarch, there is no such calling convention requirement, and the code
// needs to find the first stack passed argument from the caller. This is done by iterating over
// all the lvParam variables and finding the first with GetArgReg() equals to REG_STK.
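//    For example, on arm64 a signature such as void F(int a0, ..., int a8) passes a0-a7 in
//    registers and a8 on the stack, so this is expected to return the lclNum of a8; on
//    Windows x64 it simply returns 0.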
//
unsigned CodeGen::getFirstArgWithStackSlot()
{
#if defined(UNIX_AMD64_ABI) || defined(TARGET_ARMARCH)
unsigned baseVarNum = 0;
// Iterate over all the lvParam variables in the Lcl var table until we find the first one
// that's passed on the stack.
LclVarDsc* varDsc = nullptr;
for (unsigned i = 0; i < compiler->info.compArgsCount; i++)
{
varDsc = compiler->lvaGetDesc(i);
// We should have found a stack parameter (and broken out of this loop) before
// we find any non-parameters.
assert(varDsc->lvIsParam);
if (varDsc->GetArgReg() == REG_STK)
{
baseVarNum = i;
break;
}
}
assert(varDsc != nullptr);
return baseVarNum;
#elif defined(TARGET_AMD64)
return 0;
#else // TARGET_X86
// Not implemented for x86.
NYI_X86("getFirstArgWithStackSlot not yet implemented for x86.");
return BAD_VAR_NUM;
#endif // TARGET_X86
}
//------------------------------------------------------------------------
// genSinglePush: Report a change in stack level caused by a single word-sized push instruction
//
void CodeGen::genSinglePush()
{
AddStackLevel(REGSIZE_BYTES);
}
//------------------------------------------------------------------------
// genSinglePop: Report a change in stack level caused by a single word-sized pop instruction
//
void CodeGen::genSinglePop()
{
SubtractStackLevel(REGSIZE_BYTES);
}
//------------------------------------------------------------------------
// genPushRegs: Push the given registers.
//
// Arguments:
// regs - mask or registers to push
// byrefRegs - OUT arg. Set to byref registers that were pushed.
// noRefRegs - OUT arg. Set to non-GC ref registers that were pushed.
//
// Return Value:
// Mask of registers pushed.
//
// Notes:
// This function does not check if the register is marked as used, etc.
//
regMaskTP CodeGen::genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP* noRefRegs)
{
*byrefRegs = RBM_NONE;
*noRefRegs = RBM_NONE;
if (regs == RBM_NONE)
{
return RBM_NONE;
}
#if FEATURE_FIXED_OUT_ARGS
NYI("Don't call genPushRegs with real regs!");
return RBM_NONE;
#else // FEATURE_FIXED_OUT_ARGS
noway_assert(genTypeStSz(TYP_REF) == genTypeStSz(TYP_I_IMPL));
noway_assert(genTypeStSz(TYP_BYREF) == genTypeStSz(TYP_I_IMPL));
regMaskTP pushedRegs = regs;
for (regNumber reg = REG_INT_FIRST; regs != RBM_NONE; reg = REG_NEXT(reg))
{
regMaskTP regBit = regMaskTP(1) << reg;
if ((regBit & regs) == RBM_NONE)
continue;
var_types type;
if (regBit & gcInfo.gcRegGCrefSetCur)
{
type = TYP_REF;
}
else if (regBit & gcInfo.gcRegByrefSetCur)
{
*byrefRegs |= regBit;
type = TYP_BYREF;
}
else if (noRefRegs != NULL)
{
*noRefRegs |= regBit;
type = TYP_I_IMPL;
}
else
{
continue;
}
inst_RV(INS_push, reg, type);
genSinglePush();
gcInfo.gcMarkRegSetNpt(regBit);
regs &= ~regBit;
}
return pushedRegs;
#endif // FEATURE_FIXED_OUT_ARGS
}
//------------------------------------------------------------------------
// genPopRegs: Pop the registers that were pushed by genPushRegs().
//
// Arguments:
// regs - mask of registers to pop
// byrefRegs - The byref registers that were pushed by genPushRegs().
// noRefRegs - The non-GC ref registers that were pushed by genPushRegs().
//
// Return Value:
// None
//
void CodeGen::genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs)
{
if (regs == RBM_NONE)
{
return;
}
#if FEATURE_FIXED_OUT_ARGS
NYI("Don't call genPopRegs with real regs!");
#else // FEATURE_FIXED_OUT_ARGS
noway_assert((regs & byrefRegs) == byrefRegs);
noway_assert((regs & noRefRegs) == noRefRegs);
noway_assert((regs & (gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur)) == RBM_NONE);
noway_assert(genTypeStSz(TYP_REF) == genTypeStSz(TYP_INT));
noway_assert(genTypeStSz(TYP_BYREF) == genTypeStSz(TYP_INT));
// Walk the registers in the reverse order as genPushRegs()
for (regNumber reg = REG_INT_LAST; regs != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = regMaskTP(1) << reg;
if ((regBit & regs) == RBM_NONE)
continue;
var_types type;
if (regBit & byrefRegs)
{
type = TYP_BYREF;
}
else if (regBit & noRefRegs)
{
type = TYP_INT;
}
else
{
type = TYP_REF;
}
inst_RV(INS_pop, reg, type);
genSinglePop();
if (type != TYP_INT)
gcInfo.gcMarkRegPtrVal(reg, type);
regs &= ~regBit;
}
#endif // FEATURE_FIXED_OUT_ARGS
}
/*****************************************************************************
* genSetScopeInfo
*
* This function should be called only after the sizes of the emitter blocks
* have been finalized.
*/
void CodeGen::genSetScopeInfo()
{
if (!compiler->opts.compScopeInfo)
{
return;
}
#ifdef DEBUG
if (verbose)
{
printf("*************** In genSetScopeInfo()\n");
}
#endif
unsigned varsLocationsCount = 0;
#ifdef USING_SCOPE_INFO
if (compiler->info.compVarScopesCount > 0)
{
varsLocationsCount = siScopeCnt + psiScopeCnt;
}
#else // USING_SCOPE_INFO
#ifdef USING_VARIABLE_LIVE_RANGE
varsLocationsCount = (unsigned int)varLiveKeeper->getLiveRangesCount();
#endif // USING_VARIABLE_LIVE_RANGE
#endif // USING_SCOPE_INFO
if (varsLocationsCount == 0)
{
// No variable home to report
compiler->eeSetLVcount(0);
compiler->eeSetLVdone();
return;
}
noway_assert(compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0));
// Initialize the table where the reported variables' home will be placed.
compiler->eeSetLVcount(varsLocationsCount);
#ifdef DEBUG
genTrnslLocalVarCount = varsLocationsCount;
if (varsLocationsCount)
{
genTrnslLocalVarInfo = new (compiler, CMK_DebugOnly) TrnslLocalVarInfo[varsLocationsCount];
}
#endif
#ifdef USING_SCOPE_INFO
genSetScopeInfoUsingsiScope();
#else // USING_SCOPE_INFO
#ifdef USING_VARIABLE_LIVE_RANGE
    // We can have either flag defined, both, or none, especially if we need to compare the
    // results of both. But we cannot report both to the debugger, since there would be
    // overlapping intervals that may not indicate the same variable location.
genSetScopeInfoUsingVariableRanges();
#endif // USING_VARIABLE_LIVE_RANGE
#endif // USING_SCOPE_INFO
compiler->eeSetLVdone();
}
#ifdef USING_SCOPE_INFO
void CodeGen::genSetScopeInfoUsingsiScope()
{
noway_assert(psiOpenScopeList.scNext == nullptr);
// Record the scopes found for the parameters over the prolog.
// The prolog needs to be treated differently as a variable may not
// have the same info in the prolog block as is given by compiler->lvaTable.
// eg. A register parameter is actually on the stack, before it is loaded to reg.
CodeGen::psiScope* scopeP;
unsigned i;
for (i = 0, scopeP = psiScopeList.scNext; i < psiScopeCnt; i++, scopeP = scopeP->scNext)
{
noway_assert(scopeP != nullptr);
noway_assert(scopeP->scStartLoc.Valid());
noway_assert(scopeP->scEndLoc.Valid());
UNATIVE_OFFSET startOffs = scopeP->scStartLoc.CodeOffset(GetEmitter());
UNATIVE_OFFSET endOffs = scopeP->scEndLoc.CodeOffset(GetEmitter());
unsigned varNum = scopeP->scSlotNum;
noway_assert(startOffs <= endOffs);
// The range may be 0 if the prolog is empty. For such a case,
// report the liveness of arguments to span at least the first
// instruction in the method. This will be incorrect (except on
// entry to the method) if the very first instruction of the method
// is part of a loop. However, this should happen
// very rarely, and the incorrectness is worth being able to look
// at the argument on entry to the method.
if (startOffs == endOffs)
{
noway_assert(startOffs == 0);
endOffs++;
}
siVarLoc varLoc = scopeP->getSiVarLoc();
genSetScopeInfo(i, startOffs, endOffs - startOffs, varNum, scopeP->scLVnum, true, &varLoc);
}
// Record the scopes for the rest of the method.
// Check that the LocalVarInfo scopes look OK
noway_assert(siOpenScopeList.scNext == nullptr);
CodeGen::siScope* scopeL;
for (i = 0, scopeL = siScopeList.scNext; i < siScopeCnt; i++, scopeL = scopeL->scNext)
{
noway_assert(scopeL != nullptr);
noway_assert(scopeL->scStartLoc.Valid());
noway_assert(scopeL->scEndLoc.Valid());
// Find the start and end IP
UNATIVE_OFFSET startOffs = scopeL->scStartLoc.CodeOffset(GetEmitter());
UNATIVE_OFFSET endOffs = scopeL->scEndLoc.CodeOffset(GetEmitter());
noway_assert(scopeL->scStartLoc != scopeL->scEndLoc);
LclVarDsc* varDsc = compiler->lvaGetDesc(scopeL->scVarNum);
siVarLoc varLoc = getSiVarLoc(varDsc, scopeL);
genSetScopeInfo(psiScopeCnt + i, startOffs, endOffs - startOffs, scopeL->scVarNum, scopeL->scLVnum, false,
&varLoc);
}
}
#endif // USING_SCOPE_INFO
#ifdef USING_VARIABLE_LIVE_RANGE
//------------------------------------------------------------------------
// genSetScopeInfoUsingVariableRanges: Call "genSetScopeInfo" with the
// "VariableLiveRanges" created for the arguments, special arguments and
// IL local variables.
//
// Notes:
// This function is called from "genSetScopeInfo" once the code is generated
// and we want to send debug info to the debugger.
//
void CodeGen::genSetScopeInfoUsingVariableRanges()
{
unsigned int liveRangeIndex = 0;
for (unsigned int varNum = 0; varNum < compiler->info.compLocalsCount; varNum++)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
if (compiler->compMap2ILvarNum(varNum) == (unsigned int)ICorDebugInfo::UNKNOWN_ILNUM)
{
continue;
}
auto reportRange = [this, varDsc, varNum, &liveRangeIndex](siVarLoc* loc, UNATIVE_OFFSET start,
UNATIVE_OFFSET end) {
if (varDsc->lvIsParam && (start == end))
{
// If the length is zero, it means that the prolog is empty. In that case,
// CodeGen::genSetScopeInfo will report the liveness of all arguments
// as spanning the first instruction in the method, so that they can
// at least be inspected on entry to the method.
end++;
}
genSetScopeInfo(liveRangeIndex, start, end - start, varNum, varNum, true, loc);
liveRangeIndex++;
};
siVarLoc* curLoc = nullptr;
UNATIVE_OFFSET curStart = 0;
UNATIVE_OFFSET curEnd = 0;
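        // Walk the prolog ranges first, then the body ranges, coalescing adjacent ranges that
        // share the same variable location so the debugger sees one entry per contiguous home.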
for (int rangeIndex = 0; rangeIndex < 2; rangeIndex++)
{
VariableLiveKeeper::LiveRangeList* liveRanges;
if (rangeIndex == 0)
{
liveRanges = varLiveKeeper->getLiveRangesForVarForProlog(varNum);
}
else
{
liveRanges = varLiveKeeper->getLiveRangesForVarForBody(varNum);
}
for (VariableLiveKeeper::VariableLiveRange& liveRange : *liveRanges)
{
UNATIVE_OFFSET startOffs = liveRange.m_StartEmitLocation.CodeOffset(GetEmitter());
UNATIVE_OFFSET endOffs = liveRange.m_EndEmitLocation.CodeOffset(GetEmitter());
assert(startOffs <= endOffs);
assert(startOffs >= curEnd);
if ((curLoc != nullptr) && (startOffs == curEnd) && siVarLoc::Equals(curLoc, &liveRange.m_VarLocation))
{
// Extend current range.
curEnd = endOffs;
continue;
}
// Report old range if any.
if (curLoc != nullptr)
{
reportRange(curLoc, curStart, curEnd);
}
// Start a new range.
curLoc = &liveRange.m_VarLocation;
curStart = startOffs;
curEnd = endOffs;
}
}
// Report last range
if (curLoc != nullptr)
{
reportRange(curLoc, curStart, curEnd);
}
}
compiler->eeVarsCount = liveRangeIndex;
}
#endif // USING_VARIABLE_LIVE_RANGE
//------------------------------------------------------------------------
// genSetScopeInfo: Record scope information for debug info
//
// Arguments:
// which
// startOffs - the starting offset for this scope
// length - the length of this scope
// varNum - the lclVar for this scope info
// LVnum
// avail - a bool indicating if it has a home
// varLoc - the position (reg or stack) of the variable
//
// Notes:
// Called for every scope info piece to record by the main genSetScopeInfo()
void CodeGen::genSetScopeInfo(unsigned which,
UNATIVE_OFFSET startOffs,
UNATIVE_OFFSET length,
unsigned varNum,
unsigned LVnum,
bool avail,
siVarLoc* varLoc)
{
// We need to do some mapping while reporting back these variables.
unsigned ilVarNum = compiler->compMap2ILvarNum(varNum);
noway_assert((int)ilVarNum != ICorDebugInfo::UNKNOWN_ILNUM);
#ifdef TARGET_X86
// Non-x86 platforms are allowed to access all arguments directly
// so we don't need this code.
// Is this a varargs function?
if (compiler->info.compIsVarArgs && varNum != compiler->lvaVarargsHandleArg &&
varNum < compiler->info.compArgsCount && !compiler->lvaGetDesc(varNum)->lvIsRegArg)
{
noway_assert(varLoc->vlType == VLT_STK || varLoc->vlType == VLT_STK2);
// All stack arguments (except the varargs handle) have to be
// accessed via the varargs cookie. Discard generated info,
// and just find its position relative to the varargs handle
PREFIX_ASSUME(compiler->lvaVarargsHandleArg < compiler->info.compArgsCount);
if (!compiler->lvaGetDesc(compiler->lvaVarargsHandleArg)->lvOnFrame)
{
noway_assert(!compiler->opts.compDbgCode);
return;
}
// Can't check compiler->lvaTable[varNum].lvOnFrame as we don't set it for
// arguments of vararg functions to avoid reporting them to GC.
noway_assert(!compiler->lvaGetDesc(varNum)->lvRegister);
unsigned cookieOffset = compiler->lvaGetDesc(compiler->lvaVarargsHandleArg)->GetStackOffset();
unsigned varOffset = compiler->lvaGetDesc(varNum)->GetStackOffset();
noway_assert(cookieOffset < varOffset);
unsigned offset = varOffset - cookieOffset;
unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
noway_assert(offset < stkArgSize);
offset = stkArgSize - offset;
varLoc->vlType = VLT_FIXED_VA;
varLoc->vlFixedVarArg.vlfvOffset = offset;
}
#endif // TARGET_X86
VarName name = nullptr;
#ifdef DEBUG
for (unsigned scopeNum = 0; scopeNum < compiler->info.compVarScopesCount; scopeNum++)
{
if (LVnum == compiler->info.compVarScopes[scopeNum].vsdLVnum)
{
name = compiler->info.compVarScopes[scopeNum].vsdName;
}
}
// Hang on to this compiler->info.
TrnslLocalVarInfo& tlvi = genTrnslLocalVarInfo[which];
tlvi.tlviVarNum = ilVarNum;
tlvi.tlviLVnum = LVnum;
tlvi.tlviName = name;
tlvi.tlviStartPC = startOffs;
tlvi.tlviLength = length;
tlvi.tlviAvailable = avail;
tlvi.tlviVarLoc = *varLoc;
#endif // DEBUG
compiler->eeSetLVinfo(which, startOffs, length, ilVarNum, *varLoc);
}
/*****************************************************************************/
#ifdef LATE_DISASM
#if defined(DEBUG)
/*****************************************************************************
* CompilerRegName
*
* Can be called only after lviSetLocalVarInfo() has been called
*/
/* virtual */
const char* CodeGen::siRegVarName(size_t offs, size_t size, unsigned reg)
{
if (!compiler->opts.compScopeInfo)
return nullptr;
if (compiler->info.compVarScopesCount == 0)
return nullptr;
noway_assert(genTrnslLocalVarCount == 0 || genTrnslLocalVarInfo);
for (unsigned i = 0; i < genTrnslLocalVarCount; i++)
{
if ((genTrnslLocalVarInfo[i].tlviVarLoc.vlIsInReg((regNumber)reg)) &&
(genTrnslLocalVarInfo[i].tlviAvailable == true) && (genTrnslLocalVarInfo[i].tlviStartPC <= offs + size) &&
(genTrnslLocalVarInfo[i].tlviStartPC + genTrnslLocalVarInfo[i].tlviLength > offs))
{
return genTrnslLocalVarInfo[i].tlviName ? compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL;
}
}
return NULL;
}
/*****************************************************************************
* CompilerStkName
*
* Can be called only after lviSetLocalVarInfo() has been called
*/
/* virtual */
const char* CodeGen::siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs)
{
if (!compiler->opts.compScopeInfo)
return nullptr;
if (compiler->info.compVarScopesCount == 0)
return nullptr;
noway_assert(genTrnslLocalVarCount == 0 || genTrnslLocalVarInfo);
for (unsigned i = 0; i < genTrnslLocalVarCount; i++)
{
if ((genTrnslLocalVarInfo[i].tlviVarLoc.vlIsOnStack((regNumber)reg, stkOffs)) &&
(genTrnslLocalVarInfo[i].tlviAvailable == true) && (genTrnslLocalVarInfo[i].tlviStartPC <= offs + size) &&
(genTrnslLocalVarInfo[i].tlviStartPC + genTrnslLocalVarInfo[i].tlviLength > offs))
{
return genTrnslLocalVarInfo[i].tlviName ? compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL;
}
}
return NULL;
}
/*****************************************************************************/
#endif // defined(DEBUG)
#endif // LATE_DISASM
#ifdef DEBUG
/*****************************************************************************
* Display a IPmappingDsc. Pass -1 as mappingNum to not display a mapping number.
*/
void CodeGen::genIPmappingDisp(unsigned mappingNum, IPmappingDsc* ipMapping)
{
if (mappingNum != unsigned(-1))
{
printf("%d: ", mappingNum);
}
switch (ipMapping->ipmdKind)
{
case IPmappingDscKind::Prolog:
printf("PROLOG");
break;
case IPmappingDscKind::Epilog:
printf("EPILOG");
break;
case IPmappingDscKind::NoMapping:
printf("NO_MAP");
break;
case IPmappingDscKind::Normal:
const ILLocation& loc = ipMapping->ipmdLoc;
Compiler::eeDispILOffs(loc.GetOffset());
if (loc.IsStackEmpty())
{
printf(" STACK_EMPTY");
}
if (loc.IsCall())
{
printf(" CALL_INSTRUCTION");
}
break;
}
printf(" ");
ipMapping->ipmdNativeLoc.Print(compiler->compMethodID);
// We can only call this after code generation. Is there any way to tell when it's legal to call?
// printf(" [%x]", ipMapping->ipmdNativeLoc.CodeOffset(GetEmitter()));
if (ipMapping->ipmdIsLabel)
{
printf(" label");
}
printf("\n");
}
void CodeGen::genIPmappingListDisp()
{
unsigned mappingNum = 0;
for (IPmappingDsc& dsc : compiler->genIPmappings)
{
genIPmappingDisp(mappingNum, &dsc);
++mappingNum;
}
}
#endif // DEBUG
/*****************************************************************************
*
* Append an IPmappingDsc struct to the list that we're maintaining
* for the debugger.
* Record the instr offset as being at the current code gen position.
*/
void CodeGen::genIPmappingAdd(IPmappingDscKind kind, const DebugInfo& di, bool isLabel)
{
if (!compiler->opts.compDbgInfo)
{
return;
}
assert((kind == IPmappingDscKind::Normal) == di.IsValid());
switch (kind)
{
case IPmappingDscKind::Prolog:
case IPmappingDscKind::Epilog:
break;
default:
if (kind == IPmappingDscKind::Normal)
{
noway_assert(di.GetLocation().GetOffset() <= compiler->info.compILCodeSize);
}
// Ignore this one if it's the same IL location as the last one we saw.
// Note that we'll let through two identical IL offsets if the flag bits
// differ, or two identical "special" mappings (e.g., PROLOG).
if ((compiler->genIPmappings.size() > 0) && (kind == compiler->genIPmappings.back().ipmdKind) &&
(di.GetLocation() == compiler->genIPmappings.back().ipmdLoc))
{
JITDUMP("genIPmappingAdd: ignoring duplicate IL offset 0x%x\n", di.GetLocation().GetOffset());
return;
}
break;
}
IPmappingDsc addMapping;
addMapping.ipmdNativeLoc.CaptureLocation(GetEmitter());
addMapping.ipmdKind = kind;
addMapping.ipmdLoc = di.GetLocation();
addMapping.ipmdIsLabel = isLabel;
assert((kind == IPmappingDscKind::Normal) == addMapping.ipmdLoc.IsValid());
compiler->genIPmappings.push_back(addMapping);
#ifdef DEBUG
if (verbose)
{
printf("Added IP mapping: ");
genIPmappingDisp(unsigned(-1), &addMapping);
}
#endif // DEBUG
}
/*****************************************************************************
*
* Prepend an IPmappingDsc struct to the list that we're maintaining
* for the debugger.
*/
void CodeGen::genIPmappingAddToFront(IPmappingDscKind kind, const DebugInfo& di, bool isLabel)
{
if (!compiler->opts.compDbgInfo)
{
return;
}
noway_assert((kind != IPmappingDscKind::Normal) ||
(di.IsValid() && (di.GetLocation().GetOffset() <= compiler->info.compILCodeSize)));
/* Create a mapping entry and prepend it to the list */
IPmappingDsc addMapping;
addMapping.ipmdNativeLoc.CaptureLocation(GetEmitter());
addMapping.ipmdKind = kind;
addMapping.ipmdLoc = di.GetLocation();
addMapping.ipmdIsLabel = isLabel;
compiler->genIPmappings.push_front(addMapping);
#ifdef DEBUG
if (verbose)
{
printf("Added IP mapping to front: ");
genIPmappingDisp(unsigned(-1), &addMapping);
}
#endif // DEBUG
}
/*****************************************************************************/
void CodeGen::genEnsureCodeEmitted(const DebugInfo& di)
{
if (!compiler->opts.compDbgCode)
{
return;
}
if (!di.IsValid())
{
return;
}
    // If no IL offsets have been reported yet, there is nothing to check; skip.
if (compiler->genIPmappings.size() <= 0)
{
return;
}
const IPmappingDsc& prev = compiler->genIPmappings.back();
if (prev.ipmdLoc != di.GetLocation())
{
return;
}
// di represents the last reported offset. Make sure that we generated native code
if (prev.ipmdNativeLoc.IsCurrentLocation(GetEmitter()))
{
instGen(INS_nop);
}
}
//------------------------------------------------------------------------
// genIPmappingGen: Shut down the IP-mapping logic, report the info to the EE.
//
void CodeGen::genIPmappingGen()
{
if (!compiler->opts.compDbgInfo)
{
return;
}
#ifdef DEBUG
if (verbose)
{
printf("*************** In genIPmappingGen()\n");
}
#endif
if (compiler->genIPmappings.size() <= 0)
{
compiler->eeSetLIcount(0);
compiler->eeSetLIdone();
return;
}
UNATIVE_OFFSET prevNativeOfs = UNATIVE_OFFSET(~0);
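    // Walk the mappings and collapse entries that ended up at the same native offset: drop
    // NO_MAP entries in favor of real IL mappings, keep a prolog entry paired with IL offset 0,
    // keep epilog and call-site entries, and otherwise keep the later entry unless the earlier
    // one is a label.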
for (jitstd::list<IPmappingDsc>::iterator it = compiler->genIPmappings.begin();
it != compiler->genIPmappings.end();)
{
UNATIVE_OFFSET dscNativeOfs = it->ipmdNativeLoc.CodeOffset(GetEmitter());
if (dscNativeOfs != prevNativeOfs)
{
prevNativeOfs = dscNativeOfs;
++it;
continue;
}
// If we have a previous offset we should have a previous mapping.
assert(it != compiler->genIPmappings.begin());
jitstd::list<IPmappingDsc>::iterator prev = it;
--prev;
// Prev and current mappings have same native offset.
// If one does not map to IL then remove that one.
if (prev->ipmdKind == IPmappingDscKind::NoMapping)
{
compiler->genIPmappings.erase(prev);
++it;
continue;
}
if (it->ipmdKind == IPmappingDscKind::NoMapping)
{
it = compiler->genIPmappings.erase(it);
continue;
}
// Both have mappings.
// If previous is the prolog, keep both if this one is at IL offset 0.
// (TODO: Why? Debugger has no problem breaking on the prolog mapping
// it seems.)
if ((prev->ipmdKind == IPmappingDscKind::Prolog) && (it->ipmdKind == IPmappingDscKind::Normal) &&
(it->ipmdLoc.GetOffset() == 0))
{
++it;
continue;
}
// For the special case of an IL instruction with no body followed by
// the epilog (say ret void immediately preceding the method end), we
// leave both entries in, so that we'll stop at the (empty) ret
// statement if the user tries to put a breakpoint there, and then have
// the option of seeing the epilog or not based on SetUnmappedStopMask
// for the stepper.
if (it->ipmdKind == IPmappingDscKind::Epilog)
{
++it;
continue;
}
// For managed return values we store all calls. Keep both in this case
// too.
if (((prev->ipmdKind == IPmappingDscKind::Normal) && (prev->ipmdLoc.IsCall())) ||
((it->ipmdKind == IPmappingDscKind::Normal) && (it->ipmdLoc.IsCall())))
{
++it;
continue;
}
// Otherwise report the higher offset unless the previous mapping is a
// label.
if (prev->ipmdIsLabel)
{
it = compiler->genIPmappings.erase(it);
}
else
{
compiler->genIPmappings.erase(prev);
++it;
}
}
// Tell them how many mapping records we've got
compiler->eeSetLIcount(static_cast<unsigned int>(compiler->genIPmappings.size()));
// Now tell them about the mappings
unsigned int mappingIdx = 0;
for (const IPmappingDsc& dsc : compiler->genIPmappings)
{
compiler->eeSetLIinfo(mappingIdx++, dsc.ipmdNativeLoc.CodeOffset(GetEmitter()), dsc.ipmdKind, dsc.ipmdLoc);
}
#if 0
// TODO-Review:
//This check is disabled. It is always true that any time this check asserts, the debugger would have a
//problem with IL source level debugging. However, for a C# file, it only matters if things are on
//different source lines. As a result, we have all sorts of latent problems with how we emit debug
//info, but very few actual ones. Whenever someone wants to tackle that problem in general, turn this
//assert back on.
if (compiler->opts.compDbgCode)
{
//Assert that the first instruction of every basic block with more than one incoming edge has a
//different sequence point from each incoming block.
//
//It turns out that the only thing we really have to assert is that the first statement in each basic
//block has an IL offset and appears in eeBoundaries.
for (BasicBlock* const block : compiler->Blocks())
{
Statement* stmt = block->firstStmt();
if ((block->bbRefs > 1) && (stmt != nullptr))
{
bool found = false;
DebugInfo rootInfo = stmt->GetDebugInfo().GetRoot();
if (rootInfo.IsValid())
{
for (unsigned i = 0; i < compiler->eeBoundariesCount; ++i)
{
if (compiler->eeBoundaries[i].ilOffset == rootInfo.GetLocation().GetOffset())
{
found = true;
break;
}
}
}
noway_assert(found && "A basic block that is a jump target did not start a new sequence point.");
}
}
}
#endif // 0
compiler->eeSetLIdone();
}
#ifdef DEBUG
void CodeGen::genDumpPreciseDebugInfoInlineTree(FILE* file, InlineContext* context, bool* first)
{
if (context->GetSibling() != nullptr)
{
genDumpPreciseDebugInfoInlineTree(file, context->GetSibling(), first);
}
if (context->IsSuccess())
{
if (!*first)
{
fprintf(file, ",");
}
*first = false;
fprintf(file, "{\"Ordinal\":%u,", context->GetOrdinal());
fprintf(file, "\"MethodID\":%lld,", (INT64)context->GetCallee());
const char* className;
const char* methodName = compiler->eeGetMethodName(context->GetCallee(), &className);
fprintf(file, "\"MethodName\":\"%s\",", methodName);
fprintf(file, "\"Inlinees\":[");
if (context->GetChild() != nullptr)
{
bool childFirst = true;
genDumpPreciseDebugInfoInlineTree(file, context->GetChild(), &childFirst);
}
fprintf(file, "]}");
}
}
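//------------------------------------------------------------------------
// genDumpPreciseDebugInfo: Append a JSON record for this method (inline tree plus
// precise IP mappings) to the file configured via JitDumpPreciseDebugInfoFile.
// Based on the fprintf calls below, each record looks roughly like this (sketch):
//   {"MethodID":<handle>,"InlineTree":{"Ordinal":0,"MethodID":...,"MethodName":"...","Inlinees":[...]},
//    "Mappings":[{"NativeOffset":0,"InlineContext":0,"ILOffset":0},...]}
//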
void CodeGen::genDumpPreciseDebugInfo()
{
if (JitConfig.JitDumpPreciseDebugInfoFile() == nullptr)
return;
static CritSecObject s_critSect;
CritSecHolder holder(s_critSect);
FILE* file = _wfopen(JitConfig.JitDumpPreciseDebugInfoFile(), W("a"));
if (file == nullptr)
return;
// MethodID in ETW events are the method handles.
fprintf(file, "{\"MethodID\":%lld,", (INT64)compiler->info.compMethodHnd);
// Print inline tree.
fprintf(file, "\"InlineTree\":");
bool first = true;
genDumpPreciseDebugInfoInlineTree(file, compiler->compInlineContext, &first);
fprintf(file, ",\"Mappings\":[");
first = true;
for (PreciseIPMapping& mapping : compiler->genPreciseIPmappings)
{
if (!first)
{
fprintf(file, ",");
}
first = false;
fprintf(file, "{\"NativeOffset\":%u,\"InlineContext\":%u,\"ILOffset\":%u}",
mapping.nativeLoc.CodeOffset(GetEmitter()), mapping.debugInfo.GetInlineContext()->GetOrdinal(),
mapping.debugInfo.GetLocation().GetOffset());
}
fprintf(file, "]}\n");
fclose(file);
}
void CodeGen::genAddPreciseIPMappingHere(const DebugInfo& di)
{
PreciseIPMapping mapping;
mapping.nativeLoc.CaptureLocation(GetEmitter());
mapping.debugInfo = di;
compiler->genPreciseIPmappings.push_back(mapping);
}
#endif
/*============================================================================
*
* These are empty stubs to help the late dis-assembler to compile
* if the late disassembler is being built into a non-DEBUG build.
*
*============================================================================
*/
#if defined(LATE_DISASM)
#if !defined(DEBUG)
/* virtual */
const char* CodeGen::siRegVarName(size_t offs, size_t size, unsigned reg)
{
return NULL;
}
/* virtual */
const char* CodeGen::siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs)
{
return NULL;
}
/*****************************************************************************/
#endif // !defined(DEBUG)
#endif // defined(LATE_DISASM)
//------------------------------------------------------------------------
// indirForm: Make a temporary indir we can feed to pattern matching routines
// in cases where we don't want to instantiate all the indirs that happen.
//
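// For example (sketch; "addrTree" stands for some address-producing tree the caller
// already has):
//   GenTreeIndir tmpIndir = indirForm(TYP_INT, addrTree);
//   // ... pass &tmpIndir to a pattern-matching helper without allocating a real node
//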
/* static */ GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
{
GenTreeIndir i(GT_IND, type, base, nullptr);
i.SetRegNum(REG_NA);
i.SetContained();
return i;
}
//------------------------------------------------------------------------
// indirForm: Make a temporary indir we can feed to pattern matching routines
// in cases where we don't want to instantiate all the indirs that happen.
//
/* static */ GenTreeStoreInd CodeGen::storeIndirForm(var_types type, GenTree* base, GenTree* data)
{
GenTreeStoreInd i(type, base, data);
i.SetRegNum(REG_NA);
return i;
}
//------------------------------------------------------------------------
// intForm: Make a temporary int we can feed to pattern matching routines
// in cases where we don't want to instantiate.
//
GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
{
GenTreeIntCon i(type, value);
i.SetRegNum(REG_NA);
return i;
}
#if defined(TARGET_X86) || defined(TARGET_ARM)
//------------------------------------------------------------------------
// genLongReturn: Generates code for long return statement for x86 and arm.
//
// Note: treeNode's and op1's registers are already consumed.
//
// Arguments:
// treeNode - The GT_RETURN or GT_RETFILT tree node with LONG return type.
//
// Return Value:
// None
//
void CodeGen::genLongReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
assert(treeNode->TypeGet() == TYP_LONG);
GenTree* op1 = treeNode->gtGetOp1();
var_types targetType = treeNode->TypeGet();
assert(op1 != nullptr);
assert(op1->OperGet() == GT_LONG);
GenTree* loRetVal = op1->gtGetOp1();
GenTree* hiRetVal = op1->gtGetOp2();
assert((loRetVal->GetRegNum() != REG_NA) && (hiRetVal->GetRegNum() != REG_NA));
genConsumeReg(loRetVal);
genConsumeReg(hiRetVal);
inst_Mov(targetType, REG_LNGRET_LO, loRetVal->GetRegNum(), /* canSkip */ true, emitActualTypeSize(TYP_INT));
inst_Mov(targetType, REG_LNGRET_HI, hiRetVal->GetRegNum(), /* canSkip */ true, emitActualTypeSize(TYP_INT));
}
#endif // TARGET_X86 || TARGET_ARM
//------------------------------------------------------------------------
// genReturn: Generates code for return statement.
// In case of struct return, delegates to the genStructReturn method.
//
// Arguments:
// treeNode - The GT_RETURN or GT_RETFILT tree node.
//
// Return Value:
// None
//
void CodeGen::genReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
GenTree* op1 = treeNode->gtGetOp1();
var_types targetType = treeNode->TypeGet();
// A void GT_RETFILT is the end of a finally. For non-void filter returns we need to load the result in the return
// register, if it's not already there. The processing is the same as GT_RETURN. For filters, the IL spec says the
// result is type int32. Further, the only legal values are 0 or 1; the use of other values is "undefined".
assert(!treeNode->OperIs(GT_RETFILT) || (targetType == TYP_VOID) || (targetType == TYP_INT));
#ifdef DEBUG
if (targetType == TYP_VOID)
{
assert(op1 == nullptr);
}
#endif // DEBUG
#if defined(TARGET_X86) || defined(TARGET_ARM)
if (targetType == TYP_LONG)
{
genLongReturn(treeNode);
}
else
#endif // TARGET_X86 || TARGET_ARM
{
if (isStructReturn(treeNode))
{
genStructReturn(treeNode);
}
else if (targetType != TYP_VOID)
{
assert(op1 != nullptr);
noway_assert(op1->GetRegNum() != REG_NA);
// !! NOTE !! genConsumeReg will clear op1 as GC ref after it has
// consumed a reg for the operand. This is because the variable
// is dead after return. But we are issuing more instructions
// like "profiler leave callback" after this consumption. So
// if you are issuing more instructions after this point,
// remember to keep the variable live up until the new method
// exit point where it is actually dead.
genConsumeReg(op1);
#if defined(TARGET_ARM64)
genSimpleReturn(treeNode);
#else // !TARGET_ARM64
#if defined(TARGET_X86)
if (varTypeUsesFloatReg(treeNode))
{
genFloatReturn(treeNode);
}
else
#elif defined(TARGET_ARM)
if (varTypeUsesFloatReg(treeNode) && (compiler->opts.compUseSoftFP || compiler->info.compIsVarArgs))
{
if (targetType == TYP_FLOAT)
{
GetEmitter()->emitIns_Mov(INS_vmov_f2i, EA_4BYTE, REG_INTRET, op1->GetRegNum(),
/* canSkip */ false);
}
else
{
assert(targetType == TYP_DOUBLE);
GetEmitter()->emitIns_R_R_R(INS_vmov_d2i, EA_8BYTE, REG_INTRET, REG_NEXT(REG_INTRET),
op1->GetRegNum());
}
}
else
#endif // TARGET_ARM
{
regNumber retReg = varTypeUsesFloatReg(treeNode) ? REG_FLOATRET : REG_INTRET;
inst_Mov_Extend(targetType, /* srcInReg */ true, retReg, op1->GetRegNum(), /* canSkip */ true);
}
#endif // !TARGET_ARM64
}
}
#ifdef PROFILING_SUPPORTED
// !! Note !!
// TODO-AMD64-Unix: If the profiler hook is implemented on *nix, make sure for 2 register returned structs
// the RAX and RDX needs to be kept alive. Make the necessary changes in lowerxarch.cpp
// in the handling of the GT_RETURN statement.
// Such structs containing GC pointers need to be handled by calling gcInfo.gcMarkRegSetNpt
// for the return registers containing GC refs.
//
// Reason for not materializing Leave callback as a GT_PROF_HOOK node after GT_RETURN:
// In the flowgraph and other places we assert that the last node of a block marked as
// BBJ_RETURN is either a GT_RETURN, a GT_JMP, or a tail call. It would be nice to
// maintain such an invariant irrespective of whether a profiler hook is needed or not.
// Also, there is not much to be gained by materializing it as an explicit node.
//
// There should be a single return block while generating profiler ELT callbacks,
// so we just look for that block to trigger insertion of the profile hook.
if ((compiler->compCurBB == compiler->genReturnBB) && compiler->compIsProfilerHookNeeded())
{
// !! NOTE !!
// Since we are invalidating the assumption that we would slip into the epilog
// right after the "return", we need to preserve the return reg's GC state
// across the call until actual method return.
ReturnTypeDesc retTypeDesc;
unsigned regCount = 0;
if (compiler->compMethodReturnsMultiRegRetType())
{
if (varTypeIsLong(compiler->info.compRetNativeType))
{
retTypeDesc.InitializeLongReturnType();
}
else // we must have a struct return type
{
CorInfoCallConvExtension callConv = compiler->info.compCallConv;
retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass,
callConv);
}
regCount = retTypeDesc.GetReturnRegCount();
}
if (varTypeIsGC(compiler->info.compRetNativeType))
{
gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
}
else if (compiler->compMethodReturnsMultiRegRetType())
{
for (unsigned i = 0; i < regCount; ++i)
{
if (varTypeIsGC(retTypeDesc.GetReturnRegType(i)))
{
gcInfo.gcMarkRegPtrVal(retTypeDesc.GetABIReturnReg(i), retTypeDesc.GetReturnRegType(i));
}
}
}
else if (compiler->compMethodReturnsRetBufAddr())
{
gcInfo.gcMarkRegPtrVal(REG_INTRET, TYP_BYREF);
}
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_LEAVE);
if (varTypeIsGC(compiler->info.compRetNativeType))
{
gcInfo.gcMarkRegSetNpt(genRegMask(REG_INTRET));
}
else if (compiler->compMethodReturnsMultiRegRetType())
{
for (unsigned i = 0; i < regCount; ++i)
{
if (varTypeIsGC(retTypeDesc.GetReturnRegType(i)))
{
gcInfo.gcMarkRegSetNpt(genRegMask(retTypeDesc.GetABIReturnReg(i)));
}
}
}
else if (compiler->compMethodReturnsRetBufAddr())
{
gcInfo.gcMarkRegSetNpt(genRegMask(REG_INTRET));
}
}
#endif // PROFILING_SUPPORTED
#if defined(DEBUG) && defined(TARGET_XARCH)
bool doStackPointerCheck = compiler->opts.compStackCheckOnRet;
#if defined(FEATURE_EH_FUNCLETS)
// Don't do stack pointer check at the return from a funclet; only for the main function.
if (compiler->funCurrentFunc()->funKind != FUNC_ROOT)
{
doStackPointerCheck = false;
}
#else // !FEATURE_EH_FUNCLETS
// Don't generate stack checks for x86 finally/filter EH returns: these are not invoked
// with the same SP as the main function. See also CodeGen::genEHFinallyOrFilterRet().
if ((compiler->compCurBB->bbJumpKind == BBJ_EHFINALLYRET) || (compiler->compCurBB->bbJumpKind == BBJ_EHFILTERRET))
{
doStackPointerCheck = false;
}
#endif // !FEATURE_EH_FUNCLETS
genStackPointerCheck(doStackPointerCheck, compiler->lvaReturnSpCheck);
#endif // defined(DEBUG) && defined(TARGET_XARCH)
}
//------------------------------------------------------------------------
// isStructReturn: Returns whether the 'treeNode' is returning a struct.
//
// Arguments:
// treeNode - The tree node to evaluate whether is a struct return.
//
// Return Value:
// Returns true if 'treeNode' is a GT_RETURN node of type struct.
// Otherwise returns false.
//
bool CodeGen::isStructReturn(GenTree* treeNode)
{
// This method could be called for a 'treeNode' of GT_RETFILT or GT_RETURN.
// For GT_RETFILT, the return is always a bool (for a filter) or void (for the end of a finally block).
noway_assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
if (treeNode->OperGet() != GT_RETURN)
{
return false;
}
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
assert(!varTypeIsStruct(treeNode));
return false;
#else
return varTypeIsStruct(treeNode) && (compiler->info.compRetNativeType == TYP_STRUCT);
#endif
}
//------------------------------------------------------------------------
// genStructReturn: Generates code for returning a struct.
//
// Arguments:
// treeNode - The GT_RETURN tree node.
//
// Return Value:
// None
//
// Assumption:
// op1 of GT_RETURN node is either GT_LCL_VAR or multi-reg GT_CALL
//
void CodeGen::genStructReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN);
GenTree* op1 = treeNode->gtGetOp1();
genConsumeRegs(op1);
GenTree* actualOp1 = op1;
if (op1->IsCopyOrReload())
{
actualOp1 = op1->gtGetOp1();
}
ReturnTypeDesc retTypeDesc;
LclVarDsc* varDsc = nullptr;
if (actualOp1->OperIs(GT_LCL_VAR))
{
varDsc = compiler->lvaGetDesc(actualOp1->AsLclVar());
retTypeDesc.InitializeStructReturnType(compiler, varDsc->GetStructHnd(), compiler->info.compCallConv);
assert(varDsc->lvIsMultiRegRet);
}
else
{
assert(actualOp1->OperIs(GT_CALL));
retTypeDesc = *(actualOp1->AsCall()->GetReturnTypeDesc());
}
unsigned regCount = retTypeDesc.GetReturnRegCount();
assert(regCount <= MAX_RET_REG_COUNT);
#if FEATURE_MULTIREG_RET
// Right now the only enregisterable structs supported are SIMD vector types.
if (genIsRegCandidateLocal(actualOp1))
{
#if defined(DEBUG)
const GenTreeLclVar* lclVar = actualOp1->AsLclVar();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclVar);
assert(varTypeIsSIMD(varDsc->GetRegisterType()));
assert(!lclVar->IsMultiReg());
#endif // DEBUG
#ifdef FEATURE_SIMD
genSIMDSplitReturn(op1, &retTypeDesc);
#endif // FEATURE_SIMD
}
else if (actualOp1->OperIs(GT_LCL_VAR) && !actualOp1->AsLclVar()->IsMultiReg())
{
GenTreeLclVar* lclNode = actualOp1->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
assert(varDsc->lvIsMultiRegRet);
int offset = 0;
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc.GetReturnRegType(i);
regNumber toReg = retTypeDesc.GetABIReturnReg(i);
GetEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), toReg, lclNode->GetLclNum(), offset);
offset += genTypeSize(type);
}
}
else
{
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc.GetReturnRegType(i);
regNumber toReg = retTypeDesc.GetABIReturnReg(i);
regNumber fromReg = op1->GetRegByIndex(i);
if ((fromReg == REG_NA) && op1->OperIs(GT_COPY))
{
// A copy that doesn't copy this field will have REG_NA.
// TODO-Cleanup: It would probably be better to always have a valid reg
// on a GT_COPY, unless the operand is actually spilled. Then we wouldn't have
// to check for this case (though we'd have to check in the genRegCopy that the
// reg is valid).
fromReg = actualOp1->GetRegByIndex(i);
}
if (fromReg == REG_NA)
{
// This is a spilled field of a multi-reg lclVar.
// We currently only mark a lclVar operand as RegOptional, since we don't have a way
// to mark a multi-reg tree node as used from spill (GTF_NOREG_AT_USE) on a per-reg basis.
assert(varDsc != nullptr);
assert(varDsc->lvPromoted);
unsigned fieldVarNum = varDsc->lvFieldLclStart + i;
assert(compiler->lvaGetDesc(fieldVarNum)->lvOnFrame);
GetEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), toReg, fieldVarNum, 0);
}
else
{
// Note that ins_Copy(fromReg, type) will return the appropriate register to copy
// between register files if needed.
inst_Mov(type, toReg, fromReg, /* canSkip */ true);
}
}
}
#else // !FEATURE_MULTIREG_RET
unreached();
#endif
}
//----------------------------------------------------------------------------------
// genMultiRegStoreToLocal: store multi-reg value to a local
//
// Arguments:
// lclNode - GenTree of GT_STORE_LCL_VAR
//
// Return Value:
// None
//
// Assumption:
// The child of store is a multi-reg node.
//
void CodeGen::genMultiRegStoreToLocal(GenTreeLclVar* lclNode)
{
assert(lclNode->OperIs(GT_STORE_LCL_VAR));
assert(varTypeIsStruct(lclNode) || varTypeIsMultiReg(lclNode));
GenTree* op1 = lclNode->gtGetOp1();
assert(op1->IsMultiRegNode());
GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
unsigned regCount = actualOp1->GetMultiRegCount(compiler);
assert(regCount > 1);
// Assumption: current implementation requires that a multi-reg
// var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
// being promoted, unless compiler->lvaEnregMultiRegVars is true.
unsigned lclNum = lclNode->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
if (op1->OperIs(GT_CALL))
{
assert(regCount <= MAX_RET_REG_COUNT);
noway_assert(varDsc->lvIsMultiRegRet);
}
#ifdef FEATURE_SIMD
// Check for the case of an enregistered SIMD type that's returned in multiple registers.
if (varDsc->lvIsRegCandidate() && (lclNode->GetRegNum() != REG_NA))
{
assert(varTypeIsSIMD(lclNode));
genMultiRegStoreToSIMDLocal(lclNode);
return;
}
#endif // FEATURE_SIMD
// We have either a multi-reg local or a local with multiple fields in memory.
//
// The liveness model is as follows:
// use reg #0 from src, including any reload or copy
// define reg #0
// use reg #1 from src, including any reload or copy
// define reg #1
// etc.
// Imagine the following scenario:
// There are 3 registers used. Prior to this node, they occupy registers r3, r2 and r1.
// There are 3 registers defined by this node. They need to be placed in r1, r2 and r3,
// in that order.
//
// If we defined this node as using all the source registers at once, we'd have to adopt one
// of the following models:
// - All (or all but one) of the incoming sources are marked "delayFree" so that they won't
// get the same register as any of the registers being defined. This would result in copies for
// the common case where the source and destination registers are the same (e.g. when a CALL
// result is assigned to a lclVar, which is then returned).
// - For our example (and for many/most cases) we would have to copy or spill all sources.
// - We allow circular dependencies between source and destination registers. This would require
// the code generator to determine the order in which the copies must be generated, and would
// require a temp register in case a swap is required. This complexity would have to be handled
// in both the normal code generation case, as well as for copies & reloads, as they are currently
// modeled by the register allocator to happen just prior to the use.
// - For our example, a temp would be required to swap r1 and r3, unless a swap instruction is
// available on the target.
//
// By having a multi-reg local use and define each field in order, we avoid these issues, and the
// register allocator will ensure that any conflicts are resolved via spill or inserted COPYs.
// For our example, the register allocator would simply spill r1 because the first def requires it.
// The code generator would move r3 to r1, leave r2 alone, and then load the spilled value into r3.
unsigned offset = 0;
bool isMultiRegVar = lclNode->IsMultiRegLclVar();
bool hasRegs = false;
if (isMultiRegVar)
{
assert(compiler->lvaEnregMultiRegVars);
assert(regCount == varDsc->lvFieldCnt);
}
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = genConsumeReg(op1, i);
var_types srcType = actualOp1->GetRegTypeByIndex(i);
// genConsumeReg will return the valid register, either from the COPY
// or from the original source.
assert(reg != REG_NA);
if (isMultiRegVar)
{
// Each field is passed in its own register, use the field types.
regNumber varReg = lclNode->GetRegByIndex(i);
unsigned fieldLclNum = varDsc->lvFieldLclStart + i;
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldLclNum);
var_types destType = fieldVarDsc->TypeGet();
if (varReg != REG_NA)
{
hasRegs = true;
// We may need a cross register-file copy here.
inst_Mov(destType, varReg, reg, /* canSkip */ true);
}
else
{
varReg = REG_STK;
}
if ((varReg == REG_STK) || fieldVarDsc->IsAlwaysAliveInMemory())
{
if (!lclNode->IsLastUse(i))
{
// A byte field passed in a long register should be written on the stack as a byte.
instruction storeIns = ins_StoreFromSrc(reg, destType);
GetEmitter()->emitIns_S_R(storeIns, emitTypeSize(destType), reg, fieldLclNum, 0);
}
}
fieldVarDsc->SetRegNum(varReg);
}
else
{
// Several fields could be passed in one register, copy using the register type.
// It could rewrite memory outside of the fields, but locals on the stack are rounded up to POINTER_SIZE,
// so it is safe to store a long register into a byte field as we know there is enough padding after it.
GetEmitter()->emitIns_S_R(ins_Store(srcType), emitTypeSize(srcType), reg, lclNum, offset);
offset += genTypeSize(srcType);
#ifdef DEBUG
#ifdef TARGET_64BIT
assert(offset <= varDsc->lvSize());
#else // !TARGET_64BIT
if (varTypeIsStruct(varDsc))
{
assert(offset <= varDsc->lvSize());
}
else
{
assert(varDsc->TypeGet() == TYP_LONG);
assert(offset <= genTypeSize(TYP_LONG));
}
#endif // !TARGET_64BIT
#endif // DEBUG
}
}
// Update variable liveness.
if (isMultiRegVar)
{
if (hasRegs)
{
genProduceReg(lclNode);
}
else
{
genUpdateLife(lclNode);
}
}
else
{
genUpdateLife(lclNode);
varDsc->SetRegNum(REG_STK);
}
}
//------------------------------------------------------------------------
// genRegCopy: Produce code for a GT_COPY node.
//
// Arguments:
// tree - the GT_COPY node
//
// Notes:
// This will copy the register produced by this node's source, to
// the register allocated to this GT_COPY node.
// It has some special handling for these cases:
// - when the source and target registers are in different register files
// (note that this is *not* a conversion).
// - when the source is a lclVar whose home location is being moved to a new
// register (rather than just being copied for temporary use).
//
void CodeGen::genRegCopy(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_COPY);
GenTree* op1 = treeNode->AsOp()->gtOp1;
if (op1->IsMultiRegNode())
{
// Register allocation assumes that any reload and copy are done in operand order.
// That is, we can have:
// (reg0, reg1) = COPY(V0,V1) where V0 is in reg1 and V1 is in memory
// The register allocation model assumes:
// First, V0 is moved to reg0 (v1 can't be in reg0 because it is still live, which would be a conflict).
// Then, V1 is moved to reg1
// However, if we call genConsumeRegs on op1, it will do the reload of V1 before we do the copy of V0.
// So we need to handle that case first.
//
// There should never be any circular dependencies, and we will check that here.
// GenTreeCopyOrReload only reports the highest index that has a valid register.
// However, we need to ensure that we consume all the registers of the child node,
// so we use its regCount.
unsigned regCount = op1->GetMultiRegCount(compiler);
assert(regCount <= MAX_MULTIREG_COUNT);
// First set the source registers as busy if they haven't been spilled.
// (Note that this is just for verification that we don't have circular dependencies.)
regMaskTP busyRegs = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
if ((op1->GetRegSpillFlagByIdx(i) & GTF_SPILLED) == 0)
{
busyRegs |= genRegMask(op1->GetRegByIndex(i));
}
}
for (unsigned i = 0; i < regCount; ++i)
{
regNumber sourceReg = op1->GetRegByIndex(i);
// genRegCopy will consume the source register, perform any required reloads,
// and will return either the register copied to, or the original register if there's no copy.
regNumber targetReg = genRegCopy(treeNode, i);
if (targetReg != sourceReg)
{
regMaskTP targetRegMask = genRegMask(targetReg);
assert((busyRegs & targetRegMask) == 0);
// Clear sourceReg from the busyRegs, and add targetReg.
busyRegs &= ~genRegMask(sourceReg);
}
busyRegs |= genRegMask(targetReg);
}
return;
}
regNumber srcReg = genConsumeReg(op1);
var_types targetType = treeNode->TypeGet();
regNumber targetReg = treeNode->GetRegNum();
assert(srcReg != REG_NA);
assert(targetReg != REG_NA);
assert(targetType != TYP_STRUCT);
inst_Mov(targetType, targetReg, srcReg, /* canSkip */ false);
if (op1->IsLocal())
{
// The lclVar will never be a def.
// If it is a last use, the lclVar will be killed by genConsumeReg(), as usual, and genProduceReg will
// appropriately set the gcInfo for the copied value.
// If not, there are two cases we need to handle:
// - If this is a TEMPORARY copy (indicated by the GTF_VAR_DEATH flag) the variable
// will remain live in its original register.
// genProduceReg() will appropriately set the gcInfo for the copied value,
// and genConsumeReg will reset it.
// - Otherwise, we need to update register info for the lclVar.
GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
assert((lcl->gtFlags & GTF_VAR_DEF) == 0);
if ((lcl->gtFlags & GTF_VAR_DEATH) == 0 && (treeNode->gtFlags & GTF_VAR_DEATH) == 0)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
// If we didn't just spill it (in genConsumeReg, above), then update the register info
if (varDsc->GetRegNum() != REG_STK)
{
// The old location is dying
genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(op1));
gcInfo.gcMarkRegSetNpt(genRegMask(op1->GetRegNum()));
genUpdateVarReg(varDsc, treeNode);
#ifdef USING_VARIABLE_LIVE_RANGE
// Report the home change for this variable
varLiveKeeper->siUpdateVariableLiveRange(varDsc, lcl->GetLclNum());
#endif // USING_VARIABLE_LIVE_RANGE
// The new location is going live
genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(treeNode));
}
}
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genRegCopy: Produce code for a single register of a multireg copy node.
//
// Arguments:
// tree - The GT_COPY node
// multiRegIndex - The index of the register to be copied
//
// Notes:
// This will copy the corresponding register produced by this node's source, to
// the register allocated to the register specified by this GT_COPY node.
// A multireg copy doesn't support moving between register files, as the GT_COPY
// node does not retain separate types for each index.
// Like the single-register case above, it also handles the case where the source is a
// lclVar whose home location is being moved to a new register (rather than just being
// copied for temporary use).
//
// Return Value:
// Either the register copied to, or the original register if there's no copy.
//
regNumber CodeGen::genRegCopy(GenTree* treeNode, unsigned multiRegIndex)
{
assert(treeNode->OperGet() == GT_COPY);
GenTree* op1 = treeNode->gtGetOp1();
assert(op1->IsMultiRegNode());
GenTreeCopyOrReload* copyNode = treeNode->AsCopyOrReload();
assert(copyNode->GetRegCount() <= MAX_MULTIREG_COUNT);
// Consume op1's register, which will perform any necessary reloads.
genConsumeReg(op1, multiRegIndex);
regNumber sourceReg = op1->GetRegByIndex(multiRegIndex);
regNumber targetReg = copyNode->GetRegNumByIdx(multiRegIndex);
// GenTreeCopyOrReload only reports the highest index that has a valid register.
// However there may be lower indices that have no valid register (i.e. the register
// on the source is still valid at the consumer).
if (targetReg != REG_NA)
{
// We shouldn't specify a no-op move.
assert(sourceReg != targetReg);
var_types type;
if (op1->IsMultiRegLclVar())
{
LclVarDsc* parentVarDsc = compiler->lvaGetDesc(op1->AsLclVar());
unsigned fieldVarNum = parentVarDsc->lvFieldLclStart + multiRegIndex;
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldVarNum);
type = fieldVarDsc->TypeGet();
inst_Mov(type, targetReg, sourceReg, /* canSkip */ false);
if (!op1->AsLclVar()->IsLastUse(multiRegIndex) && fieldVarDsc->GetRegNum() != REG_STK)
{
// The old location is dying
genUpdateRegLife(fieldVarDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(op1));
gcInfo.gcMarkRegSetNpt(genRegMask(sourceReg));
genUpdateVarReg(fieldVarDsc, treeNode);
#ifdef USING_VARIABLE_LIVE_RANGE
// Report the home change for this variable
varLiveKeeper->siUpdateVariableLiveRange(fieldVarDsc, fieldVarNum);
#endif // USING_VARIABLE_LIVE_RANGE
// The new location is going live
genUpdateRegLife(fieldVarDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(treeNode));
}
}
else
{
type = op1->GetRegTypeByIndex(multiRegIndex);
inst_Mov(type, targetReg, sourceReg, /* canSkip */ false);
// We never spill after a copy, so to produce the single register, we simply need to
// update the GC info for the defined register.
gcInfo.gcMarkRegPtrVal(targetReg, type);
}
return targetReg;
}
else
{
return sourceReg;
}
}
#if defined(DEBUG) && defined(TARGET_XARCH)
//------------------------------------------------------------------------
// genStackPointerCheck: Generate code to check the stack pointer against a saved value.
// This is a debug check.
//
// Arguments:
// doStackPointerCheck - If true, do the stack pointer check, otherwise do nothing.
// lvaStackPointerVar - The local variable number that holds the value of the stack pointer
// we are comparing against.
//
// Return Value:
// None
//
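// In effect (sketch): the emitted sequence compares the current stack pointer against
// the value saved in "lvaStackPointerVar"; on a mismatch it executes an INS_BREAKPOINT,
// and a temporary label is used to skip the breakpoint when the values match.
//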
void CodeGen::genStackPointerCheck(bool doStackPointerCheck, unsigned lvaStackPointerVar)
{
if (doStackPointerCheck)
{
noway_assert(lvaStackPointerVar != 0xCCCCCCCC && compiler->lvaGetDesc(lvaStackPointerVar)->lvDoNotEnregister &&
compiler->lvaGetDesc(lvaStackPointerVar)->lvOnFrame);
GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, lvaStackPointerVar, 0);
BasicBlock* sp_check = genCreateTempLabel();
GetEmitter()->emitIns_J(INS_je, sp_check);
instGen(INS_BREAKPOINT);
genDefineTempLabel(sp_check);
}
}
#endif // defined(DEBUG) && defined(TARGET_XARCH)
unsigned CodeGenInterface::getCurrentStackLevel() const
{
return genStackLevel;
}
#ifdef USING_VARIABLE_LIVE_RANGE
#ifdef DEBUG
//------------------------------------------------------------------------
// VariableLiveRanges dumpers
//------------------------------------------------------------------------
// Dump a "VariableLiveRange" when code has not yet been generated: we don't have the
// assembly native offsets yet, but we do have the "emitLocation"s and the "siVarLoc".
void CodeGenInterface::VariableLiveKeeper::VariableLiveRange::dumpVariableLiveRange(
const CodeGenInterface* codeGen) const
{
codeGen->dumpSiVarLoc(&m_VarLocation);
printf(" [");
m_StartEmitLocation.Print(codeGen->GetCompiler()->compMethodID);
printf(", ");
if (m_EndEmitLocation.Valid())
{
m_EndEmitLocation.Print(codeGen->GetCompiler()->compMethodID);
}
else
{
printf("...");
}
printf("]");
}
// Dump "VariableLiveRange" when code has been generated and we have the assembly native offset of each "emitLocation"
void CodeGenInterface::VariableLiveKeeper::VariableLiveRange::dumpVariableLiveRange(
emitter* emit, const CodeGenInterface* codeGen) const
{
assert(emit != nullptr);
// "VariableLiveRanges" are created setting its location ("m_VarLocation") and the initial native offset
// ("m_StartEmitLocation")
codeGen->dumpSiVarLoc(&m_VarLocation);
// If this is an open "VariableLiveRange", "m_EndEmitLocation" is non-valid and print -1
UNATIVE_OFFSET endAssemblyOffset = m_EndEmitLocation.Valid() ? m_EndEmitLocation.CodeOffset(emit) : -1;
printf(" [%X, %X)", m_StartEmitLocation.CodeOffset(emit), endAssemblyOffset);
}
//------------------------------------------------------------------------
// LiveRangeDumper
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// resetDumper: If the "liveRange" has its last "VariableLiveRange" closed, mark the
//  "LiveRangeDumper" as having nothing left to dump. Otherwise, make the
//  "LiveRangeDumper" point to the last "VariableLiveRange" of "liveRange", which
//  is still open.
//
// Arguments:
//  liveRanges - the "LiveRangeList" of the "VariableLiveDescriptor" whose
//  "LiveRangeDumper" we want to update.
//
// Notes:
//  This method is expected to be called once the code for a BasicBlock has been
//  generated and all the new "VariableLiveRange"s of the variable during this
//  block have been dumped.
void CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::resetDumper(const LiveRangeList* liveRanges)
{
// There must have reported something in order to reset
assert(m_hasLiveRangestoDump);
if (liveRanges->back().m_EndEmitLocation.Valid())
{
// the last "VariableLiveRange" is closed and the variable
// is no longer alive
m_hasLiveRangestoDump = false;
}
else
{
// the last "VariableLiveRange" remains opened because it is
// live at "BasicBlock"s "bbLiveOut".
m_StartingLiveRange = liveRanges->backPosition();
}
}
//------------------------------------------------------------------------
// setDumperStartAt: Make the "LiveRangeDumper" point at the given "VariableLiveRange",
// so we can start dumping from there once the actual "BasicBlock"'s code has been generated.
//
// Arguments:
//  liveRangeIt - an iterator to a position in "VariableLiveDescriptor::m_VariableLiveRanges",
//   indicating the first "VariableLiveRange" to dump for the current "BasicBlock".
void CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::setDumperStartAt(const LiveRangeListIterator liveRangeIt)
{
m_hasLiveRangestoDump = true;
m_StartingLiveRange = liveRangeIt;
}
//------------------------------------------------------------------------
// getStartForDump: Return an iterator to the first "VariableLiveRange" edited/added
// during the current "BasicBlock"
//
// Return Value:
// A LiveRangeListIterator to the first "VariableLiveRange" in "LiveRangeList" which
// was used during last "BasicBlock".
//
CodeGenInterface::VariableLiveKeeper::LiveRangeListIterator CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::
getStartForDump() const
{
return m_StartingLiveRange;
}
//------------------------------------------------------------------------
// hasLiveRangesToDump: Return whether at least one "VariableLiveRange" was alive during
//  the current "BasicBlock"'s code generation.
//
// Return Value:
//  A boolean indicating whether there is at least one "VariableLiveRange"
//  that has been used for the variable during the last "BasicBlock".
//
bool CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::hasLiveRangesToDump() const
{
return m_hasLiveRangestoDump;
}
#endif // DEBUG
//------------------------------------------------------------------------
// VariableLiveDescriptor
//------------------------------------------------------------------------
CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::VariableLiveDescriptor(CompAllocator allocator)
{
// Initialize an empty list
m_VariableLiveRanges = new (allocator) LiveRangeList(allocator);
INDEBUG(m_VariableLifeBarrier = new (allocator) LiveRangeDumper(m_VariableLiveRanges));
}
//------------------------------------------------------------------------
// hasVariableLiveRangeOpen: Return true if the variable is still alive,
//  false otherwise.
//
bool CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVariableLiveRangeOpen() const
{
return !m_VariableLiveRanges->empty() && !m_VariableLiveRanges->back().m_EndEmitLocation.Valid();
}
//------------------------------------------------------------------------
// getLiveRanges: Return the list of variable locations for this variable.
//
// Return Value:
//  A pointer to the "LiveRangeList" holding all the variable locations reported
//  for this variable (the list may be empty).
//
CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::
getLiveRanges() const
{
return m_VariableLiveRanges;
}
//------------------------------------------------------------------------
// startLiveRangeFromEmitter: Report this variable as being born in "varLocation"
// since the instruction where "emit" is located.
//
// Arguments:
// varLocation - the home of the variable.
// emit - an emitter* instance located at the first instruction from
// where "varLocation" becomes valid.
//
// Assumptions:
// This variable is being born so it should be dead.
//
// Notes:
// The position of "emit" matters to ensure intervals inclusive of the
// beginning and exclusive of the end.
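//  If the variable is reborn at the instruction immediately following the one where
//  it died, and in the same "siVarLoc", the previous range is re-opened rather than
//  a new one being created (see the coalescing branch below).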
//
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::startLiveRangeFromEmitter(
CodeGenInterface::siVarLoc varLocation, emitter* emit) const
{
noway_assert(emit != nullptr);
// Either this is the first "VariableLiveRange", or the previous one has been closed, so its "m_EndEmitLocation" is valid
noway_assert(m_VariableLiveRanges->empty() || m_VariableLiveRanges->back().m_EndEmitLocation.Valid());
if (!m_VariableLiveRanges->empty() &&
siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation)) &&
m_VariableLiveRanges->back().m_EndEmitLocation.IsPreviousInsNum(emit))
{
JITDUMP("Extending debug range...\n");
// The variable is being born just after the instruction at which it died.
// In this case, i.e. an update of the variable's value, we coalesce the live ranges.
m_VariableLiveRanges->back().m_EndEmitLocation.Init();
}
else
{
JITDUMP("New debug range: %s\n",
m_VariableLiveRanges->empty()
? "first"
: siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation))
? "not adjacent"
: "new var or location");
// Creates new live range with invalid end
m_VariableLiveRanges->emplace_back(varLocation, emitLocation(), emitLocation());
m_VariableLiveRanges->back().m_StartEmitLocation.CaptureLocation(emit);
}
#ifdef DEBUG
if (!m_VariableLifeBarrier->hasLiveRangesToDump())
{
m_VariableLifeBarrier->setDumperStartAt(m_VariableLiveRanges->backPosition());
}
#endif // DEBUG
// m_StartEmitLocation has to be valid, and m_EndEmitLocation must not be valid yet
noway_assert(m_VariableLiveRanges->back().m_StartEmitLocation.Valid());
noway_assert(!m_VariableLiveRanges->back().m_EndEmitLocation.Valid());
}
//------------------------------------------------------------------------
// endLiveRangeAtEmitter: Report this variable as becoming dead since the
// instruction where "emit" is located.
//
// Arguments:
// emit - an emitter* instance located at the first instruction from
// this variable becomes dead.
//
// Assumptions:
// This variable is becoming dead so it should be alive.
//
// Notes:
// The position of "emit" matters to ensure intervals inclusive of the
// beginning and exclusive of the end.
//
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::endLiveRangeAtEmitter(emitter* emit) const
{
noway_assert(emit != nullptr);
noway_assert(hasVariableLiveRangeOpen());
// Using [closed, open) ranges so as to not compute the size of the last instruction
m_VariableLiveRanges->back().m_EndEmitLocation.CaptureLocation(emit);
// Now m_EndEmitLocation has to be valid
noway_assert(m_VariableLiveRanges->back().m_EndEmitLocation.Valid());
}
//------------------------------------------------------------------------
// updateLiveRangeAtEmitter: Report this variable as changing its variable
// home to "varLocation" since the instruction where "emit" is located.
//
// Arguments:
// varLocation - the new variable location.
// emit - an emitter* instance located at the first instruction from
// where "varLocation" becomes valid.
//
// Assumptions:
//  This variable's home is being updated, so it should currently be alive (have an open "VariableLiveRange").
//
// Notes:
// The position of "emit" matters to ensure intervals inclusive of the
// beginning and exclusive of the end.
//
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::updateLiveRangeAtEmitter(
CodeGenInterface::siVarLoc varLocation, emitter* emit) const
{
// This variable is changing home so it has been started before during this block
noway_assert(m_VariableLiveRanges != nullptr && !m_VariableLiveRanges->empty());
// And its last m_EndEmitLocation has to be invalid
noway_assert(!m_VariableLiveRanges->back().m_EndEmitLocation.Valid());
// If we are reporting the same home again, that probably means we are doing something twice?
// noway_assert(! CodeGenInterface::siVarLoc::Equals(&m_VariableLiveRanges->back().m_VarLocation, varLocation));
// Close previous live range
endLiveRangeAtEmitter(emit);
startLiveRangeFromEmitter(varLocation, emit);
}
#ifdef DEBUG
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::dumpAllRegisterLiveRangesForBlock(
emitter* emit, const CodeGenInterface* codeGen) const
{
bool first = true;
for (LiveRangeListIterator it = m_VariableLiveRanges->begin(); it != m_VariableLiveRanges->end(); it++)
{
if (!first)
{
printf("; ");
}
it->dumpVariableLiveRange(emit, codeGen);
first = false;
}
}
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::dumpRegisterLiveRangesForBlockBeforeCodeGenerated(
const CodeGenInterface* codeGen) const
{
bool first = true;
for (LiveRangeListIterator it = m_VariableLifeBarrier->getStartForDump(); it != m_VariableLiveRanges->end(); it++)
{
if (!first)
{
printf("; ");
}
it->dumpVariableLiveRange(codeGen);
first = false;
}
}
// Returns true if a live range for this variable has been recorded
bool CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVarLiveRangesToDump() const
{
return !m_VariableLiveRanges->empty();
}
// Returns true if a live range for this variable has been recorded from last call to EndBlock
bool CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVarLiveRangesFromLastBlockToDump() const
{
return m_VariableLifeBarrier->hasLiveRangesToDump();
}
// Reset the barrier so as to dump only next block changes on next block
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::endBlockLiveRanges()
{
// make "m_VariableLifeBarrier->m_StartingLiveRange" now point past the already-dumped ranges, for printing purposes
m_VariableLifeBarrier->resetDumper(m_VariableLiveRanges);
}
#endif // DEBUG
//------------------------------------------------------------------------
// VariableLiveKeeper
//------------------------------------------------------------------------
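// The "VariableLiveKeeper" owns, for each IL local/argument, the list of home locations
// ("VariableLiveRange"s) observed during code generation. Two descriptor arrays are kept:
// "m_vlrLiveDsc" for the method body and "m_vlrLiveDscForProlog" for locations reported
// while generating the prolog.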
// Initialize structures for VariableLiveRanges
void CodeGenInterface::initializeVariableLiveKeeper()
{
CompAllocator allocator = compiler->getAllocator(CMK_VariableLiveRanges);
int amountTrackedVariables = compiler->opts.compDbgInfo ? compiler->info.compLocalsCount : 0;
int amountTrackedArgs = compiler->opts.compDbgInfo ? compiler->info.compArgsCount : 0;
varLiveKeeper = new (allocator) VariableLiveKeeper(amountTrackedVariables, amountTrackedArgs, compiler, allocator);
}
CodeGenInterface::VariableLiveKeeper* CodeGenInterface::getVariableLiveKeeper() const
{
return varLiveKeeper;
};
//------------------------------------------------------------------------
// VariableLiveKeeper: Create an instance of the object in charge of managing
//  VariableLiveRanges and initialize the array "m_vlrLiveDsc".
//
// Arguments:
// totalLocalCount - the count of args, special args and IL Local
// variables in the method.
// argsCount - the count of args and special args in the method.
//  comp - a compiler instance
//  allocator - the allocator used to create the "VariableLiveDescriptor" arrays
//
CodeGenInterface::VariableLiveKeeper::VariableLiveKeeper(unsigned int totalLocalCount,
unsigned int argsCount,
Compiler* comp,
CompAllocator allocator)
: m_LiveDscCount(totalLocalCount)
, m_LiveArgsCount(argsCount)
, m_Compiler(comp)
, m_LastBasicBlockHasBeenEmited(false)
{
if (m_LiveDscCount > 0)
{
// Allocate memory for "m_vlrLiveDsc" and initialize each "VariableLiveDescriptor"
m_vlrLiveDsc = allocator.allocate<VariableLiveDescriptor>(m_LiveDscCount);
m_vlrLiveDscForProlog = allocator.allocate<VariableLiveDescriptor>(m_LiveDscCount);
for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
new (m_vlrLiveDsc + varNum, jitstd::placement_t()) VariableLiveDescriptor(allocator);
new (m_vlrLiveDscForProlog + varNum, jitstd::placement_t()) VariableLiveDescriptor(allocator);
}
}
}
//------------------------------------------------------------------------
// siStartOrCloseVariableLiveRange: Reports the given variable as being born
// or becoming dead.
//
// Arguments:
// varDsc - the variable for which a location changed will be reported
// varNum - the index of the variable in the "compiler->lvaTable"
// isBorn - whether the variable is being born from where the emitter is located.
// isDying - whether the variable is dying from where the emitter is located.
//
// Assumptions:
//  The emitter should be located at the first instruction from which the variable
//  becomes valid (when isBorn is true) or invalid (when isDying is true).
//
// Notes:
// This method is being called from treeLifeUpdater when the variable is being born,
// becoming dead, or both.
//
void CodeGenInterface::VariableLiveKeeper::siStartOrCloseVariableLiveRange(const LclVarDsc* varDsc,
unsigned int varNum,
bool isBorn,
bool isDying)
{
noway_assert(varDsc != nullptr);
// Only the variables that exist in the IL, "this", and special arguments
// are reported.
if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount)
{
if (isBorn && !isDying)
{
// "varDsc" is valid from this point
siStartVariableLiveRange(varDsc, varNum);
}
if (isDying && !isBorn)
{
// this variable live range is no longer valid from this point
siEndVariableLiveRange(varNum);
}
}
}
//------------------------------------------------------------------------
// siStartOrCloseVariableLiveRanges: Iterates the given set of variables
// calling "siStartOrCloseVariableLiveRange" with each one.
//
// Arguments:
// varsIndexSet - the set of variables to report start/end "VariableLiveRange"
// isBorn - whether the set is being born from where the emitter is located.
// isDying - whether the set is dying from where the emitter is located.
//
// Assumptions:
//  The emitter should be located at the first instruction from which the set of
//  variables becomes valid (when isBorn is true) or invalid (when isDying is true).
//
// Notes:
// This method is being called from treeLifeUpdater when a set of variables
// is being born, becoming dead, or both.
//
void CodeGenInterface::VariableLiveKeeper::siStartOrCloseVariableLiveRanges(VARSET_VALARG_TP varsIndexSet,
bool isBorn,
bool isDying)
{
if (m_Compiler->opts.compDbgInfo)
{
VarSetOps::Iter iter(m_Compiler, varsIndexSet);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned int varNum = m_Compiler->lvaTrackedIndexToLclNum(varIndex);
const LclVarDsc* varDsc = m_Compiler->lvaGetDesc(varNum);
siStartOrCloseVariableLiveRange(varDsc, varNum, isBorn, isDying);
}
}
}
//------------------------------------------------------------------------
// siStartVariableLiveRange: Reports the given variable as being born.
//
// Arguments:
// varDsc - the variable for which a location changed will be reported
// varNum - the index of the variable to report home in lvLiveDsc
//
// Assumptions:
// The emitter should be pointing to the first instruction from where the VariableLiveRange is
// becoming valid.
// The given "varDsc" should have its VariableRangeLists initialized.
//
// Notes:
// This method should be called on every place a Variable is becoming alive.
void CodeGenInterface::VariableLiveKeeper::siStartVariableLiveRange(const LclVarDsc* varDsc, unsigned int varNum)
{
noway_assert(varDsc != nullptr);
// Only the variables that exist in the IL, "this", and special arguments are reported, as long as they were
// allocated.
if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount && (varDsc->lvIsInReg() || varDsc->lvOnFrame))
{
// Build siVarLoc for this born "varDsc"
CodeGenInterface::siVarLoc varLocation =
m_Compiler->codeGen->getSiVarLoc(varDsc, m_Compiler->codeGen->getCurrentStackLevel());
VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDsc[varNum];
// this variable live range is valid from this point
varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->GetEmitter());
}
}
//------------------------------------------------------------------------
// siEndVariableLiveRange: Reports the variable as becoming dead.
//
// Arguments:
//  varNum - the index (in "m_vlrLiveDsc" / "lvaTable") of the variable that
//  is becoming dead.
//
// Assumptions:
// The given variable should be alive.
// The emitter should be pointing to the first instruction from where the VariableLiveRange is
// becoming invalid.
//
// Notes:
// This method should be called on every place a Variable is becoming dead.
void CodeGenInterface::VariableLiveKeeper::siEndVariableLiveRange(unsigned int varNum)
{
// Only the variables that exist in the IL, "this", and special arguments
// will be reported.
// This method is called from genUpdateLife, which can run after the code for a
// BasicBlock has been generated. At that point the emitter no longer has a valid
// IG, so we don't report the close of a "VariableLiveRange" once code has been
// emitted.
if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount && !m_LastBasicBlockHasBeenEmited &&
m_vlrLiveDsc[varNum].hasVariableLiveRangeOpen())
{
// this variable live range is no longer valid from this point
m_vlrLiveDsc[varNum].endLiveRangeAtEmitter(m_Compiler->GetEmitter());
}
}
//------------------------------------------------------------------------
// siUpdateVariableLiveRange: Reports the change of variable location for the
// given variable.
//
// Arguments:
//  varDsc - the variable for which its home has changed.
// varNum - the index of the variable to report home in lvLiveDsc
//
// Assumptions:
// The given variable should be alive.
// The emitter should be pointing to the first instruction from where
// the new variable location is becoming valid.
//
void CodeGenInterface::VariableLiveKeeper::siUpdateVariableLiveRange(const LclVarDsc* varDsc, unsigned int varNum)
{
noway_assert(varDsc != nullptr);
// Only the variables that exist in the IL, "this", and special arguments
// will be reported. These are locals and arguments, and are counted in
// "info.compLocalsCount".
// This method can also be reached after the last BasicBlock has been emitted
// (see "m_LastBasicBlockHasBeenEmited"); at that point the emitter no longer has
// a valid IG, so we don't report the location change in that case.
if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount && !m_LastBasicBlockHasBeenEmited)
{
// Build the location of the variable
CodeGenInterface::siVarLoc siVarLoc =
m_Compiler->codeGen->getSiVarLoc(varDsc, m_Compiler->codeGen->getCurrentStackLevel());
// Report the home change for this variable
VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDsc[varNum];
varLiveDsc->updateLiveRangeAtEmitter(siVarLoc, m_Compiler->GetEmitter());
}
}
//------------------------------------------------------------------------
// siEndAllVariableLiveRange: Reports the set of variables as becoming dead.
//
// Arguments:
//  varsToClose - the set of variables that are becoming dead.
//
// Assumptions:
// All the variables in the set are alive.
//
// Notes:
//  This method is called when the last block is being generated. It kills all
//  the live variables and sets a flag so that variable locations are not
//  reported on subsequent calls to the methods that update variable liveness.
void CodeGenInterface::VariableLiveKeeper::siEndAllVariableLiveRange(VARSET_VALARG_TP varsToClose)
{
if (m_Compiler->opts.compDbgInfo)
{
if (m_Compiler->lvaTrackedCount > 0 || !m_Compiler->opts.OptimizationDisabled())
{
VarSetOps::Iter iter(m_Compiler, varsToClose);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned int varNum = m_Compiler->lvaTrackedIndexToLclNum(varIndex);
siEndVariableLiveRange(varNum);
}
}
else
{
// It seems we are jitting debug code, so we don't have variable
// liveness info
siEndAllVariableLiveRange();
}
}
m_LastBasicBlockHasBeenEmited = true;
}
//------------------------------------------------------------------------
// siEndAllVariableLiveRange: Reports all live variables as dead.
//
// Notes:
//  This overload exists for the case where we are jitting code compiled in
//  debug mode. When that happens we don't have variable liveness info such as
//  "BasicBlock::bbLiveIn" or "BasicBlock::bbLiveOut", and there are no
//  tracked variables.
//
void CodeGenInterface::VariableLiveKeeper::siEndAllVariableLiveRange()
{
// TODO: we can improve this keeping a set for the variables with
// open VariableLiveRanges
for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
const VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum;
if (varLiveDsc->hasVariableLiveRangeOpen())
{
siEndVariableLiveRange(varNum);
}
}
}
//------------------------------------------------------------------------
// getLiveRangesForVarForBody: Return the list of "VariableLiveRange"s that corresponds to
// the given "varNum".
//
// Arguments:
// varNum - the index of the variable in m_vlrLiveDsc, which is the same as
// in lvaTable.
//
// Return Value:
// A const pointer to the list of variable locations reported for the variable.
//
// Assumptions:
// This variable should be an argument, a special argument or an IL local
// variable.
CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::getLiveRangesForVarForBody(
unsigned int varNum) const
{
// There should be at least one variable whose liveness is tracked
noway_assert(varNum < m_LiveDscCount);
return m_vlrLiveDsc[varNum].getLiveRanges();
}
//------------------------------------------------------------------------
// getLiveRangesForVarForProlog: Return the list of "VariableLiveRange"s that corresponds to
// the given "varNum".
//
// Arguments:
// varNum - the index of the variable in m_vlrLiveDsc, which is the same as
// in lvaTable.
//
// Return Value:
// A const pointer to the list of variable locations reported for the variable.
//
// Assumptions:
// This variable should be an argument, a special argument or an IL local
// variable.
CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::getLiveRangesForVarForProlog(
unsigned int varNum) const
{
// There should be at least one variable whose liveness is tracked
noway_assert(varNum < m_LiveDscCount);
return m_vlrLiveDscForProlog[varNum].getLiveRanges();
}
//------------------------------------------------------------------------
// getLiveRangesCount: Returns the count of variable locations reported for the tracked
// variables, which are arguments, special arguments, and local IL variables.
//
// Return Value:
// size_t - the count of variable locations
//
// Notes:
// This method is being called from "genSetScopeInfo" to know the count of
// "varResultInfo" that should be created on eeSetLVcount.
//
size_t CodeGenInterface::VariableLiveKeeper::getLiveRangesCount() const
{
size_t liveRangesCount = 0;
if (m_Compiler->opts.compDbgInfo)
{
for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
for (int i = 0; i < 2; i++)
{
VariableLiveDescriptor* varLiveDsc = (i == 0 ? m_vlrLiveDscForProlog : m_vlrLiveDsc) + varNum;
if (m_Compiler->compMap2ILvarNum(varNum) != (unsigned int)ICorDebugInfo::UNKNOWN_ILNUM)
{
liveRangesCount += varLiveDsc->getLiveRanges()->size();
}
}
}
}
return liveRangesCount;
}
//------------------------------------------------------------------------
// psiStartVariableLiveRange: Reports the given variable as being born.
//
// Arguments:
// varLocation - the variable location
// varNum - the index of the variable in "compiler->lvaTable" or
// "VariableLivekeeper->m_vlrLiveDsc"
//
// Notes:
// This function is expected to be called from "psiBegProlog" during
// prolog code generation.
//
void CodeGenInterface::VariableLiveKeeper::psiStartVariableLiveRange(CodeGenInterface::siVarLoc varLocation,
unsigned int varNum)
{
// This descriptor has to correspond to a parameter. The first slots in lvaTable
// are arguments and special arguments.
noway_assert(varNum < m_LiveArgsCount);
VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDscForProlog[varNum];
varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->GetEmitter());
}
//------------------------------------------------------------------------
// psiClosePrologVariableRanges: Report all the parameters as becoming dead.
//
// Notes:
// This function is expected to be called from "psiEndProlog" after
// the code for the prolog has been generated.
//
void CodeGenInterface::VariableLiveKeeper::psiClosePrologVariableRanges()
{
noway_assert(m_LiveArgsCount <= m_LiveDscCount);
for (unsigned int varNum = 0; varNum < m_LiveArgsCount; varNum++)
{
VariableLiveDescriptor* varLiveDsc = m_vlrLiveDscForProlog + varNum;
if (varLiveDsc->hasVariableLiveRangeOpen())
{
varLiveDsc->endLiveRangeAtEmitter(m_Compiler->GetEmitter());
}
}
}
#ifdef DEBUG
void CodeGenInterface::VariableLiveKeeper::dumpBlockVariableLiveRanges(const BasicBlock* block)
{
assert(block != nullptr);
bool hasDumpedHistory = false;
printf("\nVariable Live Range History Dump for " FMT_BB "\n", block->bbNum);
if (m_Compiler->opts.compDbgInfo)
{
for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum;
if (varLiveDsc->hasVarLiveRangesFromLastBlockToDump())
{
hasDumpedHistory = true;
m_Compiler->gtDispLclVar(varNum, false);
printf(": ");
varLiveDsc->dumpRegisterLiveRangesForBlockBeforeCodeGenerated(m_Compiler->codeGen);
varLiveDsc->endBlockLiveRanges();
printf("\n");
}
}
}
if (!hasDumpedHistory)
{
printf("..None..\n");
}
}
void CodeGenInterface::VariableLiveKeeper::dumpLvaVariableLiveRanges() const
{
bool hasDumpedHistory = false;
printf("VARIABLE LIVE RANGES:\n");
if (m_Compiler->opts.compDbgInfo)
{
for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum;
if (varLiveDsc->hasVarLiveRangesToDump())
{
hasDumpedHistory = true;
m_Compiler->gtDispLclVar(varNum, false);
printf(": ");
varLiveDsc->dumpAllRegisterLiveRangesForBlock(m_Compiler->GetEmitter(), m_Compiler->codeGen);
printf("\n");
}
}
}
if (!hasDumpedHistory)
{
printf("..None..\n");
}
}
#endif // DEBUG
#endif // USING_VARIABLE_LIVE_RANGE
//-----------------------------------------------------------------------------
// genPoisonFrame: Generate code that places a recognizable value into address exposed variables.
//
// Remarks:
// This function emits code to poison address exposed non-zero-inited local variables. We expect this function
// to be called when emitting code for the scratch BB that comes right after the prolog.
// The variables are poisoned using 0xcdcdcdcd.
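// For illustration only (hypothetical layout): on a 64-bit target, an address-exposed 12-byte
// local whose frame offset happens to be 8-byte aligned would get one 8-byte store of
// 0xcdcdcdcdcdcdcdcd followed by one 4-byte store of 0xcdcdcdcd, while a local larger than
// 16 pointer-sized slots falls back to rep stosd (xarch) or a memset helper call instead,
// as the code below shows.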
void CodeGen::genPoisonFrame(regMaskTP regLiveIn)
{
assert(compiler->compShouldPoisonFrame());
#if defined(TARGET_XARCH)
regNumber poisonValReg = REG_EAX;
assert((regLiveIn & (RBM_EDI | RBM_ECX | RBM_EAX)) == 0);
#else
regNumber poisonValReg = REG_SCRATCH;
assert((regLiveIn & (genRegMask(REG_SCRATCH) | RBM_ARG_0 | RBM_ARG_1 | RBM_ARG_2)) == 0);
#endif
#ifdef TARGET_64BIT
const ssize_t poisonVal = (ssize_t)0xcdcdcdcdcdcdcdcd;
#else
const ssize_t poisonVal = (ssize_t)0xcdcdcdcd;
#endif
// The first time we need to poison something we will initialize a register to the largest 0xcd...cd immediate that
// we can fit.
bool hasPoisonImm = false;
for (unsigned varNum = 0; varNum < compiler->info.compLocalsCount; varNum++)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvIsParam || varDsc->lvMustInit || !varDsc->IsAddressExposed())
{
continue;
}
assert(varDsc->lvOnFrame);
unsigned int size = compiler->lvaLclSize(varNum);
if ((size / TARGET_POINTER_SIZE) > 16)
{
// This will require more than 16 instructions, switch to rep stosd/memset call.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_XARCH)
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_EDI, (int)varNum, 0);
assert(size % 4 == 0);
instGen_Set_Reg_To_Imm(EA_4BYTE, REG_ECX, size / 4);
// On xarch we can leave the value in eax and only set eax once
// since rep stosd does not kill eax.
if (!hasPoisonImm)
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_EAX, poisonVal);
hasPoisonImm = true;
}
instGen(INS_r_stosd);
#else
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_0, (int)varNum, 0);
instGen_Set_Reg_To_Imm(EA_4BYTE, REG_ARG_1, static_cast<char>(poisonVal));
instGen_Set_Reg_To_Imm(EA_4BYTE, REG_ARG_2, size);
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
// May kill REG_SCRATCH, so we need to reload it.
hasPoisonImm = false;
#endif
}
else
{
if (!hasPoisonImm)
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, poisonValReg, poisonVal);
hasPoisonImm = true;
}
// For 64-bit we check if the local is 8-byte aligned. For 32-bit, we assume everything is always 4-byte aligned.
#ifdef TARGET_64BIT
bool fpBased;
int addr = compiler->lvaFrameAddress((int)varNum, &fpBased);
#else
int addr = 0;
#endif
int end = addr + (int)size;
for (int offs = addr; offs < end;)
{
#ifdef TARGET_64BIT
if ((offs % 8) == 0 && end - offs >= 8)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_LONG), EA_8BYTE, REG_SCRATCH, (int)varNum, offs - addr);
offs += 8;
continue;
}
#endif
assert((offs % 4) == 0 && end - offs >= 4);
GetEmitter()->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, REG_SCRATCH, (int)varNum, offs - addr);
offs += 4;
}
}
}
}
//----------------------------------------------------------------------
// genBitCast - Generate the instruction to move a value between register files
//
// Arguments
// targetType - the destination type
// targetReg - the destination register
// srcType - the source type
// srcReg - the source register
//
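// For illustration only: bit-casting a TYP_FLOAT value sitting in an integer register into a
// float register (or the reverse) is expected to reduce to the single cross-register-file move
// emitted by inst_Mov below; the exact instruction is target-specific (movd on xarch, fmov on
// arm64 is the assumption here; the real selection lives inside inst_Mov).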
void CodeGen::genBitCast(var_types targetType, regNumber targetReg, var_types srcType, regNumber srcReg)
{
const bool srcFltReg = varTypeUsesFloatReg(srcType) || varTypeIsSIMD(srcType);
assert(srcFltReg == genIsValidFloatReg(srcReg));
const bool dstFltReg = varTypeUsesFloatReg(targetType) || varTypeIsSIMD(targetType);
assert(dstFltReg == genIsValidFloatReg(targetReg));
inst_Mov(targetType, targetReg, srcReg, /* canSkip */ true);
}
//----------------------------------------------------------------------
// genCodeForBitCast - Generate code for a GT_BITCAST that is not contained
//
// Arguments
// treeNode - the GT_BITCAST for which we're generating code
//
void CodeGen::genCodeForBitCast(GenTreeOp* treeNode)
{
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
GenTree* op1 = treeNode->gtGetOp1();
genConsumeRegs(op1);
if (op1->isContained())
{
assert(op1->IsLocal() || op1->isIndir());
if (genIsRegCandidateLocal(op1))
{
unsigned lclNum = op1->AsLclVar()->GetLclNum();
GetEmitter()->emitIns_R_S(ins_Load(treeNode->TypeGet(), compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(treeNode), targetReg, lclNum, 0);
}
else
{
op1->gtType = treeNode->TypeGet();
op1->SetRegNum(targetReg);
op1->ClearContained();
JITDUMP("Changing type of BITCAST source to load directly.\n");
genCodeForTreeNode(op1);
}
}
else
{
#ifdef TARGET_ARM
if (compiler->opts.compUseSoftFP && (targetType == TYP_LONG))
{
// This is a special arm-softFP case when a TYP_LONG node was introduced during lowering
// for a call argument, so it was not handled by the decomposelongs phase like all other TYP_LONG nodes.
// Example: for foo(double LclVar V01), LclVar V01 has to be passed in general registers r0, r1,
// so lowering will add `BITCAST long(LclVar double V01)` and codegen has to support it here.
const regNumber srcReg = op1->GetRegNum();
const regNumber otherReg = treeNode->AsMultiRegOp()->gtOtherReg;
assert(otherReg != REG_NA);
inst_RV_RV_RV(INS_vmov_d2i, targetReg, otherReg, srcReg, EA_8BYTE);
}
else
#endif // TARGET_ARM
{
genBitCast(targetType, targetReg, op1->TypeGet(), op1->GetRegNum());
}
}
genProduceReg(treeNode);
}
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/codegenlinear.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Code Generation Support Methods for Linear Codegen XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "emit.h"
#include "codegen.h"
//------------------------------------------------------------------------
// genInitializeRegisterState: Initialize the register state contained in 'regSet'.
//
// Assumptions:
// On exit, "rsModifiedRegsMask" (in "regSet") holds the masks of all registers hosting an argument of the function,
// and the elements of "rsSpillDesc" (in "regSet") are set to nullptr.
//
// Notes:
// This method is intended to be called only from initializeStructuresBeforeBlockCodeGeneration.
void CodeGen::genInitializeRegisterState()
{
// Initialize the spill tracking logic
regSet.rsSpillBeg();
// If any arguments live in registers, mark those regs as such
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
// Is this variable a parameter assigned to a register?
if (!varDsc->lvIsParam || !varDsc->lvRegister)
{
continue;
}
// Is the argument live on entry to the method?
if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
{
continue;
}
if (varDsc->IsAddressExposed())
{
continue;
}
// Mark the register as holding the variable
regNumber reg = varDsc->GetRegNum();
if (genIsValidIntReg(reg))
{
regSet.verifyRegUsed(reg);
}
}
}
//------------------------------------------------------------------------
// genInitialize: Initialize the scope, register, gcInfo and current-liveness variable structures
// before the generation of the blocks' code.
//
// Assumptions:
// -The pointer tracking logic in "gcInfo" for pointers in registers and in variables is cleared.
// -"compiler->compCurLife" becomes an empty (clean) set.
// -If there is local var info, the siScopes scope logic in codegen is initialized in "siInit()".
//
// Notes:
// This method is intended to be called when code generation for blocks happens, and before the list of blocks is
// iterated.
void CodeGen::genInitialize()
{
// Initialize the line# tracking logic
if (compiler->opts.compScopeInfo)
{
siInit();
}
#ifdef USING_VARIABLE_LIVE_RANGE
initializeVariableLiveKeeper();
#endif // USING_VARIABLE_LIVE_RANGE
genPendingCallLabel = nullptr;
// Initialize the pointer tracking code
gcInfo.gcRegPtrSetInit();
gcInfo.gcVarPtrSetInit();
// Initialize the register set logic
genInitializeRegisterState();
// Make sure a set is allocated for compiler->compCurLife (in the long case), so we can set it to empty without
// allocation at the start of each basic block.
VarSetOps::AssignNoCopy(compiler, compiler->compCurLife, VarSetOps::MakeEmpty(compiler));
// We initialize the stack level before first "BasicBlock" code is generated in case we need to report stack
// variable needs home and so its stack offset.
SetStackLevel(0);
}
//------------------------------------------------------------------------
// genCodeForBBlist: Generate code for all the blocks in a method
//
// Arguments:
// None
//
// Notes:
// This is the main method for linear codegen. It calls genCodeForTreeNode
// to generate the code for each node in each BasicBlock, and handles BasicBlock
// boundaries and branches.
//
void CodeGen::genCodeForBBlist()
{
unsigned savedStkLvl;
#ifdef DEBUG
genInterruptibleUsed = true;
// You have to be careful if you create basic blocks from now on
compiler->fgSafeBasicBlockCreation = false;
#endif // DEBUG
#if defined(DEBUG) && defined(TARGET_X86)
// The "check stack pointer on call" stress mode is not compatible with fully interruptible GC. REVIEW: why?
//
if (GetInterruptible() && compiler->opts.compStackCheckOnCall)
{
compiler->opts.compStackCheckOnCall = false;
}
#endif // defined(DEBUG) && defined(TARGET_X86)
#if defined(DEBUG) && defined(TARGET_XARCH)
// The "check stack pointer on return" stress mode is not compatible with fully interruptible GC. REVIEW: why?
// It is also not compatible with any function that makes a tailcall: we aren't smart enough to only
// insert the SP check in the non-tailcall returns.
//
if ((GetInterruptible() || compiler->compTailCallUsed) && compiler->opts.compStackCheckOnRet)
{
compiler->opts.compStackCheckOnRet = false;
}
#endif // defined(DEBUG) && defined(TARGET_XARCH)
genMarkLabelsForCodegen();
assert(!compiler->fgFirstBBScratch ||
compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first.
/* Initialize structures used in the block list iteration */
genInitialize();
/*-------------------------------------------------------------------------
*
* Walk the basic blocks and generate code for each one
*
*/
BasicBlock* block;
for (block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\n=============== Generating ");
block->dspBlockHeader(compiler, true, true);
compiler->fgDispBBLiveness(block);
}
#endif // DEBUG
assert(LIR::AsRange(block).CheckLIR(compiler));
// Figure out which registers hold variables on entry to this block
regSet.ClearMaskVars();
gcInfo.gcRegGCrefSetCur = RBM_NONE;
gcInfo.gcRegByrefSetCur = RBM_NONE;
compiler->m_pLinearScan->recordVarLocationsAtStartOfBB(block);
// Updating variable liveness after last instruction of previous block was emitted
// and before first of the current block is emitted
genUpdateLife(block->bbLiveIn);
// Even if liveness didn't change, we need to update the registers containing GC references.
// genUpdateLife will update the registers live due to liveness changes. But what about registers that didn't
// change? We cleared them out above. Maybe we should just not clear them out, but update the ones that change
// here. That would require handling the changes in recordVarLocationsAtStartOfBB().
regMaskTP newLiveRegSet = RBM_NONE;
regMaskTP newRegGCrefSet = RBM_NONE;
regMaskTP newRegByrefSet = RBM_NONE;
#ifdef DEBUG
VARSET_TP removedGCVars(VarSetOps::MakeEmpty(compiler));
VARSET_TP addedGCVars(VarSetOps::MakeEmpty(compiler));
#endif
VarSetOps::Iter iter(compiler, block->bbLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
if (varDsc->lvIsInReg())
{
newLiveRegSet |= varDsc->lvRegMask();
if (varDsc->lvType == TYP_REF)
{
newRegGCrefSet |= varDsc->lvRegMask();
}
else if (varDsc->lvType == TYP_BYREF)
{
newRegByrefSet |= varDsc->lvRegMask();
}
if (!varDsc->IsAlwaysAliveInMemory())
{
#ifdef DEBUG
if (verbose && VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varIndex))
{
VarSetOps::AddElemD(compiler, removedGCVars, varIndex);
}
#endif // DEBUG
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varIndex);
}
}
if ((!varDsc->lvIsInReg() || varDsc->IsAlwaysAliveInMemory()) && compiler->lvaIsGCTracked(varDsc))
{
#ifdef DEBUG
if (verbose && !VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varIndex))
{
VarSetOps::AddElemD(compiler, addedGCVars, varIndex);
}
#endif // DEBUG
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varIndex);
}
}
regSet.SetMaskVars(newLiveRegSet);
#ifdef DEBUG
if (compiler->verbose)
{
if (!VarSetOps::IsEmpty(compiler, addedGCVars))
{
printf("\t\t\t\t\t\t\tAdded GCVars: ");
dumpConvertedVarSet(compiler, addedGCVars);
printf("\n");
}
if (!VarSetOps::IsEmpty(compiler, removedGCVars))
{
printf("\t\t\t\t\t\t\tRemoved GCVars: ");
dumpConvertedVarSet(compiler, removedGCVars);
printf("\n");
}
}
#endif // DEBUG
gcInfo.gcMarkRegSetGCref(newRegGCrefSet DEBUGARG(true));
gcInfo.gcMarkRegSetByref(newRegByrefSet DEBUGARG(true));
/* Blocks with handlerGetsXcptnObj()==true use GT_CATCH_ARG to
represent the exception object (TYP_REF).
We mark REG_EXCEPTION_OBJECT as holding a GC object on entry
to the block, it will be the first thing evaluated
(thanks to GTF_ORDER_SIDEEFF).
*/
if (handlerGetsXcptnObj(block->bbCatchTyp))
{
for (GenTree* node : LIR::AsRange(block))
{
if (node->OperGet() == GT_CATCH_ARG)
{
gcInfo.gcMarkRegSetGCref(RBM_EXCEPTION_OBJECT);
break;
}
}
}
#if defined(TARGET_ARM)
genInsertNopForUnwinder(block);
#endif
/* Start a new code output block */
genUpdateCurrentFunclet(block);
genLogLabel(block);
// Tell everyone which basic block we're working on
compiler->compCurBB = block;
block->bbEmitCookie = nullptr;
// If this block is a jump target or it requires a label then set 'needLabel' to true,
//
bool needLabel = (block->bbFlags & BBF_HAS_LABEL) != 0;
if (block == compiler->fgFirstColdBlock)
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nThis is the start of the cold region of the method\n");
}
#endif
// We should never have a block that falls through into the Cold section
noway_assert(!block->bbPrev->bbFallsThrough());
needLabel = true;
}
// We also want to start a new Instruction group by calling emitAddLabel below,
// when we need accurate bbWeights for this block in the emitter. We force this
// whenever our previous block was a BBJ_COND and it has a different weight than us.
//
// Note: We need to have set compCurBB before calling emitAddLabel
//
if ((block->bbPrev != nullptr) && (block->bbPrev->bbJumpKind == BBJ_COND) &&
(block->bbWeight != block->bbPrev->bbWeight))
{
JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT
" different from " FMT_BB " with weight " FMT_WT "\n",
block->bbPrev->bbNum, block->bbPrev->bbWeight, block->bbNum, block->bbWeight);
needLabel = true;
}
#if FEATURE_LOOP_ALIGN
if (GetEmitter()->emitEndsWithAlignInstr())
{
// Force new label if current IG ends with an align instruction.
needLabel = true;
}
#endif
if (needLabel)
{
// Mark a label and update the current set of live GC refs
block->bbEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, false DEBUG_ARG(block));
}
if (block == compiler->fgFirstColdBlock)
{
// We require the block that starts the Cold section to have a label
noway_assert(block->bbEmitCookie);
GetEmitter()->emitSetFirstColdIGCookie(block->bbEmitCookie);
}
// Both stacks are always empty on entry to a basic block.
assert(genStackLevel == 0);
genAdjustStackLevel(block);
savedStkLvl = genStackLevel;
// Needed when jitting debug code
siBeginBlock(block);
// BBF_INTERNAL blocks don't correspond to any single IL instruction.
if (compiler->opts.compDbgInfo && (block->bbFlags & BBF_INTERNAL) &&
!compiler->fgBBisScratch(block)) // If the block is the distinguished first scratch block, then no need to
// emit a NO_MAPPING entry, immediately after the prolog.
{
genIPmappingAdd(IPmappingDscKind::NoMapping, DebugInfo(), true);
}
bool firstMapping = true;
#if defined(FEATURE_EH_FUNCLETS)
if (block->bbFlags & BBF_FUNCLET_BEG)
{
genReserveFuncletProlog(block);
}
#endif // FEATURE_EH_FUNCLETS
// Clear compCurStmt and compCurLifeTree.
compiler->compCurStmt = nullptr;
compiler->compCurLifeTree = nullptr;
// Emit poisoning into scratch BB that comes right after prolog.
// We cannot emit this code in the prolog as it might make the prolog too large.
if (compiler->compShouldPoisonFrame() && compiler->fgBBisScratch(block))
{
genPoisonFrame(newLiveRegSet);
}
// Traverse the block in linear order, generating code for each node as we
// encounter it.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
// Set the use-order numbers for each node.
{
int useNum = 0;
for (GenTree* node : LIR::AsRange(block))
{
assert((node->gtDebugFlags & GTF_DEBUG_NODE_CG_CONSUMED) == 0);
node->gtUseNum = -1;
if (node->isContained() || node->IsCopyOrReload())
{
continue;
}
for (GenTree* operand : node->Operands())
{
genNumberOperandUse(operand, useNum);
}
}
}
bool addPreciseMappings =
(JitConfig.JitDumpPreciseDebugInfoFile() != nullptr) || (JitConfig.JitDisasmWithDebugInfo() != 0);
#endif // DEBUG
DebugInfo currentDI;
for (GenTree* node : LIR::AsRange(block))
{
// Do we have a new IL offset?
if (node->OperGet() == GT_IL_OFFSET)
{
GenTreeILOffset* ilOffset = node->AsILOffset();
DebugInfo rootDI = ilOffset->gtStmtDI.GetRoot();
if (rootDI.IsValid())
{
genEnsureCodeEmitted(currentDI);
currentDI = rootDI;
genIPmappingAdd(IPmappingDscKind::Normal, currentDI, firstMapping);
firstMapping = false;
}
#ifdef DEBUG
if (addPreciseMappings && ilOffset->gtStmtDI.IsValid())
{
genAddPreciseIPMappingHere(ilOffset->gtStmtDI);
}
assert(ilOffset->gtStmtLastILoffs <= compiler->info.compILCodeSize ||
ilOffset->gtStmtLastILoffs == BAD_IL_OFFSET);
if (compiler->opts.dspCode && compiler->opts.dspInstrs && ilOffset->gtStmtLastILoffs != BAD_IL_OFFSET)
{
while (genCurDispOffset <= ilOffset->gtStmtLastILoffs)
{
genCurDispOffset += dumpSingleInstr(compiler->info.compCode, genCurDispOffset, "> ");
}
}
#endif // DEBUG
}
genCodeForTreeNode(node);
if (node->gtHasReg() && node->IsUnusedValue())
{
genConsumeReg(node);
}
} // end for each node in block
#ifdef DEBUG
// The following set of register spill checks and GC pointer tracking checks used to be
// performed at statement boundaries. Now, with LIR, there are no statements, so they are
// performed at the end of each block.
// TODO: could these checks be performed more frequently? E.g., at each location where
// the register allocator says there are no live non-variable registers. Perhaps this could
// be done by using the map maintained by LSRA (operandToLocationInfoMap) to mark a node
// somehow when, after the execution of that node, there will be no live non-variable registers.
regSet.rsSpillChk();
/* Make sure we didn't bungle pointer register tracking */
regMaskTP ptrRegs = gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur;
regMaskTP nonVarPtrRegs = ptrRegs & ~regSet.GetMaskVars();
// If return is a GC-type, clear it. Note that if a common
// epilog is generated (genReturnBB) it has a void return
// even though we might return a ref. We can't use the compRetType
// as the determiner because something we are tracking as a byref
// might be used as a return value of a int function (which is legal)
GenTree* blockLastNode = block->lastNode();
if ((blockLastNode != nullptr) && (blockLastNode->gtOper == GT_RETURN) &&
(varTypeIsGC(compiler->info.compRetType) ||
(blockLastNode->AsOp()->gtOp1 != nullptr && varTypeIsGC(blockLastNode->AsOp()->gtOp1->TypeGet()))))
{
nonVarPtrRegs &= ~RBM_INTRET;
}
if (nonVarPtrRegs)
{
printf("Regset after " FMT_BB " gcr=", block->bbNum);
printRegMaskInt(gcInfo.gcRegGCrefSetCur & ~regSet.GetMaskVars());
compiler->GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur & ~regSet.GetMaskVars());
printf(", byr=");
printRegMaskInt(gcInfo.gcRegByrefSetCur & ~regSet.GetMaskVars());
compiler->GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur & ~regSet.GetMaskVars());
printf(", regVars=");
printRegMaskInt(regSet.GetMaskVars());
compiler->GetEmitter()->emitDispRegSet(regSet.GetMaskVars());
printf("\n");
}
noway_assert(nonVarPtrRegs == RBM_NONE);
#endif // DEBUG
#if defined(DEBUG)
if (block->bbNext == nullptr)
{
// Unit testing of the emitter: generate a bunch of instructions into the last block
// (it's as good as any, but better than the prologue, which can only be a single instruction
// group) then use COMPlus_JitLateDisasm=* to see if the late disassembler
// thinks the instructions are the same as we do.
#if defined(TARGET_AMD64) && defined(LATE_DISASM)
genAmd64EmitterUnitTests();
#elif defined(TARGET_ARM64)
genArm64EmitterUnitTests();
#endif // TARGET_ARM64
}
#endif // defined(DEBUG)
// It is possible to reach the end of the block without generating code for the current IL offset.
// For example, if the following IR ends the current block, no code will have been generated for
// offset 21:
//
// ( 0, 0) [000040] ------------ il_offset void IL offset: 21
//
// N001 ( 0, 0) [000039] ------------ nop void
//
// This can lead to problems when debugging the generated code. To prevent these issues, make sure
// we've generated code for the last IL offset we saw in the block.
genEnsureCodeEmitted(currentDI);
/* Is this the last block, and are there any open scopes left ? */
bool isLastBlockProcessed = (block->bbNext == nullptr);
if (block->isBBCallAlwaysPair())
{
isLastBlockProcessed = (block->bbNext->bbNext == nullptr);
}
#ifdef USING_VARIABLE_LIVE_RANGE
if (compiler->opts.compDbgInfo && isLastBlockProcessed)
{
varLiveKeeper->siEndAllVariableLiveRange(compiler->compCurLife);
}
#endif // USING_VARIABLE_LIVE_RANGE
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
{
siEndBlock(block);
#ifdef USING_SCOPE_INFO
if (isLastBlockProcessed && siOpenScopeList.scNext)
{
/* This assert no longer holds, because we may insert a throw
block to demarcate the end of a try or finally region when they
are at the end of the method. It would be nice if we could fix
our code so that this throw block will no longer be necessary. */
// noway_assert(block->bbCodeOffsEnd != compiler->info.compILCodeSize);
siCloseAllOpenScopes();
}
#endif // USING_SCOPE_INFO
}
SubtractStackLevel(savedStkLvl);
#ifdef DEBUG
// compCurLife should be equal to the liveOut set, except that we don't keep
// it up to date for vars that are not register candidates
// (it would be nice to have a xor set function)
VARSET_TP mismatchLiveVars(VarSetOps::Diff(compiler, block->bbLiveOut, compiler->compCurLife));
VarSetOps::UnionD(compiler, mismatchLiveVars,
VarSetOps::Diff(compiler, compiler->compCurLife, block->bbLiveOut));
VarSetOps::Iter mismatchLiveVarIter(compiler, mismatchLiveVars);
unsigned mismatchLiveVarIndex = 0;
bool foundMismatchedRegVar = false;
while (mismatchLiveVarIter.NextElem(&mismatchLiveVarIndex))
{
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(mismatchLiveVarIndex);
if (varDsc->lvIsRegCandidate())
{
if (!foundMismatchedRegVar)
{
JITDUMP("Mismatched live reg vars after " FMT_BB ":", block->bbNum);
foundMismatchedRegVar = true;
}
JITDUMP(" V%02u", compiler->lvaTrackedIndexToLclNum(mismatchLiveVarIndex));
}
}
if (foundMismatchedRegVar)
{
JITDUMP("\n");
assert(!"Found mismatched live reg var(s) after block");
}
#endif
/* Both stacks should always be empty on exit from a basic block */
noway_assert(genStackLevel == 0);
#ifdef TARGET_AMD64
// On AMD64, we need to generate a NOP after a call that is the last instruction of the block, in several
// situations, to support proper exception handling semantics. This is mostly to ensure that when the stack
// walker computes an instruction pointer for a frame, that instruction pointer is in the correct EH region.
// The document "X64 and ARM ABIs.docx" has more details. The situations:
// 1. If the call instruction is in a different EH region as the instruction that follows it.
// 2. If the call immediately precedes an OS epilog. (Note that what the JIT or VM consider an epilog might
// be slightly different from what the OS considers an epilog, and it is the OS-reported epilog that matters
// here.)
// We handle case #1 here, and case #2 in the emitter.
if (GetEmitter()->emitIsLastInsCall())
{
// Ok, the last instruction generated is a call instruction. Do any of the other conditions hold?
// Note: we may be generating a few too many NOPs for the case of call preceding an epilog. Technically,
// if the next block is a BBJ_RETURN, an epilog will be generated, but there may be some instructions
// generated before the OS epilog starts, such as a GS cookie check.
if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
{
// We only need the NOP if we're not going to generate any more code as part of the block end.
switch (block->bbJumpKind)
{
case BBJ_ALWAYS:
case BBJ_THROW:
case BBJ_CALLFINALLY:
case BBJ_EHCATCHRET:
// We're going to generate more code below anyway, so no need for the NOP.
case BBJ_RETURN:
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
// These are the "epilog follows" case, handled in the emitter.
break;
case BBJ_NONE:
if (block->bbNext == nullptr)
{
// Call immediately before the end of the code; we should never get here.
instGen(INS_BREAKPOINT); // This should never get executed
}
else
{
// We need the NOP
instGen(INS_nop);
}
break;
case BBJ_COND:
case BBJ_SWITCH:
// These can't have a call as the last instruction!
default:
noway_assert(!"Unexpected bbJumpKind");
break;
}
}
}
#endif // TARGET_AMD64
/* Do we need to generate a jump or return? */
switch (block->bbJumpKind)
{
case BBJ_RETURN:
genExitCode(block);
break;
case BBJ_THROW:
// If we have a throw at the end of a function or funclet, we need to emit another instruction
// afterwards to help the OS unwinder determine the correct context during unwind.
// We insert an unexecuted breakpoint instruction in several situations
// following a throw instruction:
// 1. If the throw is the last instruction of the function or funclet. This helps
// the OS unwinder determine the correct context during an unwind from the
// thrown exception.
// 2. If this is the last block of the hot section.
// 3. If the subsequent block is a special throw block.
// 4. On AMD64, if the next block is in a different EH region.
if ((block->bbNext == nullptr) || (block->bbNext->bbFlags & BBF_FUNCLET_BEG) ||
!BasicBlock::sameEHRegion(block, block->bbNext) ||
(!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext)) ||
block->bbNext == compiler->fgFirstColdBlock)
{
instGen(INS_BREAKPOINT); // This should never get executed
}
// Do likewise for blocks that end in DOES_NOT_RETURN calls
// that were not caught by the above rules. This ensures that
// gc register liveness doesn't change across call instructions
// in fully-interruptible mode.
else
{
GenTree* call = block->lastNode();
if ((call != nullptr) && (call->gtOper == GT_CALL))
{
if ((call->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0)
{
instGen(INS_BREAKPOINT); // This should never get executed
}
}
}
break;
case BBJ_CALLFINALLY:
block = genCallFinally(block);
break;
#if defined(FEATURE_EH_FUNCLETS)
case BBJ_EHCATCHRET:
genEHCatchRet(block);
FALLTHROUGH;
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
genReserveFuncletEpilog(block);
break;
#else // !FEATURE_EH_FUNCLETS
case BBJ_EHCATCHRET:
noway_assert(!"Unexpected BBJ_EHCATCHRET"); // not used on x86
break;
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
genEHFinallyOrFilterRet(block);
break;
#endif // !FEATURE_EH_FUNCLETS
case BBJ_NONE:
case BBJ_SWITCH:
break;
case BBJ_ALWAYS:
inst_JMP(EJ_jmp, block->bbJumpDest);
FALLTHROUGH;
case BBJ_COND:
#if FEATURE_LOOP_ALIGN
// This is the last place where we operate on blocks and after this, we operate
// on IG. Hence, if we know that the destination of "block" is the first block
// of a loop and needs alignment (it has BBF_LOOP_ALIGN), then "block" represents
// end of the loop. Propagate that information on the IG through "igLoopBackEdge".
//
// During emitter, this information will be used to calculate the loop size.
// Depending on the loop size, decision of whether to align a loop or not will be taken.
//
// In the emitter, we need to calculate the loop size from `block->bbJumpDest` through
// `block` (inclusive). Thus, we need to ensure there is a label on the lexical fall-through
// block, even if one is not otherwise needed, to be able to calculate the size of this
// loop (loop size is calculated by walking the instruction groups; see emitter::getLoopSize()).
if (block->bbJumpDest->isLoopAlign())
{
GetEmitter()->emitSetLoopBackEdge(block->bbJumpDest);
if (block->bbNext != nullptr)
{
JITDUMP("Mark " FMT_BB " as label: alignment end-of-loop\n", block->bbNext->bbNum);
block->bbNext->bbFlags |= BBF_HAS_LABEL;
}
}
#endif // FEATURE_LOOP_ALIGN
break;
default:
noway_assert(!"Unexpected bbJumpKind");
break;
}
#if FEATURE_LOOP_ALIGN
if (block->hasAlign())
{
// If this block has 'align' instruction in the end (identified by BBF_HAS_ALIGN),
// then need to add align instruction in the current "block".
//
// For non-adaptive alignment, add alignment instruction of size depending on the
// compJitAlignLoopBoundary.
// For adaptive alignment, alignment instruction will always be of 15 bytes for xarch
// and 16 bytes for arm64.
assert(ShouldAlignLoops());
GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->bbJumpKind == BBJ_ALWAYS));
}
if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign()))
{
if (compiler->opts.compJitHideAlignBehindJmp)
{
// The current IG is the one that is just before the IG having loop start.
// Establish a connection of recent align instruction emitted to the loop
// it actually is aligning using 'idaLoopHeadPredIG'.
GetEmitter()->emitConnectAlignInstrWithCurIG();
}
}
#endif
#if defined(DEBUG) && defined(USING_VARIABLE_LIVE_RANGE)
if (compiler->verbose)
{
varLiveKeeper->dumpBlockVariableLiveRanges(block);
}
#endif // defined(DEBUG) && defined(USING_VARIABLE_LIVE_RANGE)
INDEBUG(compiler->compCurBB = nullptr);
} //------------------ END-FOR each block of the method -------------------
// There could be variables alive at this point. For example see lvaKeepAliveAndReportThis.
// This call is for cleaning the GC refs
genUpdateLife(VarSetOps::MakeEmpty(compiler));
/* Finalize the spill tracking logic */
regSet.rsSpillEnd();
/* Finalize the temp tracking logic */
regSet.tmpEnd();
#ifdef DEBUG
if (compiler->verbose)
{
printf("\n# ");
printf("compCycleEstimate = %6d, compSizeEstimate = %5d ", compiler->compCycleEstimate,
compiler->compSizeEstimate);
printf("%s\n", compiler->info.compFullName);
}
#endif
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Register Management XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
//------------------------------------------------------------------------
// genSpillVar: Spill a local variable
//
// Arguments:
// tree - the lclVar node for the variable being spilled
//
// Return Value:
// None.
//
// Assumptions:
// The lclVar must be a register candidate (lvRegCandidate)
void CodeGen::genSpillVar(GenTree* tree)
{
unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(varDsc->lvIsRegCandidate());
// We don't actually need to spill if it is already living in memory
bool needsSpill = ((tree->gtFlags & GTF_VAR_DEF) == 0 && varDsc->lvIsInReg());
if (needsSpill)
{
// In order for a lclVar to have been allocated to a register, it must not have been aliasable, and can
// therefore be store-normalized (rather than load-normalized). In fact, not performing store normalization
// can lead to problems on architectures where a lclVar may be allocated to a register that is not
// addressable at the granularity of the lclVar's defined type (e.g. x86).
var_types lclType = varDsc->GetActualRegisterType();
emitAttr size = emitTypeSize(lclType);
// If this is a write-thru or a single-def variable, we don't actually spill at a use,
// but we will kill the var in the reg (below).
if (!varDsc->IsAlwaysAliveInMemory())
{
instruction storeIns = ins_Store(lclType, compiler->isSIMDTypeLocalAligned(varNum));
assert(varDsc->GetRegNum() == tree->GetRegNum());
inst_TT_RV(storeIns, size, tree, tree->GetRegNum());
}
// We should only have both GTF_SPILL (i.e. the flag causing this method to be called) and
// GTF_SPILLED on a write-thru/single-def def, for which we should not be calling this method.
assert((tree->gtFlags & GTF_SPILLED) == 0);
// Remove the live var from the register.
genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(tree));
gcInfo.gcMarkRegSetNpt(varDsc->lvRegMask());
if (VarSetOps::IsMember(compiler, gcInfo.gcTrkStkPtrLcls, varDsc->lvVarIndex))
{
#ifdef DEBUG
if (!VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming live\n", varNum);
}
else
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing live\n", varNum);
}
#endif
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
tree->gtFlags &= ~GTF_SPILL;
// If this is NOT a write-thru, reset the var location.
if ((tree->gtFlags & GTF_SPILLED) == 0)
{
varDsc->SetRegNum(REG_STK);
if (varTypeIsMultiReg(tree))
{
varDsc->SetOtherReg(REG_STK);
}
}
else
{
// We only have 'GTF_SPILL' and 'GTF_SPILLED' on a def of a write-thru lclVar
// or a single-def var that is to be spilled at its definition.
assert((varDsc->IsAlwaysAliveInMemory()) && ((tree->gtFlags & GTF_VAR_DEF) != 0));
}
#ifdef USING_VARIABLE_LIVE_RANGE
if (needsSpill)
{
// We need this after "lvRegNum" has changed because now we are sure that varDsc->lvIsInReg() is false.
// "SiVarLoc" constructor uses the "LclVarDsc" of the variable.
varLiveKeeper->siUpdateVariableLiveRange(varDsc, varNum);
}
#endif // USING_VARIABLE_LIVE_RANGE
}
//------------------------------------------------------------------------
// genUpdateVarReg: Update the current register location for a multi-reg lclVar
//
// Arguments:
// varDsc - the LclVarDsc for the lclVar
// tree - the lclVar node
// regIndex - the index of the register in the node
//
// inline
void CodeGenInterface::genUpdateVarReg(LclVarDsc* varDsc, GenTree* tree, int regIndex)
{
// This should only be called for multireg lclVars.
assert(compiler->lvaEnregMultiRegVars);
assert(tree->IsMultiRegLclVar() || (tree->gtOper == GT_COPY));
varDsc->SetRegNum(tree->GetRegByIndex(regIndex));
}
//------------------------------------------------------------------------
// genUpdateVarReg: Update the current register location for a lclVar
//
// Arguments:
// varDsc - the LclVarDsc for the lclVar
// tree - the lclVar node
//
// inline
void CodeGenInterface::genUpdateVarReg(LclVarDsc* varDsc, GenTree* tree)
{
// This should not be called for multireg lclVars.
assert((tree->OperIsScalarLocal() && !tree->IsMultiRegLclVar()) || (tree->gtOper == GT_COPY));
varDsc->SetRegNum(tree->GetRegNum());
}
//------------------------------------------------------------------------
// sameRegAsDst: Return the child that has the same reg as the dst (if any)
//
// Arguments:
// tree - the node of interest
// other - an out parameter to return the other child
//
// Notes:
// If 'tree' has a child with the same assigned register as its target reg,
// that child will be returned, and 'other' will contain the non-matching child.
// Otherwise, both other and the return value will be nullptr.
//
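// For example (hypothetical operands): for an ADD(op1, op2) whose target register matches op1's
// register, this returns op1 and sets 'other' to op2 so the caller can emit a read-modify-write
// form; if neither operand shares the target register, both results are nullptr.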
GenTree* sameRegAsDst(GenTree* tree, GenTree*& other /*out*/)
{
if (tree->GetRegNum() == REG_NA)
{
other = nullptr;
return nullptr;
}
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
if (op1->GetRegNum() == tree->GetRegNum())
{
other = op2;
return op1;
}
if (op2->GetRegNum() == tree->GetRegNum())
{
other = op1;
return op2;
}
else
{
other = nullptr;
return nullptr;
}
}
//------------------------------------------------------------------------
// genUnspillLocal: Reload a register candidate local into a register, if needed.
//
// Arguments:
// varNum - The variable number of the local to be reloaded (unspilled).
// It may be a local field.
// type - The type of the local.
// lclNode - The node being unspilled. Note that for a multi-reg local,
// the gtLclNum will be that of the parent struct.
// regNum - The register that 'varNum' should be loaded to.
// reSpill - True if it will be immediately spilled after use.
// isLastUse - True if this is a last use of 'varNum'.
//
// Notes:
// The caller must have determined that this local needs to be unspilled.
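// For illustration only (hypothetical names): unspilling a TYP_INT local V03 into a register
// amounts to a single 4-byte frame load of V03's home slot via the ins_Load/emitIns_R_S pair
// below, followed by the bookkeeping that re-registers the variable as live in that register.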
void CodeGen::genUnspillLocal(
unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum, bool reSpill, bool isLastUse)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
inst_set_SV_var(lclNode);
instruction ins = ins_Load(type, compiler->isSIMDTypeLocalAligned(varNum));
GetEmitter()->emitIns_R_S(ins, emitTypeSize(type), regNum, varNum, 0);
// TODO-Review: We would like to call:
// genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(tree));
// instead of the following code, but this ends up hitting this assert:
// assert((regSet.GetMaskVars() & regMask) == 0);
// due to issues with LSRA resolution moves.
// So, just force it for now. This probably indicates a condition that creates a GC hole!
//
// Extra note: I think we really want to call something like gcInfo.gcUpdateForRegVarMove,
// because the variable is not really going live or dead, but that method is somewhat poorly
// factored because it, in turn, updates rsMaskVars which is part of RegSet not GCInfo.
// TODO-Cleanup: This code exists in other CodeGen*.cpp files, and should be moved to CodeGenCommon.cpp.
// Don't update the variable's location if we are just re-spilling it again.
if (!reSpill)
{
varDsc->SetRegNum(regNum);
#ifdef USING_VARIABLE_LIVE_RANGE
// We want "VariableLiveRange" inclusive on the beginning and exclusive on the ending.
// For that we shouldn't report an update of the variable location if it is becoming dead
// on the same native offset.
if (!isLastUse)
{
// Report the home change for this variable
varLiveKeeper->siUpdateVariableLiveRange(varDsc, varNum);
}
#endif // USING_VARIABLE_LIVE_RANGE
if (!varDsc->IsAlwaysAliveInMemory())
{
#ifdef DEBUG
if (VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tRemoving V%02u from gcVarPtrSetCur\n", varNum);
}
#endif // DEBUG
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tV%02u in reg ", varNum);
varDsc->PrintVarReg();
printf(" is becoming live ");
compiler->printTreeID(lclNode);
printf("\n");
}
#endif // DEBUG
regSet.AddMaskVars(genGetRegMask(varDsc));
}
gcInfo.gcMarkRegPtrVal(regNum, type);
}
//------------------------------------------------------------------------
// genUnspillRegIfNeeded: Reload a MultiReg source value into a register, if needed
//
// Arguments:
// tree - the MultiReg node of interest.
// multiRegIndex - the index of the value to reload, if needed.
//
// Notes:
// It must *not* be a GT_LCL_VAR (those are handled separately).
// In the normal case, the value will be reloaded into the register it
// was originally computed into. However, if that register is not available,
// the register allocator will have allocated a different register, and
// inserted a GT_RELOAD to indicate the register into which it should be
// reloaded.
//
void CodeGen::genUnspillRegIfNeeded(GenTree* tree, unsigned multiRegIndex)
{
GenTree* unspillTree = tree;
assert(unspillTree->IsMultiRegNode());
if (tree->gtOper == GT_RELOAD)
{
unspillTree = tree->AsOp()->gtOp1;
}
// In case of multi-reg node, GTF_SPILLED flag on it indicates that
// one or more of its result regs are spilled. Individual spill flags need to be
// queried to determine which specific result regs need to be unspilled.
if ((unspillTree->gtFlags & GTF_SPILLED) == 0)
{
return;
}
GenTreeFlags spillFlags = unspillTree->GetRegSpillFlagByIdx(multiRegIndex);
if ((spillFlags & GTF_SPILLED) == 0)
{
return;
}
regNumber dstReg = tree->GetRegByIndex(multiRegIndex);
if (dstReg == REG_NA)
{
assert(tree->IsCopyOrReload());
dstReg = unspillTree->GetRegByIndex(multiRegIndex);
}
if (tree->IsMultiRegLclVar())
{
GenTreeLclVar* lclNode = tree->AsLclVar();
unsigned fieldVarNum = compiler->lvaGetDesc(lclNode)->lvFieldLclStart + multiRegIndex;
bool reSpill = ((spillFlags & GTF_SPILL) != 0);
bool isLastUse = lclNode->IsLastUse(multiRegIndex);
genUnspillLocal(fieldVarNum, compiler->lvaGetDesc(fieldVarNum)->TypeGet(), lclNode, dstReg, reSpill, isLastUse);
}
else
{
var_types dstType = unspillTree->GetRegTypeByIndex(multiRegIndex);
regNumber unspillTreeReg = unspillTree->GetRegByIndex(multiRegIndex);
TempDsc* t = regSet.rsUnspillInPlace(unspillTree, unspillTreeReg, multiRegIndex);
emitAttr emitType = emitActualTypeSize(dstType);
GetEmitter()->emitIns_R_S(ins_Load(dstType), emitType, dstReg, t->tdTempNum(), 0);
regSet.tmpRlsTemp(t);
gcInfo.gcMarkRegPtrVal(dstReg, dstType);
}
}
//------------------------------------------------------------------------
// genUnspillRegIfNeeded: Reload the value into a register, if needed
//
// Arguments:
// tree - the node of interest.
//
// Notes:
// In the normal case, the value will be reloaded into the register it
// was originally computed into. However, if that register is not available,
// the register allocator will have allocated a different register, and
// inserted a GT_RELOAD to indicate the register into which it should be
// reloaded.
//
// A GT_RELOAD never has a reg candidate lclVar or multi-reg lclVar as its child.
// This is because register candidates locals always have distinct tree nodes
// for uses and definitions. (This is unlike non-register candidate locals which
// may be "defined" by a GT_LCL_VAR node that loads it into a register. It may
// then have a GT_RELOAD inserted if it needs a different register, though this
// is unlikely to happen except in stress modes.)
//
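// For illustration, a hypothetical shape (not taken from a real dump):
//
//     t1 = <some def>    ; spilled after definition, so GTF_SPILLED is set on t1
//     t2 = RELOAD(t1)    ; the register allocator picked a different register for this use
//
// The code below loads t1's spill temp into the register assigned to t2 (or into t1's own
// register when no GT_RELOAD was inserted).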
void CodeGen::genUnspillRegIfNeeded(GenTree* tree)
{
GenTree* unspillTree = tree;
if (tree->gtOper == GT_RELOAD)
{
unspillTree = tree->AsOp()->gtOp1;
}
if ((unspillTree->gtFlags & GTF_SPILLED) != 0)
{
if (genIsRegCandidateLocal(unspillTree))
{
// We never have a GT_RELOAD for this case.
assert(tree == unspillTree);
// Reset spilled flag, since we are going to load a local variable from its home location.
unspillTree->gtFlags &= ~GTF_SPILLED;
GenTreeLclVar* lcl = unspillTree->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
var_types spillType = varDsc->GetRegisterType(lcl);
assert(spillType != TYP_UNDEF);
// TODO-Cleanup: The following code could probably be further merged and cleaned up.
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
// Load local variable from its home location.
// In most cases the tree type will indicate the correct type to use for the load.
// However, if it is NOT a normalizeOnLoad lclVar (i.e. NOT a small int that always gets
// widened when loaded into a register), and its size is not the same as the actual register type
// of the lclVar, then we need to change the type of the tree node when loading.
// This situation happens due to "optimizations" that avoid a cast and
// simply retype the node when using long type lclVar as an int.
// While loading the int in that case would work for this use of the lclVar, if it is
// later used as a long, we will have incorrectly truncated the long.
// In the normalizeOnLoad case ins_Load will return an appropriate sign- or zero-
// extending load.
var_types lclActualType = varDsc->GetActualRegisterType();
assert(lclActualType != TYP_UNDEF);
if (spillType != lclActualType && !varTypeIsGC(spillType) && !varDsc->lvNormalizeOnLoad())
{
assert(!varTypeIsGC(varDsc));
spillType = lclActualType;
}
#elif defined(TARGET_ARM)
// No normalizing for ARM
#else
NYI("Unspilling not implemented for this target architecture.");
#endif
bool reSpill = ((unspillTree->gtFlags & GTF_SPILL) != 0);
bool isLastUse = lcl->IsLastUse(0);
genUnspillLocal(lcl->GetLclNum(), spillType, lcl->AsLclVar(), tree->GetRegNum(), reSpill, isLastUse);
}
else if (unspillTree->IsMultiRegLclVar())
{
// We never have a GT_RELOAD for this case.
assert(tree == unspillTree);
GenTreeLclVar* lclNode = unspillTree->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
unsigned regCount = varDsc->lvFieldCnt;
for (unsigned i = 0; i < regCount; ++i)
{
GenTreeFlags spillFlags = lclNode->GetRegSpillFlagByIdx(i);
if ((spillFlags & GTF_SPILLED) != 0)
{
regNumber reg = lclNode->GetRegNumByIdx(i);
unsigned fieldVarNum = varDsc->lvFieldLclStart + i;
bool reSpill = ((spillFlags & GTF_SPILL) != 0);
bool isLastUse = lclNode->IsLastUse(i);
genUnspillLocal(fieldVarNum, compiler->lvaGetDesc(fieldVarNum)->TypeGet(), lclNode, reg, reSpill,
isLastUse);
}
}
}
else if (unspillTree->IsMultiRegNode())
{
// Here we may have a GT_RELOAD, and we will need to use that node ('tree') to
// do the unspilling if needed. However, that tree doesn't have the register
// count, so we use 'unspillTree' for that.
unsigned regCount = unspillTree->GetMultiRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
genUnspillRegIfNeeded(tree, i);
}
unspillTree->gtFlags &= ~GTF_SPILLED;
}
else
{
// Here we may have a GT_RELOAD.
// The spill temp allocated for it is associated with the original tree that defined the
// register that it was spilled from.
// So we use 'unspillTree' to recover that spill temp.
TempDsc* t = regSet.rsUnspillInPlace(unspillTree, unspillTree->GetRegNum());
emitAttr emitType = emitActualTypeSize(unspillTree->TypeGet());
// Reload into the register specified by 'tree' which may be a GT_RELOAD.
regNumber dstReg = tree->GetRegNum();
GetEmitter()->emitIns_R_S(ins_Load(unspillTree->gtType), emitType, dstReg, t->tdTempNum(), 0);
regSet.tmpRlsTemp(t);
unspillTree->gtFlags &= ~GTF_SPILLED;
gcInfo.gcMarkRegPtrVal(dstReg, unspillTree->TypeGet());
}
}
}
//------------------------------------------------------------------------
// genCopyRegIfNeeded: Copy the given node into the specified register
//
// Arguments:
// node - The node that has been evaluated (consumed).
// needReg - The register in which its value is needed.
//
// Notes:
// This must be a node that has a register.
//
void CodeGen::genCopyRegIfNeeded(GenTree* node, regNumber needReg)
{
assert((node->GetRegNum() != REG_NA) && (needReg != REG_NA));
assert(!node->isUsedFromSpillTemp());
inst_Mov(node->TypeGet(), needReg, node->GetRegNum(), /* canSkip */ true);
}
// Do liveness update for a subnode that is being consumed by codegen,
// including the logic for reload in case it is needed, and also take care
// of locating the value in the desired register.
void CodeGen::genConsumeRegAndCopy(GenTree* node, regNumber needReg)
{
if (needReg == REG_NA)
{
return;
}
genConsumeReg(node);
genCopyRegIfNeeded(node, needReg);
}
// Check that registers are consumed in the right order for the current node being generated.
#ifdef DEBUG
void CodeGen::genNumberOperandUse(GenTree* const operand, int& useNum) const
{
assert(operand != nullptr);
// Ignore argument placeholders.
if (operand->OperGet() == GT_ARGPLACE)
{
return;
}
assert(operand->gtUseNum == -1);
if (!operand->isContained() && !operand->IsCopyOrReload())
{
operand->gtUseNum = useNum;
useNum++;
}
else
{
for (GenTree* op : operand->Operands())
{
genNumberOperandUse(op, useNum);
}
}
}
void CodeGen::genCheckConsumeNode(GenTree* const node)
{
assert(node != nullptr);
if (verbose)
{
if (node->gtUseNum == -1)
{
// nothing wrong if the node was not consumed
}
else if ((node->gtDebugFlags & GTF_DEBUG_NODE_CG_CONSUMED) != 0)
{
printf("Node was consumed twice:\n");
compiler->gtDispTree(node, nullptr, nullptr, true);
}
else if ((lastConsumedNode != nullptr) && (node->gtUseNum < lastConsumedNode->gtUseNum))
{
printf("Nodes were consumed out-of-order:\n");
compiler->gtDispTree(lastConsumedNode, nullptr, nullptr, true);
compiler->gtDispTree(node, nullptr, nullptr, true);
}
}
assert((node->OperGet() == GT_CATCH_ARG) || ((node->gtDebugFlags & GTF_DEBUG_NODE_CG_CONSUMED) == 0));
assert((lastConsumedNode == nullptr) || (node->gtUseNum == -1) || (node->gtUseNum > lastConsumedNode->gtUseNum));
node->gtDebugFlags |= GTF_DEBUG_NODE_CG_CONSUMED;
lastConsumedNode = node;
}
#endif // DEBUG
//--------------------------------------------------------------------
// genConsumeReg: Do liveness update for a single register of a multireg child node
// that is being consumed by codegen.
//
// Arguments:
// tree - GenTree node
// multiRegIndex - The index of the register to be consumed
//
// Return Value:
// Returns the reg number for the given multiRegIndex.
//
regNumber CodeGen::genConsumeReg(GenTree* tree, unsigned multiRegIndex)
{
regNumber reg = tree->GetRegByIndex(multiRegIndex);
if (tree->OperIs(GT_COPY))
{
reg = genRegCopy(tree, multiRegIndex);
}
else if (reg == REG_NA)
{
assert(tree->OperIs(GT_RELOAD));
reg = tree->gtGetOp1()->GetRegByIndex(multiRegIndex);
assert(reg != REG_NA);
}
genUnspillRegIfNeeded(tree, multiRegIndex);
// UpdateLifeFieldVar() will return true if local var should be spilled.
if (tree->IsMultiRegLclVar() && treeLifeUpdater->UpdateLifeFieldVar(tree->AsLclVar(), multiRegIndex))
{
GenTreeLclVar* lcl = tree->AsLclVar();
genSpillLocal(lcl->GetLclNum(), lcl->GetFieldTypeByIndex(compiler, multiRegIndex), lcl,
lcl->GetRegByIndex(multiRegIndex));
}
if (tree->gtSkipReloadOrCopy()->OperIs(GT_LCL_VAR))
{
GenTreeLclVar* lcl = tree->gtSkipReloadOrCopy()->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
assert(compiler->lvaEnregMultiRegVars && lcl->IsMultiReg());
assert(varDsc->lvPromoted && (multiRegIndex < varDsc->lvFieldCnt));
unsigned fieldVarNum = varDsc->lvFieldLclStart + multiRegIndex;
LclVarDsc* fldVarDsc = compiler->lvaGetDesc(fieldVarNum);
assert(fldVarDsc->lvLRACandidate);
bool isFieldDying = lcl->IsLastUse(multiRegIndex);
if (fldVarDsc->GetRegNum() == REG_STK)
{
// We have loaded this into a register only temporarily
gcInfo.gcMarkRegSetNpt(reg);
}
else if (isFieldDying)
{
gcInfo.gcMarkRegSetNpt(genRegMask(fldVarDsc->GetRegNum()));
}
}
else
{
gcInfo.gcMarkRegSetNpt(tree->gtGetRegMask());
}
return reg;
}
//--------------------------------------------------------------------
// genConsumeReg: Do liveness update for a subnode that is being
// consumed by codegen.
//
// Arguments:
// tree - GenTree node
//
// Return Value:
// Returns the reg number of tree.
// In case of multi-reg call node returns the first reg number
// of the multi-reg return.
regNumber CodeGen::genConsumeReg(GenTree* tree)
{
if (tree->OperGet() == GT_COPY)
{
genRegCopy(tree);
}
// Handle the case where we have a lclVar that needs to be copied before use (i.e. because it
// interferes with one of the other sources (or the target, if it's a "delayed use" register)).
// TODO-Cleanup: This is a special copyReg case in LSRA - consider eliminating these and
// always using GT_COPY to make the lclVar location explicit.
// Note that we have to do this before calling genUpdateLife because otherwise if we spill it
// the lvRegNum will be set to REG_STK and we will lose track of what register currently holds
// the lclVar (normally when a lclVar is spilled it is then used from its former register
// location, which matches the GetRegNum() on the node).
// (Note that it doesn't matter if we call this before or after genUnspillRegIfNeeded
// because if it's on the stack it will always get reloaded into tree->GetRegNum()).
if (genIsRegCandidateLocal(tree))
{
GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
if (varDsc->GetRegNum() != REG_STK)
{
var_types regType = varDsc->GetRegisterType(lcl);
inst_Mov(regType, tree->GetRegNum(), varDsc->GetRegNum(), /* canSkip */ true);
}
}
genUnspillRegIfNeeded(tree);
// genUpdateLife() will also spill local var if marked as GTF_SPILL by calling CodeGen::genSpillVar
genUpdateLife(tree);
// there are three cases where consuming a reg means clearing the bit in the live mask
// 1. it was not produced by a local
// 2. it was produced by a local that is going dead
// 3. it was produced by a local that does not live in that reg (like one allocated on the stack)
if (genIsRegCandidateLocal(tree))
{
assert(tree->gtHasReg());
GenTreeLclVarCommon* lcl = tree->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
assert(varDsc->lvLRACandidate);
if (varDsc->GetRegNum() == REG_STK)
{
// We have loaded this into a register only temporarily
gcInfo.gcMarkRegSetNpt(genRegMask(tree->GetRegNum()));
}
else if ((tree->gtFlags & GTF_VAR_DEATH) != 0)
{
gcInfo.gcMarkRegSetNpt(genRegMask(varDsc->GetRegNum()));
}
}
else if (tree->gtSkipReloadOrCopy()->IsMultiRegLclVar())
{
assert(compiler->lvaEnregMultiRegVars);
GenTreeLclVar* lcl = tree->gtSkipReloadOrCopy()->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
unsigned firstFieldVarNum = varDsc->lvFieldLclStart;
for (unsigned i = 0; i < varDsc->lvFieldCnt; ++i)
{
LclVarDsc* fldVarDsc = compiler->lvaGetDesc(firstFieldVarNum + i);
assert(fldVarDsc->lvLRACandidate);
regNumber reg;
if (tree->OperIs(GT_COPY, GT_RELOAD) && (tree->AsCopyOrReload()->GetRegByIndex(i) != REG_NA))
{
reg = tree->AsCopyOrReload()->GetRegByIndex(i);
}
else
{
reg = lcl->AsLclVar()->GetRegNumByIdx(i);
}
bool isFieldDying = lcl->IsLastUse(i);
if (fldVarDsc->GetRegNum() == REG_STK)
{
// We have loaded this into a register only temporarily
gcInfo.gcMarkRegSetNpt(reg);
}
else if (isFieldDying)
{
gcInfo.gcMarkRegSetNpt(genRegMask(fldVarDsc->GetRegNum()));
}
}
}
else
{
gcInfo.gcMarkRegSetNpt(tree->gtGetRegMask());
}
genCheckConsumeNode(tree);
return tree->GetRegNum();
}
// Do liveness update for an address tree: one of GT_LEA, GT_LCL_VAR, or GT_CNS_INT (for call indirect).
void CodeGen::genConsumeAddress(GenTree* addr)
{
if (!addr->isContained())
{
genConsumeReg(addr);
}
else if (addr->OperGet() == GT_LEA)
{
genConsumeAddrMode(addr->AsAddrMode());
}
}
// Do liveness update for the operands of an address mode (GT_LEA) that is being consumed by codegen.
void CodeGen::genConsumeAddrMode(GenTreeAddrMode* addr)
{
genConsumeOperands(addr);
}
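//------------------------------------------------------------------------
// genConsumeRegs: Do liveness update for the source registers of a node,
// recursing into contained operands as needed.
//
// Arguments:
// tree - the node whose sources are being consumed
//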
void CodeGen::genConsumeRegs(GenTree* tree)
{
#if !defined(TARGET_64BIT)
if (tree->OperGet() == GT_LONG)
{
genConsumeRegs(tree->gtGetOp1());
genConsumeRegs(tree->gtGetOp2());
return;
}
#endif // !defined(TARGET_64BIT)
if (tree->isUsedFromSpillTemp())
{
// spill temps are un-tracked and hence no need to update life
}
else if (tree->isContained())
{
if (tree->OperIsIndir())
{
genConsumeAddress(tree->AsIndir()->Addr());
}
else if (tree->OperIs(GT_LEA))
{
genConsumeAddress(tree);
}
#ifdef TARGET_ARM64
else if (tree->OperIs(GT_BFIZ))
{
// Can be contained as part of LEA on ARM64
GenTreeCast* cast = tree->gtGetOp1()->AsCast();
assert(cast->isContained());
genConsumeAddress(cast->CastOp());
}
#endif
else if (tree->OperIsLocalRead())
{
// A contained lcl var must be living on the stack and marked as reg-optional, or must not be
// a register candidate.
unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
noway_assert(varDsc->GetRegNum() == REG_STK);
noway_assert(tree->IsRegOptional() || !varDsc->lvLRACandidate);
// Update the life of the lcl var.
genUpdateLife(tree);
}
#ifdef TARGET_XARCH
#ifdef FEATURE_HW_INTRINSICS
else if (tree->OperIs(GT_HWINTRINSIC))
{
// Only load/store HW intrinsics can be contained (and the address may also be contained).
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(tree->AsHWIntrinsic()->GetHWIntrinsicId());
assert((category == HW_Category_MemoryLoad) || (category == HW_Category_MemoryStore));
size_t numArgs = tree->AsHWIntrinsic()->GetOperandCount();
genConsumeAddress(tree->AsHWIntrinsic()->Op(1));
if (category == HW_Category_MemoryStore)
{
assert(numArgs == 2);
GenTree* op2 = tree->AsHWIntrinsic()->Op(2);
assert(op2->isContained());
genConsumeReg(op2);
}
else
{
assert(numArgs == 1);
}
}
#endif // FEATURE_HW_INTRINSICS
#endif // TARGET_XARCH
else if (tree->OperIs(GT_BITCAST, GT_NEG, GT_CAST, GT_LSH))
{
genConsumeRegs(tree->gtGetOp1());
}
else if (tree->OperIs(GT_MUL))
{
genConsumeRegs(tree->gtGetOp1());
genConsumeRegs(tree->gtGetOp2());
}
else
{
#ifdef FEATURE_SIMD
// An (in)equality operation that produces a bool result, when compared
// against Vector zero, marks its Vector Zero operand as contained.
assert(tree->OperIsLeaf() || tree->IsSIMDZero() || tree->IsVectorZero());
#else
assert(tree->OperIsLeaf());
#endif
}
}
else
{
genConsumeReg(tree);
}
}
//------------------------------------------------------------------------
// genConsumeOperands: Do liveness update for the operands of a unary or binary tree
//
// Arguments:
// tree - the GenTreeOp whose operands will have their liveness updated.
//
// Return Value:
// None.
//
void CodeGen::genConsumeOperands(GenTreeOp* tree)
{
GenTree* firstOp = tree->gtOp1;
GenTree* secondOp = tree->gtOp2;
if (firstOp != nullptr)
{
genConsumeRegs(firstOp);
}
if (secondOp != nullptr)
{
genConsumeRegs(secondOp);
}
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// genConsumeOperands: Do liveness update for the operands of a multi-operand node,
// currently GT_SIMD or GT_HWINTRINSIC
//
// Arguments:
// tree - the GenTreeMultiOp whose operands will have their liveness updated.
//
// Return Value:
// None.
//
void CodeGen::genConsumeMultiOpOperands(GenTreeMultiOp* tree)
{
for (GenTree* operand : tree->Operands())
{
genConsumeRegs(operand);
}
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if FEATURE_PUT_STRUCT_ARG_STK
//------------------------------------------------------------------------
// genConsumePutStructArgStk: Do liveness update for the operands of a PutArgStk node.
// Also loads the src/dst addresses into the right registers
// for the rep mov operation.
//
// Arguments:
// putArgNode - the PUTARG_STK tree.
// dstReg - the dstReg for the rep move operation.
// srcReg - the srcReg for the rep move operation.
// sizeReg - the sizeReg for the rep move operation.
//
// Return Value:
// None.
//
// Notes:
// sizeReg can be REG_NA when this function is used to consume the dstReg and srcReg
// for copying a struct with references onto the stack.
// The source address/offset is determined from the address on the GT_OBJ node, while
// the destination address is the address contained in 'm_stkArgVarNum' plus the offset
// provided in the 'putArgNode'.
// m_stkArgVarNum must be set to the varnum for the local used for placing the "by-value" args on the stack.
void CodeGen::genConsumePutStructArgStk(GenTreePutArgStk* putArgNode,
regNumber dstReg,
regNumber srcReg,
regNumber sizeReg)
{
// The putArgNode children are always contained. We should not consume any registers.
assert(putArgNode->gtGetOp1()->isContained());
// Get the source address.
GenTree* src = putArgNode->gtGetOp1();
assert(varTypeIsStruct(src));
assert((src->gtOper == GT_OBJ) || ((src->gtOper == GT_IND && varTypeIsSIMD(src))));
GenTree* srcAddr = src->gtGetOp1();
assert(dstReg != REG_NA);
assert(srcReg != REG_NA);
// Consume the registers only if they are not contained or set to REG_NA.
if (srcAddr->GetRegNum() != REG_NA)
{
genConsumeReg(srcAddr);
}
// If the op1 is already in the dstReg - nothing to do.
// Otherwise load the op1 (GT_ADDR) into the dstReg to copy the struct on the stack by value.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
assert(dstReg != REG_SPBASE);
inst_Mov(TYP_I_IMPL, dstReg, REG_SPBASE, /* canSkip */ false);
#else // !TARGET_X86
GenTree* dstAddr = putArgNode;
if (dstAddr->GetRegNum() != dstReg)
{
// Generate a LEA instruction to load the address of the outgoing arg slot (outgoing var + SlotNum offset,
// or the incoming arg area for tail calls) into RDI.
// Destination is always local (on the stack) - use EA_PTRSIZE.
assert(m_stkArgVarNum != BAD_VAR_NUM);
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, dstReg, m_stkArgVarNum, putArgNode->getArgOffset());
}
#endif // !TARGET_X86
if (srcAddr->OperIsLocalAddr())
{
// The OperLocalAddr is always contained.
assert(srcAddr->isContained());
const GenTreeLclVarCommon* lclNode = srcAddr->AsLclVarCommon();
// Generate LEA instruction to load the LclVar address in RSI.
// Source is known to be on the stack. Use EA_PTRSIZE.
unsigned int offset = lclNode->GetLclOffs();
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, srcReg, lclNode->GetLclNum(), offset);
}
else
{
assert(srcAddr->GetRegNum() != REG_NA);
// Source is not known to be on the stack. Use EA_BYREF.
GetEmitter()->emitIns_Mov(INS_mov, EA_BYREF, srcReg, srcAddr->GetRegNum(), /* canSkip */ true);
}
if (sizeReg != REG_NA)
{
unsigned size = putArgNode->GetStackByteSize();
inst_RV_IV(INS_mov, sizeReg, size, EA_PTRSIZE);
}
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
#if FEATURE_ARG_SPLIT
//------------------------------------------------------------------------
// genConsumeArgSplitStruct: Consume register(s) in Call node to set split struct argument.
//
// Arguments:
// putArgNode - the PUTARG_SPLIT tree.
//
// Return Value:
// None.
//
void CodeGen::genConsumeArgSplitStruct(GenTreePutArgSplit* putArgNode)
{
assert(putArgNode->OperGet() == GT_PUTARG_SPLIT);
assert(putArgNode->gtHasReg());
genUnspillRegIfNeeded(putArgNode);
gcInfo.gcMarkRegSetNpt(putArgNode->gtGetRegMask());
genCheckConsumeNode(putArgNode);
}
#endif // FEATURE_ARG_SPLIT
//------------------------------------------------------------------------
// genPutArgStkFieldList: Generate code for a putArgStk whose source is a GT_FIELD_LIST
//
// Arguments:
// putArgStk - The putArgStk node
// outArgVarNum - The lclVar num for the argument
//
// Notes:
// The x86 version of this is in codegenxarch.cpp, and doesn't take an
// outArgVarNum, as it pushes its args onto the stack.
//
#ifndef TARGET_X86
void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArgVarNum)
{
assert(putArgStk->gtOp1->OperIs(GT_FIELD_LIST));
// Evaluate each of the GT_FIELD_LIST items into their register
// and store their register into the outgoing argument area.
const unsigned argOffset = putArgStk->getArgOffset();
for (GenTreeFieldList::Use& use : putArgStk->gtOp1->AsFieldList()->Uses())
{
GenTree* nextArgNode = use.GetNode();
genConsumeReg(nextArgNode);
regNumber reg = nextArgNode->GetRegNum();
var_types type = use.GetType();
unsigned thisFieldOffset = argOffset + use.GetOffset();
// Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
// argument area.
#if defined(FEATURE_SIMD) && defined(TARGET_ARM64)
// storing of TYP_SIMD12 (i.e. Vector3) argument.
if (compMacOsArm64Abi() && (type == TYP_SIMD12))
{
// Need an additional integer register to extract upper 4 bytes from data.
regNumber tmpReg = nextArgNode->GetSingleTempReg();
GetEmitter()->emitStoreSIMD12ToLclOffset(outArgVarNum, thisFieldOffset, reg, tmpReg);
}
else
#endif // FEATURE_SIMD && TARGET_ARM64
{
emitAttr attr = emitTypeSize(type);
GetEmitter()->emitIns_S_R(ins_Store(type), attr, reg, outArgVarNum, thisFieldOffset);
}
// We can't write beyond the arg area unless this is a tail call, in which case we use
// the first stack arg as the base of the incoming arg area.
#ifdef DEBUG
unsigned areaSize = compiler->lvaLclSize(outArgVarNum);
#if FEATURE_FASTTAILCALL
if (putArgStk->gtCall->IsFastTailCall())
{
areaSize = compiler->info.compArgStackSize;
}
#endif
assert((thisFieldOffset + genTypeSize(type)) <= areaSize);
#endif
}
}
#endif // !TARGET_X86
//------------------------------------------------------------------------
// genSetBlockSize: Ensure that the block size is in the given register
//
// Arguments:
// blkNode - The block node
// sizeReg - The register into which the block's size should go
//
void CodeGen::genSetBlockSize(GenTreeBlk* blkNode, regNumber sizeReg)
{
if (sizeReg != REG_NA)
{
unsigned blockSize = blkNode->Size();
if (!blkNode->OperIs(GT_STORE_DYN_BLK))
{
assert((blkNode->gtRsvdRegs & genRegMask(sizeReg)) != 0);
instGen_Set_Reg_To_Imm(EA_4BYTE, sizeReg, blockSize);
}
else
{
GenTree* sizeNode = blkNode->AsStoreDynBlk()->gtDynamicSize;
inst_Mov(sizeNode->TypeGet(), sizeReg, sizeNode->GetRegNum(), /* canSkip */ true);
}
}
}
//------------------------------------------------------------------------
// genConsumeBlockSrc: Consume the source address register of a block node, if any.
//
// Arguments:
// blkNode - The block node
void CodeGen::genConsumeBlockSrc(GenTreeBlk* blkNode)
{
GenTree* src = blkNode->Data();
if (blkNode->OperIsCopyBlkOp())
{
// For a CopyBlk we need the address of the source.
assert(src->isContained());
if (src->OperGet() == GT_IND)
{
src = src->AsOp()->gtOp1;
}
else
{
// This must be a local.
// For this case, there is no source address register, as it is a
// stack-based address.
assert(src->OperIsLocal());
return;
}
}
else
{
if (src->OperIsInitVal())
{
src = src->gtGetOp1();
}
}
genConsumeReg(src);
}
//------------------------------------------------------------------------
// genSetBlockSrc: Ensure that the block source is in its allocated register.
//
// Arguments:
// blkNode - The block node
// srcReg - The register in which to set the source (address or init val).
//
void CodeGen::genSetBlockSrc(GenTreeBlk* blkNode, regNumber srcReg)
{
GenTree* src = blkNode->Data();
if (blkNode->OperIsCopyBlkOp())
{
// For a CopyBlk we need the address of the source.
if (src->OperGet() == GT_IND)
{
src = src->AsOp()->gtOp1;
}
else
{
// This must be a local struct.
// Load its address into srcReg.
inst_RV_TT(INS_lea, srcReg, src, 0, EA_BYREF);
return;
}
}
else
{
if (src->OperIsInitVal())
{
src = src->gtGetOp1();
}
}
genCopyRegIfNeeded(src, srcReg);
}
//------------------------------------------------------------------------
// genConsumeBlockOp: Ensure that the block's operands are enregistered
// as needed.
// Arguments:
// blkNode - The block node
//
// Notes:
// This ensures that the operands are consumed in the proper order to
// obey liveness modeling.
void CodeGen::genConsumeBlockOp(GenTreeBlk* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg)
{
// We have to consume the registers, and perform any copies, in the actual execution order: dst, src, size.
//
// Note that the register allocator ensures that the registers ON THE NODES will not interfere
// with one another if consumed (i.e. reloaded or moved to their ASSIGNED reg) in execution order.
// Further, it ensures that they will not interfere with one another if they are then copied
// to the REQUIRED register (if a fixed register requirement) in execution order. This requires,
// then, that we first consume all the operands, then do any necessary moves.
GenTree* const dstAddr = blkNode->Addr();
// First, consume all the sources in order, and verify that registers have been allocated appropriately,
// based on the 'gtBlkOpKind'.
// The destination is always in a register; 'genConsumeReg' asserts that.
genConsumeReg(dstAddr);
// The source may be a local or in a register; 'genConsumeBlockSrc' will check that.
genConsumeBlockSrc(blkNode);
// 'genSetBlockSize' (called below) will ensure that a register has been reserved as needed
// in the case where the size is a constant (i.e. it is not GT_STORE_DYN_BLK).
if (blkNode->OperGet() == GT_STORE_DYN_BLK)
{
genConsumeReg(blkNode->AsStoreDynBlk()->gtDynamicSize);
}
// Next, perform any necessary moves.
genCopyRegIfNeeded(dstAddr, dstReg);
genSetBlockSrc(blkNode, srcReg);
genSetBlockSize(blkNode, sizeReg);
}
//-------------------------------------------------------------------------
// genSpillLocal: Generate the actual spill of a local var.
//
// Arguments:
// varNum - The variable number of the local to be spilled.
// It may be a local field.
// type - The type of the local.
// lclNode - The node being spilled. Note that for a multi-reg local,
// the gtLclNum will be that of the parent struct.
// regNum - The register that 'varNum' is currently in.
//
// Return Value:
// None.
//
void CodeGen::genSpillLocal(unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(!varDsc->lvNormalizeOnStore() || (type == varDsc->GetActualRegisterType()));
// We have a register candidate local that is marked with GTF_SPILL.
// This flag generally means that we need to spill this local.
// The exception is a use of an EH/spill-at-single-def var that is being "spilled"
// to the stack, indicated by GTF_SPILL (note that all EH lclVar defs are always
// spilled, i.e. write-thru; likewise, single-def vars are spilled at their definitions).
// An EH or single-def var use is always valid on the stack (so we don't need to actually spill it),
// but the GTF_SPILL flag records the fact that the register value is going dead.
if (((lclNode->gtFlags & GTF_VAR_DEF) != 0) || (!varDsc->IsAlwaysAliveInMemory()))
{
// Store local variable to its home location.
// Ensure that lclVar stores are typed correctly.
GetEmitter()->emitIns_S_R(ins_Store(type, compiler->isSIMDTypeLocalAligned(varNum)), emitTypeSize(type), regNum,
varNum, 0);
}
}
//-------------------------------------------------------------------------
// genProduceReg: do liveness update for register produced by the current
// node in codegen after code has been emitted for it.
//
// Arguments:
// tree - Gentree node
//
// Return Value:
// None.
void CodeGen::genProduceReg(GenTree* tree)
{
#ifdef DEBUG
assert((tree->gtDebugFlags & GTF_DEBUG_NODE_CG_PRODUCED) == 0);
tree->gtDebugFlags |= GTF_DEBUG_NODE_CG_PRODUCED;
#endif
if (tree->gtFlags & GTF_SPILL)
{
// Code for GT_COPY node gets generated as part of consuming regs by its parent.
// A GT_COPY node in turn produces reg result and it should never be marked to
// spill.
//
// Similarly GT_RELOAD node gets generated as part of consuming regs by its
// parent and should never be marked for spilling.
noway_assert(!tree->IsCopyOrReload());
if (genIsRegCandidateLocal(tree))
{
GenTreeLclVar* lclNode = tree->AsLclVar();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
const unsigned varNum = lclNode->GetLclNum();
const var_types spillType = varDsc->GetRegisterType(lclNode);
genSpillLocal(varNum, spillType, lclNode, tree->GetRegNum());
}
else if (tree->IsMultiRegLclVar())
{
assert(compiler->lvaEnregMultiRegVars);
GenTreeLclVar* lclNode = tree->AsLclVar();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
const unsigned regCount = lclNode->GetFieldCount(compiler);
for (unsigned i = 0; i < regCount; ++i)
{
GenTreeFlags flags = lclNode->GetRegSpillFlagByIdx(i);
if ((flags & GTF_SPILL) != 0)
{
const regNumber reg = lclNode->GetRegNumByIdx(i);
const unsigned fieldVarNum = varDsc->lvFieldLclStart + i;
const var_types spillType = compiler->lvaGetDesc(fieldVarNum)->GetRegisterType();
genSpillLocal(fieldVarNum, spillType, lclNode, reg);
}
}
}
else
{
// In case of multi-reg call node, spill flag on call node
// indicates that one or more of its allocated regs need to
// be spilled. Call node needs to be further queried to
// know which of its result regs needs to be spilled.
if (tree->IsMultiRegCall())
{
GenTreeCall* call = tree->AsCall();
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
const unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
GenTreeFlags flags = call->GetRegSpillFlagByIdx(i);
if ((flags & GTF_SPILL) != 0)
{
regNumber reg = call->GetRegNumByIdx(i);
regSet.rsSpillTree(reg, call, i);
gcInfo.gcMarkRegSetNpt(genRegMask(reg));
}
}
}
#if FEATURE_ARG_SPLIT
else if (tree->OperIsPutArgSplit())
{
assert(compFeatureArgSplit());
GenTreePutArgSplit* argSplit = tree->AsPutArgSplit();
unsigned regCount = argSplit->gtNumRegs;
for (unsigned i = 0; i < regCount; ++i)
{
GenTreeFlags flags = argSplit->GetRegSpillFlagByIdx(i);
if ((flags & GTF_SPILL) != 0)
{
regNumber reg = argSplit->GetRegNumByIdx(i);
regSet.rsSpillTree(reg, argSplit, i);
gcInfo.gcMarkRegSetNpt(genRegMask(reg));
}
}
}
#ifdef TARGET_ARM
else if (compFeatureArgSplit() && tree->OperIsMultiRegOp())
{
GenTreeMultiRegOp* multiReg = tree->AsMultiRegOp();
unsigned regCount = multiReg->GetRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
GenTreeFlags flags = multiReg->GetRegSpillFlagByIdx(i);
if ((flags & GTF_SPILL) != 0)
{
regNumber reg = multiReg->GetRegNumByIdx(i);
regSet.rsSpillTree(reg, multiReg, i);
gcInfo.gcMarkRegSetNpt(genRegMask(reg));
}
}
}
#endif // TARGET_ARM
#endif // FEATURE_ARG_SPLIT
else
{
regSet.rsSpillTree(tree->GetRegNum(), tree);
gcInfo.gcMarkRegSetNpt(genRegMask(tree->GetRegNum()));
}
tree->gtFlags |= GTF_SPILLED;
tree->gtFlags &= ~GTF_SPILL;
return;
}
}
// Updating variable liveness after instruction was emitted
genUpdateLife(tree);
// If we've produced a register, mark it as a pointer, as needed.
if (tree->gtHasReg())
{
// We only mark the register in the following cases:
// 1. It is not a register candidate local. In this case, we're producing a
// register from a local, but the local is not a register candidate. Thus,
// we must be loading it as a temp register, and any "last use" flag on
// the register wouldn't be relevant.
// 2. The register candidate local is going dead. There's no point to mark
// the register as live, with a GC pointer, if the variable is dead.
if (!genIsRegCandidateLocal(tree) || ((tree->gtFlags & GTF_VAR_DEATH) == 0))
{
// Multi-reg nodes will produce more than one register result.
// Mark all the regs produced by the node.
if (tree->IsMultiRegCall())
{
const GenTreeCall* call = tree->AsCall();
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
const unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = call->GetRegNumByIdx(i);
var_types type = retTypeDesc->GetReturnRegType(i);
gcInfo.gcMarkRegPtrVal(reg, type);
}
}
else if (tree->IsCopyOrReloadOfMultiRegCall())
{
// we should never see reload of multi-reg call here
// because GT_RELOAD gets generated in reg consuming path.
noway_assert(tree->OperGet() == GT_COPY);
// A multi-reg GT_COPY node produces those regs to which
// copy has taken place.
const GenTreeCopyOrReload* copy = tree->AsCopyOrReload();
const GenTreeCall* call = copy->gtGetOp1()->AsCall();
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
const unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc->GetReturnRegType(i);
regNumber toReg = copy->GetRegNumByIdx(i);
if (toReg != REG_NA)
{
gcInfo.gcMarkRegPtrVal(toReg, type);
}
}
}
else if (tree->IsMultiRegLclVar())
{
assert(compiler->lvaEnregMultiRegVars);
GenTreeLclVar* lclNode = tree->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
unsigned regCount = varDsc->lvFieldCnt;
for (unsigned i = 0; i < regCount; i++)
{
if (!lclNode->IsLastUse(i))
{
regNumber reg = lclNode->GetRegByIndex(i);
if (reg != REG_NA)
{
var_types type = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i)->TypeGet();
gcInfo.gcMarkRegPtrVal(reg, type);
}
}
}
}
else
{
gcInfo.gcMarkRegPtrVal(tree->GetRegNum(), tree->TypeGet());
}
}
}
}
// transfer gc/byref status of src reg to dst reg
void CodeGen::genTransferRegGCState(regNumber dst, regNumber src)
{
regMaskTP srcMask = genRegMask(src);
regMaskTP dstMask = genRegMask(dst);
if (gcInfo.gcRegGCrefSetCur & srcMask)
{
gcInfo.gcMarkRegSetGCref(dstMask);
}
else if (gcInfo.gcRegByrefSetCur & srcMask)
{
gcInfo.gcMarkRegSetByref(dstMask);
}
else
{
gcInfo.gcMarkRegSetNpt(dstMask);
}
}
// generates an ip-relative call or indirect call via reg ('call reg')
// pass in 'addr' for a relative call or 'base' for an indirect register call
// methHnd - optional, only used for pretty printing
// retSize - emitter type of return for GC purposes, should be EA_BYREF, EA_GCREF, or EA_PTRSIZE(not GC)
//
// clang-format off
void CodeGen::genEmitCall(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
void* addr
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
regNumber base,
bool isJump)
{
#if !defined(TARGET_X86)
int argSize = 0;
#endif // !defined(TARGET_X86)
// This should have been put in volatile registers to ensure it does not
// get overridden by epilog sequence during tailcall.
noway_assert(!isJump || (base == REG_NA) || ((RBM_INT_CALLEE_TRASH & genRegMask(base)) != 0));
GetEmitter()->emitIns_Call(emitter::EmitCallType(callType),
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
addr,
argSize,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di, base, REG_NA, 0, 0, isJump);
}
// clang-format on
// generates an indirect call via addressing mode (call []) given an indir node
// methHnd - optional, only used for pretty printing
// retSize - emitter type of return for GC purposes, should be EA_BYREF, EA_GCREF, or EA_PTRSIZE(not GC)
//
// clang-format off
void CodeGen::genEmitCallIndir(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
GenTreeIndir* indir
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
bool isJump)
{
#if !defined(TARGET_X86)
int argSize = 0;
#endif // !defined(TARGET_X86)
regNumber iReg = (indir->Base() != nullptr) ? indir->Base()->GetRegNum() : REG_NA;
regNumber xReg = (indir->Index() != nullptr) ? indir->Index()->GetRegNum() : REG_NA;
// These should have been put in volatile registers to ensure they do not
// get overridden by epilog sequence during tailcall.
noway_assert(!isJump || (iReg == REG_NA) || ((RBM_CALLEE_TRASH & genRegMask(iReg)) != 0));
noway_assert(!isJump || (xReg == REG_NA) || ((RBM_CALLEE_TRASH & genRegMask(xReg)) != 0));
GetEmitter()->emitIns_Call(emitter::EmitCallType(callType),
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
argSize,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di,
iReg,
xReg,
indir->Scale(),
indir->Offset(),
isJump);
}
// clang-format on
//------------------------------------------------------------------------
// genCodeForCast: Generates the code for GT_CAST.
//
// Arguments:
// tree - the GT_CAST node.
//
void CodeGen::genCodeForCast(GenTreeOp* tree)
{
assert(tree->OperIs(GT_CAST));
var_types targetType = tree->TypeGet();
if (varTypeIsFloating(targetType) && varTypeIsFloating(tree->gtOp1))
{
// Casts float/double <--> double/float
genFloatToFloatCast(tree);
}
else if (varTypeIsFloating(tree->gtOp1))
{
// Casts float/double --> int32/int64
genFloatToIntCast(tree);
}
else if (varTypeIsFloating(targetType))
{
// Casts int32/uint32/int64/uint64 --> float/double
genIntToFloatCast(tree);
}
#ifndef TARGET_64BIT
else if (varTypeIsLong(tree->gtOp1))
{
genLongToIntCast(tree);
}
#endif // !TARGET_64BIT
else
{
// Casts int <--> int
genIntToIntCast(tree->AsCast());
}
// The per-case functions call genProduceReg()
}
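//------------------------------------------------------------------------
// GenIntCastDesc: Compute the overflow check kind and extension kind needed
// to generate code for an integer-to-integer cast.
//
// Arguments:
// cast - the GT_CAST node to describe
//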
CodeGen::GenIntCastDesc::GenIntCastDesc(GenTreeCast* cast)
{
const var_types srcType = genActualType(cast->gtGetOp1()->TypeGet());
const bool srcUnsigned = cast->IsUnsigned();
const unsigned srcSize = genTypeSize(srcType);
const var_types castType = cast->gtCastType;
const bool castUnsigned = varTypeIsUnsigned(castType);
const unsigned castSize = genTypeSize(castType);
const var_types dstType = genActualType(cast->TypeGet());
const unsigned dstSize = genTypeSize(dstType);
const bool overflow = cast->gtOverflow();
assert((srcSize == 4) || (srcSize == genTypeSize(TYP_I_IMPL)));
assert((dstSize == 4) || (dstSize == genTypeSize(TYP_I_IMPL)));
assert(dstSize == genTypeSize(genActualType(castType)));
if (castSize < 4) // Cast to small int type
{
if (overflow)
{
m_checkKind = CHECK_SMALL_INT_RANGE;
m_checkSrcSize = srcSize;
// Since these are small int types we can compute the min and max
// values of the castType without risk of integer overflow.
const int castNumBits = (castSize * 8) - (castUnsigned ? 0 : 1);
m_checkSmallIntMax = (1 << castNumBits) - 1;
m_checkSmallIntMin = (castUnsigned | srcUnsigned) ? 0 : (-m_checkSmallIntMax - 1);
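// For example, casting to INT8 from a signed source gives castNumBits = 7 and the range [-128, 127];
// casting to UINT16 gives castNumBits = 16 and the range [0, 65535].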
m_extendKind = COPY;
m_extendSrcSize = dstSize;
}
else
{
m_checkKind = CHECK_NONE;
// Casting to a small type really means widening from that small type to INT/LONG.
m_extendKind = castUnsigned ? ZERO_EXTEND_SMALL_INT : SIGN_EXTEND_SMALL_INT;
m_extendSrcSize = castSize;
}
}
#ifdef TARGET_64BIT
// castType cannot be (U)LONG on 32 bit targets, such casts should have been decomposed.
// srcType cannot be a small int type since it's the "actual type" of the cast operand.
// This means that widening casts do not occur on 32 bit targets.
else if (castSize > srcSize) // (U)INT to (U)LONG widening cast
{
assert((srcSize == 4) && (castSize == 8));
if (overflow && !srcUnsigned && castUnsigned)
{
// Widening from INT to ULONG, check if the value is positive
m_checkKind = CHECK_POSITIVE;
m_checkSrcSize = 4;
// This is the only overflow checking cast that requires changing the
// source value (by zero extending), all others copy the value as is.
assert((srcType == TYP_INT) && (castType == TYP_ULONG));
m_extendKind = ZERO_EXTEND_INT;
m_extendSrcSize = 4;
}
else
{
m_checkKind = CHECK_NONE;
m_extendKind = srcUnsigned ? ZERO_EXTEND_INT : SIGN_EXTEND_INT;
m_extendSrcSize = 4;
}
}
else if (castSize < srcSize) // (U)LONG to (U)INT narrowing cast
{
assert((srcSize == 8) && (castSize == 4));
if (overflow)
{
if (castUnsigned) // (U)LONG to UINT cast
{
m_checkKind = CHECK_UINT_RANGE;
}
else if (srcUnsigned) // ULONG to INT cast
{
m_checkKind = CHECK_POSITIVE_INT_RANGE;
}
else // LONG to INT cast
{
m_checkKind = CHECK_INT_RANGE;
}
m_checkSrcSize = 8;
}
else
{
m_checkKind = CHECK_NONE;
}
m_extendKind = COPY;
m_extendSrcSize = 4;
}
#endif
else // if (castSize == srcSize) // Sign changing or same type cast
{
assert(castSize == srcSize);
if (overflow && (srcUnsigned != castUnsigned))
{
m_checkKind = CHECK_POSITIVE;
m_checkSrcSize = srcSize;
}
else
{
m_checkKind = CHECK_NONE;
}
m_extendKind = COPY;
m_extendSrcSize = srcSize;
}
}
#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------
// genStoreLongLclVar: Generate code to store a non-enregistered long lclVar
//
// Arguments:
// treeNode - A TYP_LONG lclVar node.
//
// Return Value:
// None.
//
// Assumptions:
// 'treeNode' must be a TYP_LONG lclVar node for a lclVar that has NOT been promoted.
// Its operand must be a GT_LONG node.
//
void CodeGen::genStoreLongLclVar(GenTree* treeNode)
{
emitter* emit = GetEmitter();
GenTreeLclVarCommon* lclNode = treeNode->AsLclVarCommon();
unsigned lclNum = lclNode->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
assert(varDsc->TypeGet() == TYP_LONG);
assert(!varDsc->lvPromoted);
GenTree* op1 = treeNode->AsOp()->gtOp1;
// A GT_LONG is always contained, so it cannot have RELOAD or COPY inserted between it and its consumer,
// but a MUL_LONG may.
noway_assert(op1->OperIs(GT_LONG) || op1->gtSkipReloadOrCopy()->OperIs(GT_MUL_LONG));
genConsumeRegs(op1);
if (op1->OperGet() == GT_LONG)
{
GenTree* loVal = op1->gtGetOp1();
GenTree* hiVal = op1->gtGetOp2();
noway_assert((loVal->GetRegNum() != REG_NA) && (hiVal->GetRegNum() != REG_NA));
emit->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, loVal->GetRegNum(), lclNum, 0);
emit->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, hiVal->GetRegNum(), lclNum, genTypeSize(TYP_INT));
}
else
{
assert((op1->gtSkipReloadOrCopy()->gtFlags & GTF_MUL_64RSLT) != 0);
// This is either a multi-reg MUL_LONG, or a multi-reg reload or copy.
assert(op1->IsMultiRegNode() && (op1->GetMultiRegCount() == 2));
// Stack store
emit->emitIns_S_R(ins_Store(TYP_INT), emitTypeSize(TYP_INT), op1->GetRegByIndex(0), lclNum, 0);
emit->emitIns_S_R(ins_Store(TYP_INT), emitTypeSize(TYP_INT), op1->GetRegByIndex(1), lclNum,
genTypeSize(TYP_INT));
}
}
#endif // !defined(TARGET_64BIT)
//------------------------------------------------------------------------
// genCodeForJumpTrue: Generate code for a GT_JTRUE node.
//
// Arguments:
// jtrue - The node
//
void CodeGen::genCodeForJumpTrue(GenTreeOp* jtrue)
{
assert(compiler->compCurBB->bbJumpKind == BBJ_COND);
assert(jtrue->OperIs(GT_JTRUE));
GenTreeOp* relop = jtrue->gtGetOp1()->AsOp();
GenCondition condition = GenCondition::FromRelop(relop);
if (condition.PreferSwap())
{
condition = GenCondition::Swap(condition);
}
#if defined(TARGET_XARCH)
if ((condition.GetCode() == GenCondition::FNEU) &&
(relop->gtGetOp1()->GetRegNum() == relop->gtGetOp2()->GetRegNum()) &&
!relop->gtGetOp1()->isUsedFromSpillTemp() && !relop->gtGetOp2()->isUsedFromSpillTemp())
{
// For floating point, `x != x` is a common way of
// checking for NaN. So, in the case where both
// operands are the same, we can optimize codegen
// to only do a single check.
condition = GenCondition(GenCondition::P);
}
if (relop->MarkedForSignJumpOpt())
{
// If relop was previously marked for a signed jump check optimization because of SF flag
// reuse, replace jge/jl with jns/js.
assert(relop->OperGet() == GT_LT || relop->OperGet() == GT_GE);
condition = (relop->OperGet() == GT_LT) ? GenCondition(GenCondition::S) : GenCondition(GenCondition::NS);
}
#endif
inst_JCC(condition, compiler->compCurBB->bbJumpDest);
}
//------------------------------------------------------------------------
// genCodeForJcc: Generate code for a GT_JCC node.
//
// Arguments:
// jcc - The node
//
void CodeGen::genCodeForJcc(GenTreeCC* jcc)
{
assert(compiler->compCurBB->bbJumpKind == BBJ_COND);
assert(jcc->OperIs(GT_JCC));
inst_JCC(jcc->gtCondition, compiler->compCurBB->bbJumpDest);
}
//------------------------------------------------------------------------
// inst_JCC: Generate a conditional branch instruction sequence.
//
// Arguments:
// condition - The branch condition
// target - The basic block to jump to when the condition is true
//
void CodeGen::inst_JCC(GenCondition condition, BasicBlock* target)
{
const GenConditionDesc& desc = GenConditionDesc::Get(condition);
if (desc.oper == GT_NONE)
{
inst_JMP(desc.jumpKind1, target);
}
else if (desc.oper == GT_OR)
{
inst_JMP(desc.jumpKind1, target);
inst_JMP(desc.jumpKind2, target);
}
else // if (desc.oper == GT_AND)
{
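// For a compound AND condition both checks must pass: if the reversed first check is taken
// we skip the second branch; otherwise the second check decides the jump to 'target'.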
BasicBlock* labelNext = genCreateTempLabel();
inst_JMP(emitter::emitReverseJumpKind(desc.jumpKind1), labelNext);
inst_JMP(desc.jumpKind2, target);
genDefineTempLabel(labelNext);
}
}
//------------------------------------------------------------------------
// genCodeForSetcc: Generate code for a GT_SETCC node.
//
// Arguments:
// setcc - The node
//
void CodeGen::genCodeForSetcc(GenTreeCC* setcc)
{
assert(setcc->OperIs(GT_SETCC));
inst_SETCC(setcc->gtCondition, setcc->TypeGet(), setcc->GetRegNum());
genProduceReg(setcc);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Code Generation Support Methods for Linear Codegen XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "emit.h"
#include "codegen.h"
//------------------------------------------------------------------------
// genInitializeRegisterState: Initialize the register state contained in 'regSet'.
//
// Assumptions:
// On exit, "rsModifiedRegsMask" (in "regSet") holds the masks of all registers that host an argument of the function,
// and the elements of "rsSpillDesc" (in "regSet") are set to nullptr.
//
// Notes:
// This method is intended to be called only from initializeStructuresBeforeBlockCodeGeneration.
void CodeGen::genInitializeRegisterState()
{
// Initialize the spill tracking logic
regSet.rsSpillBeg();
// If any arguments live in registers, mark those regs as such
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
// Is this variable a parameter assigned to a register?
if (!varDsc->lvIsParam || !varDsc->lvRegister)
{
continue;
}
// Is the argument live on entry to the method?
if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
{
continue;
}
if (varDsc->IsAddressExposed())
{
continue;
}
// Mark the register as holding the variable
regNumber reg = varDsc->GetRegNum();
if (genIsValidIntReg(reg))
{
regSet.verifyRegUsed(reg);
}
}
}
//------------------------------------------------------------------------
// genInitialize: Initialize the scopes, registers, gcInfo and current liveness variable structures
// used in the generation of the blocks' code.
//
// Assumptions:
// -The pointer tracking state in "gcInfo" for registers and variables is cleared.
// -"compiler->compCurLife" is set to an empty set.
// -If there is local var info, the siScopes scope logic in codegen is initialized in "siInit()".
//
// Notes:
// This method is intended to be called at the start of block code generation, before the list of blocks is
// iterated.
void CodeGen::genInitialize()
{
// Initialize the line# tracking logic
if (compiler->opts.compScopeInfo)
{
siInit();
}
#ifdef USING_VARIABLE_LIVE_RANGE
initializeVariableLiveKeeper();
#endif // USING_VARIABLE_LIVE_RANGE
genPendingCallLabel = nullptr;
// Initialize the pointer tracking code
gcInfo.gcRegPtrSetInit();
gcInfo.gcVarPtrSetInit();
// Initialize the register set logic
genInitializeRegisterState();
// Make sure a set is allocated for compiler->compCurLife (in the long case), so we can set it to empty without
// allocation at the start of each basic block.
VarSetOps::AssignNoCopy(compiler, compiler->compCurLife, VarSetOps::MakeEmpty(compiler));
// We initialize the stack level before the first "BasicBlock" code is generated in case we need to report that a
// stack variable needs a home, and hence its stack offset.
SetStackLevel(0);
}
//------------------------------------------------------------------------
// genCodeForBBlist: Generate code for all the blocks in a method
//
// Arguments:
// None
//
// Notes:
// This is the main method for linear codegen. It calls genCodeForTreeNode
// to generate the code for each node in each BasicBlock, and handles BasicBlock
// boundaries and branches.
//
void CodeGen::genCodeForBBlist()
{
unsigned savedStkLvl;
#ifdef DEBUG
genInterruptibleUsed = true;
// You have to be careful if you create basic blocks from now on
compiler->fgSafeBasicBlockCreation = false;
#endif // DEBUG
#if defined(DEBUG) && defined(TARGET_X86)
// The "check stack pointer on call" stress mode is not compatible with fully interruptible GC. REVIEW: why?
//
if (GetInterruptible() && compiler->opts.compStackCheckOnCall)
{
compiler->opts.compStackCheckOnCall = false;
}
#endif // defined(DEBUG) && defined(TARGET_X86)
#if defined(DEBUG) && defined(TARGET_XARCH)
// The "check stack pointer on return" stress mode is not compatible with fully interruptible GC. REVIEW: why?
// It is also not compatible with any function that makes a tailcall: we aren't smart enough to only
// insert the SP check in the non-tailcall returns.
//
if ((GetInterruptible() || compiler->compTailCallUsed) && compiler->opts.compStackCheckOnRet)
{
compiler->opts.compStackCheckOnRet = false;
}
#endif // defined(DEBUG) && defined(TARGET_XARCH)
genMarkLabelsForCodegen();
assert(!compiler->fgFirstBBScratch ||
compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first.
/* Initialize structures used in the block list iteration */
genInitialize();
/*-------------------------------------------------------------------------
*
* Walk the basic blocks and generate code for each one
*
*/
BasicBlock* block;
for (block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\n=============== Generating ");
block->dspBlockHeader(compiler, true, true);
compiler->fgDispBBLiveness(block);
}
#endif // DEBUG
assert(LIR::AsRange(block).CheckLIR(compiler));
// Figure out which registers hold variables on entry to this block
regSet.ClearMaskVars();
gcInfo.gcRegGCrefSetCur = RBM_NONE;
gcInfo.gcRegByrefSetCur = RBM_NONE;
compiler->m_pLinearScan->recordVarLocationsAtStartOfBB(block);
// Update variable liveness after the last instruction of the previous block was emitted
// and before the first instruction of the current block is emitted.
genUpdateLife(block->bbLiveIn);
// Even if liveness didn't change, we need to update the registers containing GC references.
// genUpdateLife will update the registers live due to liveness changes. But what about registers that didn't
// change? We cleared them out above. Maybe we should just not clear them out, but update the ones that change
// here. That would require handling the changes in recordVarLocationsAtStartOfBB().
regMaskTP newLiveRegSet = RBM_NONE;
regMaskTP newRegGCrefSet = RBM_NONE;
regMaskTP newRegByrefSet = RBM_NONE;
#ifdef DEBUG
VARSET_TP removedGCVars(VarSetOps::MakeEmpty(compiler));
VARSET_TP addedGCVars(VarSetOps::MakeEmpty(compiler));
#endif
VarSetOps::Iter iter(compiler, block->bbLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
if (varDsc->lvIsInReg())
{
newLiveRegSet |= varDsc->lvRegMask();
if (varDsc->lvType == TYP_REF)
{
newRegGCrefSet |= varDsc->lvRegMask();
}
else if (varDsc->lvType == TYP_BYREF)
{
newRegByrefSet |= varDsc->lvRegMask();
}
if (!varDsc->IsAlwaysAliveInMemory())
{
#ifdef DEBUG
if (verbose && VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varIndex))
{
VarSetOps::AddElemD(compiler, removedGCVars, varIndex);
}
#endif // DEBUG
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varIndex);
}
}
if ((!varDsc->lvIsInReg() || varDsc->IsAlwaysAliveInMemory()) && compiler->lvaIsGCTracked(varDsc))
{
#ifdef DEBUG
if (verbose && !VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varIndex))
{
VarSetOps::AddElemD(compiler, addedGCVars, varIndex);
}
#endif // DEBUG
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varIndex);
}
}
regSet.SetMaskVars(newLiveRegSet);
#ifdef DEBUG
if (compiler->verbose)
{
if (!VarSetOps::IsEmpty(compiler, addedGCVars))
{
printf("\t\t\t\t\t\t\tAdded GCVars: ");
dumpConvertedVarSet(compiler, addedGCVars);
printf("\n");
}
if (!VarSetOps::IsEmpty(compiler, removedGCVars))
{
printf("\t\t\t\t\t\t\tRemoved GCVars: ");
dumpConvertedVarSet(compiler, removedGCVars);
printf("\n");
}
}
#endif // DEBUG
gcInfo.gcMarkRegSetGCref(newRegGCrefSet DEBUGARG(true));
gcInfo.gcMarkRegSetByref(newRegByrefSet DEBUGARG(true));
/* Blocks with handlerGetsXcptnObj()==true use GT_CATCH_ARG to
represent the exception object (TYP_REF).
We mark REG_EXCEPTION_OBJECT as holding a GC object on entry
to the block, it will be the first thing evaluated
(thanks to GTF_ORDER_SIDEEFF).
*/
if (handlerGetsXcptnObj(block->bbCatchTyp))
{
for (GenTree* node : LIR::AsRange(block))
{
if (node->OperGet() == GT_CATCH_ARG)
{
gcInfo.gcMarkRegSetGCref(RBM_EXCEPTION_OBJECT);
break;
}
}
}
#if defined(TARGET_ARM)
genInsertNopForUnwinder(block);
#endif
/* Start a new code output block */
genUpdateCurrentFunclet(block);
genLogLabel(block);
// Tell everyone which basic block we're working on
compiler->compCurBB = block;
block->bbEmitCookie = nullptr;
// If this block is a jump target or it requires a label then set 'needLabel' to true,
//
bool needLabel = (block->bbFlags & BBF_HAS_LABEL) != 0;
if (block == compiler->fgFirstColdBlock)
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nThis is the start of the cold region of the method\n");
}
#endif
// We should never have a block that falls through into the Cold section
noway_assert(!block->bbPrev->bbFallsThrough());
needLabel = true;
}
// We also want to start a new Instruction group by calling emitAddLabel below,
// when we need accurate bbWeights for this block in the emitter. We force this
// whenever our previous block was a BBJ_COND and it has a different weight than us.
//
// Note: We need to have set compCurBB before calling emitAddLabel
//
if ((block->bbPrev != nullptr) && (block->bbPrev->bbJumpKind == BBJ_COND) &&
(block->bbWeight != block->bbPrev->bbWeight))
{
JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT
" different from " FMT_BB " with weight " FMT_WT "\n",
block->bbPrev->bbNum, block->bbPrev->bbWeight, block->bbNum, block->bbWeight);
needLabel = true;
}
#if FEATURE_LOOP_ALIGN
if (GetEmitter()->emitEndsWithAlignInstr())
{
// Force new label if current IG ends with an align instruction.
needLabel = true;
}
#endif
if (needLabel)
{
// Mark a label and update the current set of live GC refs
block->bbEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, false DEBUG_ARG(block));
}
if (block == compiler->fgFirstColdBlock)
{
// We require the block that starts the Cold section to have a label
noway_assert(block->bbEmitCookie);
GetEmitter()->emitSetFirstColdIGCookie(block->bbEmitCookie);
}
// Both stacks are always empty on entry to a basic block.
assert(genStackLevel == 0);
genAdjustStackLevel(block);
savedStkLvl = genStackLevel;
// Needed when jitting debug code
siBeginBlock(block);
// BBF_INTERNAL blocks don't correspond to any single IL instruction.
if (compiler->opts.compDbgInfo && (block->bbFlags & BBF_INTERNAL) &&
!compiler->fgBBisScratch(block)) // If the block is the distinguished first scratch block, then no need to
// emit a NO_MAPPING entry, immediately after the prolog.
{
genIPmappingAdd(IPmappingDscKind::NoMapping, DebugInfo(), true);
}
bool firstMapping = true;
#if defined(FEATURE_EH_FUNCLETS)
if (block->bbFlags & BBF_FUNCLET_BEG)
{
genReserveFuncletProlog(block);
}
#endif // FEATURE_EH_FUNCLETS
// Clear compCurStmt and compCurLifeTree.
compiler->compCurStmt = nullptr;
compiler->compCurLifeTree = nullptr;
// Emit poisoning into scratch BB that comes right after prolog.
// We cannot emit this code in the prolog as it might make the prolog too large.
if (compiler->compShouldPoisonFrame() && compiler->fgBBisScratch(block))
{
genPoisonFrame(newLiveRegSet);
}
// Traverse the block in linear order, generating code for each node as we
// encounter it.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
// Set the use-order numbers for each node.
{
int useNum = 0;
for (GenTree* node : LIR::AsRange(block))
{
assert((node->gtDebugFlags & GTF_DEBUG_NODE_CG_CONSUMED) == 0);
node->gtUseNum = -1;
if (node->isContained() || node->IsCopyOrReload())
{
continue;
}
for (GenTree* operand : node->Operands())
{
genNumberOperandUse(operand, useNum);
}
}
}
bool addPreciseMappings =
(JitConfig.JitDumpPreciseDebugInfoFile() != nullptr) || (JitConfig.JitDisasmWithDebugInfo() != 0);
#endif // DEBUG
DebugInfo currentDI;
for (GenTree* node : LIR::AsRange(block))
{
// Do we have a new IL offset?
if (node->OperGet() == GT_IL_OFFSET)
{
GenTreeILOffset* ilOffset = node->AsILOffset();
DebugInfo rootDI = ilOffset->gtStmtDI.GetRoot();
if (rootDI.IsValid())
{
genEnsureCodeEmitted(currentDI);
currentDI = rootDI;
genIPmappingAdd(IPmappingDscKind::Normal, currentDI, firstMapping);
firstMapping = false;
}
#ifdef DEBUG
if (addPreciseMappings && ilOffset->gtStmtDI.IsValid())
{
genAddPreciseIPMappingHere(ilOffset->gtStmtDI);
}
assert(ilOffset->gtStmtLastILoffs <= compiler->info.compILCodeSize ||
ilOffset->gtStmtLastILoffs == BAD_IL_OFFSET);
if (compiler->opts.dspCode && compiler->opts.dspInstrs && ilOffset->gtStmtLastILoffs != BAD_IL_OFFSET)
{
while (genCurDispOffset <= ilOffset->gtStmtLastILoffs)
{
genCurDispOffset += dumpSingleInstr(compiler->info.compCode, genCurDispOffset, "> ");
}
}
#endif // DEBUG
}
genCodeForTreeNode(node);
if (node->gtHasReg(compiler) && node->IsUnusedValue())
{
genConsumeReg(node);
}
} // end for each node in block
#ifdef DEBUG
// The following set of register spill checks and GC pointer tracking checks used to be
// performed at statement boundaries. Now, with LIR, there are no statements, so they are
// performed at the end of each block.
// TODO: could these checks be performed more frequently? E.g., at each location where
// the register allocator says there are no live non-variable registers. Perhaps this could
// be done by using the map maintained by LSRA (operandToLocationInfoMap) to mark a node
// somehow when, after the execution of that node, there will be no live non-variable registers.
regSet.rsSpillChk();
/* Make sure we didn't bungle pointer register tracking */
regMaskTP ptrRegs = gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur;
regMaskTP nonVarPtrRegs = ptrRegs & ~regSet.GetMaskVars();
// If return is a GC-type, clear it. Note that if a common
// epilog is generated (genReturnBB) it has a void return
// even though we might return a ref. We can't use the compRetType
// as the determiner because something we are tracking as a byref
// might be used as a return value of an int function (which is legal)
GenTree* blockLastNode = block->lastNode();
if ((blockLastNode != nullptr) && (blockLastNode->gtOper == GT_RETURN) &&
(varTypeIsGC(compiler->info.compRetType) ||
(blockLastNode->AsOp()->gtOp1 != nullptr && varTypeIsGC(blockLastNode->AsOp()->gtOp1->TypeGet()))))
{
nonVarPtrRegs &= ~RBM_INTRET;
}
if (nonVarPtrRegs)
{
printf("Regset after " FMT_BB " gcr=", block->bbNum);
printRegMaskInt(gcInfo.gcRegGCrefSetCur & ~regSet.GetMaskVars());
compiler->GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur & ~regSet.GetMaskVars());
printf(", byr=");
printRegMaskInt(gcInfo.gcRegByrefSetCur & ~regSet.GetMaskVars());
compiler->GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur & ~regSet.GetMaskVars());
printf(", regVars=");
printRegMaskInt(regSet.GetMaskVars());
compiler->GetEmitter()->emitDispRegSet(regSet.GetMaskVars());
printf("\n");
}
noway_assert(nonVarPtrRegs == RBM_NONE);
#endif // DEBUG
#if defined(DEBUG)
if (block->bbNext == nullptr)
{
// Unit testing of the emitter: generate a bunch of instructions into the last block
// (it's as good as any, but better than the prologue, which can only be a single instruction
// group) then use COMPlus_JitLateDisasm=* to see if the late disassembler
// thinks the instructions are the same as we do.
#if defined(TARGET_AMD64) && defined(LATE_DISASM)
genAmd64EmitterUnitTests();
#elif defined(TARGET_ARM64)
genArm64EmitterUnitTests();
#endif // TARGET_ARM64
}
#endif // defined(DEBUG)
// It is possible to reach the end of the block without generating code for the current IL offset.
// For example, if the following IR ends the current block, no code will have been generated for
// offset 21:
//
// ( 0, 0) [000040] ------------ il_offset void IL offset: 21
//
// N001 ( 0, 0) [000039] ------------ nop void
//
// This can lead to problems when debugging the generated code. To prevent these issues, make sure
// we've generated code for the last IL offset we saw in the block.
genEnsureCodeEmitted(currentDI);
/* Is this the last block, and are there any open scopes left ? */
bool isLastBlockProcessed = (block->bbNext == nullptr);
if (block->isBBCallAlwaysPair())
{
isLastBlockProcessed = (block->bbNext->bbNext == nullptr);
}
#ifdef USING_VARIABLE_LIVE_RANGE
if (compiler->opts.compDbgInfo && isLastBlockProcessed)
{
varLiveKeeper->siEndAllVariableLiveRange(compiler->compCurLife);
}
#endif // USING_VARIABLE_LIVE_RANGE
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
{
siEndBlock(block);
#ifdef USING_SCOPE_INFO
if (isLastBlockProcessed && siOpenScopeList.scNext)
{
/* This assert no longer holds, because we may insert a throw
block to demarcate the end of a try or finally region when they
are at the end of the method. It would be nice if we could fix
our code so that this throw block will no longer be necessary. */
// noway_assert(block->bbCodeOffsEnd != compiler->info.compILCodeSize);
siCloseAllOpenScopes();
}
#endif // USING_SCOPE_INFO
}
SubtractStackLevel(savedStkLvl);
#ifdef DEBUG
// compCurLife should be equal to the liveOut set, except that we don't keep
// it up to date for vars that are not register candidates
// (it would be nice to have a xor set function)
VARSET_TP mismatchLiveVars(VarSetOps::Diff(compiler, block->bbLiveOut, compiler->compCurLife));
VarSetOps::UnionD(compiler, mismatchLiveVars,
VarSetOps::Diff(compiler, compiler->compCurLife, block->bbLiveOut));
VarSetOps::Iter mismatchLiveVarIter(compiler, mismatchLiveVars);
unsigned mismatchLiveVarIndex = 0;
bool foundMismatchedRegVar = false;
while (mismatchLiveVarIter.NextElem(&mismatchLiveVarIndex))
{
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(mismatchLiveVarIndex);
if (varDsc->lvIsRegCandidate())
{
if (!foundMismatchedRegVar)
{
JITDUMP("Mismatched live reg vars after " FMT_BB ":", block->bbNum);
foundMismatchedRegVar = true;
}
JITDUMP(" V%02u", compiler->lvaTrackedIndexToLclNum(mismatchLiveVarIndex));
}
}
if (foundMismatchedRegVar)
{
JITDUMP("\n");
assert(!"Found mismatched live reg var(s) after block");
}
#endif
/* Both stacks should always be empty on exit from a basic block */
noway_assert(genStackLevel == 0);
#ifdef TARGET_AMD64
// On AMD64, we need to generate a NOP after a call that is the last instruction of the block, in several
// situations, to support proper exception handling semantics. This is mostly to ensure that when the stack
// walker computes an instruction pointer for a frame, that instruction pointer is in the correct EH region.
// The document "X64 and ARM ABIs.docx" has more details. The situations:
// 1. If the call instruction is in a different EH region as the instruction that follows it.
// 2. If the call immediately precedes an OS epilog. (Note that what the JIT or VM consider an epilog might
// be slightly different from what the OS considers an epilog, and it is the OS-reported epilog that matters
// here.)
// We handle case #1 here, and case #2 in the emitter.
if (GetEmitter()->emitIsLastInsCall())
{
// Ok, the last instruction generated is a call instruction. Do any of the other conditions hold?
// Note: we may be generating a few too many NOPs for the case of call preceding an epilog. Technically,
// if the next block is a BBJ_RETURN, an epilog will be generated, but there may be some instructions
// generated before the OS epilog starts, such as a GS cookie check.
if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
{
// We only need the NOP if we're not going to generate any more code as part of the block end.
switch (block->bbJumpKind)
{
case BBJ_ALWAYS:
case BBJ_THROW:
case BBJ_CALLFINALLY:
case BBJ_EHCATCHRET:
// We're going to generate more code below anyway, so no need for the NOP.
case BBJ_RETURN:
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
// These are the "epilog follows" case, handled in the emitter.
break;
case BBJ_NONE:
if (block->bbNext == nullptr)
{
// Call immediately before the end of the code; we should never get here.
instGen(INS_BREAKPOINT); // This should never get executed
}
else
{
// We need the NOP
instGen(INS_nop);
}
break;
case BBJ_COND:
case BBJ_SWITCH:
// These can't have a call as the last instruction!
default:
noway_assert(!"Unexpected bbJumpKind");
break;
}
}
}
#endif // TARGET_AMD64
/* Do we need to generate a jump or return? */
switch (block->bbJumpKind)
{
case BBJ_RETURN:
genExitCode(block);
break;
case BBJ_THROW:
// If we have a throw at the end of a function or funclet, we need to emit another instruction
// afterwards to help the OS unwinder determine the correct context during unwind.
// We insert an unexecuted breakpoint instruction in several situations
// following a throw instruction:
// 1. If the throw is the last instruction of the function or funclet. This helps
// the OS unwinder determine the correct context during an unwind from the
// thrown exception.
// 2. If this is the last block of the hot section.
// 3. If the subsequent block is a special throw block.
// 4. On AMD64, if the next block is in a different EH region.
if ((block->bbNext == nullptr) || (block->bbNext->bbFlags & BBF_FUNCLET_BEG) ||
!BasicBlock::sameEHRegion(block, block->bbNext) ||
(!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext)) ||
block->bbNext == compiler->fgFirstColdBlock)
{
instGen(INS_BREAKPOINT); // This should never get executed
}
// Do likewise for blocks that end in DOES_NOT_RETURN calls
// that were not caught by the above rules. This ensures that
// gc register liveness doesn't change across call instructions
// in fully-interruptible mode.
else
{
GenTree* call = block->lastNode();
if ((call != nullptr) && (call->gtOper == GT_CALL))
{
if ((call->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0)
{
instGen(INS_BREAKPOINT); // This should never get executed
}
}
}
break;
case BBJ_CALLFINALLY:
block = genCallFinally(block);
break;
#if defined(FEATURE_EH_FUNCLETS)
case BBJ_EHCATCHRET:
genEHCatchRet(block);
FALLTHROUGH;
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
genReserveFuncletEpilog(block);
break;
#else // !FEATURE_EH_FUNCLETS
case BBJ_EHCATCHRET:
noway_assert(!"Unexpected BBJ_EHCATCHRET"); // not used on x86
break;
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
genEHFinallyOrFilterRet(block);
break;
#endif // !FEATURE_EH_FUNCLETS
case BBJ_NONE:
case BBJ_SWITCH:
break;
case BBJ_ALWAYS:
inst_JMP(EJ_jmp, block->bbJumpDest);
FALLTHROUGH;
case BBJ_COND:
#if FEATURE_LOOP_ALIGN
// This is the last place where we operate on blocks and after this, we operate
// on IG. Hence, if we know that the destination of "block" is the first block
// of a loop and needs alignment (it has BBF_LOOP_ALIGN), then "block" represents
// the end of the loop. Propagate that information to the IG through "igLoopBackEdge".
//
// In the emitter, this information will be used to calculate the loop size.
// Depending on the loop size, the decision of whether or not to align the loop will be made.
//
// In the emitter, we need to calculate the loop size from `block->bbJumpDest` through
// `block` (inclusive). Thus, we need to ensure there is a label on the lexical fall-through
// block, even if one is not otherwise needed, to be able to calculate the size of this
// loop (loop size is calculated by walking the instruction groups; see emitter::getLoopSize()).
if (block->bbJumpDest->isLoopAlign())
{
GetEmitter()->emitSetLoopBackEdge(block->bbJumpDest);
if (block->bbNext != nullptr)
{
JITDUMP("Mark " FMT_BB " as label: alignment end-of-loop\n", block->bbNext->bbNum);
block->bbNext->bbFlags |= BBF_HAS_LABEL;
}
}
#endif // FEATURE_LOOP_ALIGN
break;
default:
noway_assert(!"Unexpected bbJumpKind");
break;
}
#if FEATURE_LOOP_ALIGN
if (block->hasAlign())
{
// If this block ends with an 'align' instruction (identified by BBF_HAS_ALIGN),
// then we need to add the align instruction in the current "block".
//
// For non-adaptive alignment, add alignment instruction of size depending on the
// compJitAlignLoopBoundary.
// For adaptive alignment, alignment instruction will always be of 15 bytes for xarch
// and 16 bytes for arm64.
assert(ShouldAlignLoops());
GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->bbJumpKind == BBJ_ALWAYS));
}
if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign()))
{
if (compiler->opts.compJitHideAlignBehindJmp)
{
// The current IG is the one just before the IG containing the loop start.
// Establish a connection between the recently emitted align instruction and the loop
// it is actually aligning, using 'idaLoopHeadPredIG'.
GetEmitter()->emitConnectAlignInstrWithCurIG();
}
}
#endif
#if defined(DEBUG) && defined(USING_VARIABLE_LIVE_RANGE)
if (compiler->verbose)
{
varLiveKeeper->dumpBlockVariableLiveRanges(block);
}
#endif // defined(DEBUG) && defined(USING_VARIABLE_LIVE_RANGE)
INDEBUG(compiler->compCurBB = nullptr);
} //------------------ END-FOR each block of the method -------------------
// There could be variables alive at this point. For example see lvaKeepAliveAndReportThis.
// This call is for cleaning the GC refs
genUpdateLife(VarSetOps::MakeEmpty(compiler));
/* Finalize the spill tracking logic */
regSet.rsSpillEnd();
/* Finalize the temp tracking logic */
regSet.tmpEnd();
#ifdef DEBUG
if (compiler->verbose)
{
printf("\n# ");
printf("compCycleEstimate = %6d, compSizeEstimate = %5d ", compiler->compCycleEstimate,
compiler->compSizeEstimate);
printf("%s\n", compiler->info.compFullName);
}
#endif
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Register Management XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
//------------------------------------------------------------------------
// genSpillVar: Spill a local variable
//
// Arguments:
// tree - the lclVar node for the variable being spilled
//
// Return Value:
// None.
//
// Assumptions:
// The lclVar must be a register candidate (lvRegCandidate)
void CodeGen::genSpillVar(GenTree* tree)
{
unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(varDsc->lvIsRegCandidate());
// We don't actually need to spill if it is already living in memory
bool needsSpill = ((tree->gtFlags & GTF_VAR_DEF) == 0 && varDsc->lvIsInReg());
if (needsSpill)
{
// In order for a lclVar to have been allocated to a register, it must not have been aliasable, and can
// therefore be store-normalized (rather than load-normalized). In fact, not performing store normalization
// can lead to problems on architectures where a lclVar may be allocated to a register that is not
// addressable at the granularity of the lclVar's defined type (e.g. x86).
var_types lclType = varDsc->GetActualRegisterType();
emitAttr size = emitTypeSize(lclType);
// If this is a write-thru or a single-def variable, we don't actually spill at a use,
// but we will kill the var in the reg (below).
if (!varDsc->IsAlwaysAliveInMemory())
{
instruction storeIns = ins_Store(lclType, compiler->isSIMDTypeLocalAligned(varNum));
assert(varDsc->GetRegNum() == tree->GetRegNum());
inst_TT_RV(storeIns, size, tree, tree->GetRegNum());
}
// We should only have both GTF_SPILL (i.e. the flag causing this method to be called) and
// GTF_SPILLED on a write-thru/single-def def, for which we should not be calling this method.
assert((tree->gtFlags & GTF_SPILLED) == 0);
// Remove the live var from the register.
genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(tree));
gcInfo.gcMarkRegSetNpt(varDsc->lvRegMask());
if (VarSetOps::IsMember(compiler, gcInfo.gcTrkStkPtrLcls, varDsc->lvVarIndex))
{
#ifdef DEBUG
if (!VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming live\n", varNum);
}
else
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing live\n", varNum);
}
#endif
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
tree->gtFlags &= ~GTF_SPILL;
// If this is NOT a write-thru, reset the var location.
if ((tree->gtFlags & GTF_SPILLED) == 0)
{
varDsc->SetRegNum(REG_STK);
if (varTypeIsMultiReg(tree))
{
varDsc->SetOtherReg(REG_STK);
}
}
else
{
// We only have 'GTF_SPILL' and 'GTF_SPILLED' on a def of a write-thru lclVar
// or a single-def var that is to be spilled at its definition.
assert((varDsc->IsAlwaysAliveInMemory()) && ((tree->gtFlags & GTF_VAR_DEF) != 0));
}
#ifdef USING_VARIABLE_LIVE_RANGE
if (needsSpill)
{
// We need this after "lvRegNum" has changed because now we are sure that varDsc->lvIsInReg() is false.
// "SiVarLoc" constructor uses the "LclVarDsc" of the variable.
varLiveKeeper->siUpdateVariableLiveRange(varDsc, varNum);
}
#endif // USING_VARIABLE_LIVE_RANGE
}
//------------------------------------------------------------------------
// genUpdateVarReg: Update the current register location for a multi-reg lclVar
//
// Arguments:
// varDsc - the LclVarDsc for the lclVar
// tree - the lclVar node
// regIndex - the index of the register in the node
//
// inline
void CodeGenInterface::genUpdateVarReg(LclVarDsc* varDsc, GenTree* tree, int regIndex)
{
// This should only be called for multireg lclVars.
assert(compiler->lvaEnregMultiRegVars);
assert(tree->IsMultiRegLclVar() || (tree->gtOper == GT_COPY));
varDsc->SetRegNum(tree->GetRegByIndex(regIndex));
}
//------------------------------------------------------------------------
// genUpdateVarReg: Update the current register location for a lclVar
//
// Arguments:
// varDsc - the LclVarDsc for the lclVar
// tree - the lclVar node
//
// inline
void CodeGenInterface::genUpdateVarReg(LclVarDsc* varDsc, GenTree* tree)
{
// This should not be called for multireg lclVars.
assert((tree->OperIsScalarLocal() && !tree->IsMultiRegLclVar()) || (tree->gtOper == GT_COPY));
varDsc->SetRegNum(tree->GetRegNum());
}
//------------------------------------------------------------------------
// sameRegAsDst: Return the child that has the same reg as the dst (if any)
//
// Arguments:
// tree - the node of interest
// other - an out parameter to return the other child
//
// Notes:
// If 'tree' has a child with the same assigned register as its target reg,
// that child will be returned, and 'other' will contain the non-matching child.
// Otherwise, both other and the return value will be nullptr.
//
GenTree* sameRegAsDst(GenTree* tree, GenTree*& other /*out*/)
{
if (tree->GetRegNum() == REG_NA)
{
other = nullptr;
return nullptr;
}
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
if (op1->GetRegNum() == tree->GetRegNum())
{
other = op2;
return op1;
}
if (op2->GetRegNum() == tree->GetRegNum())
{
other = op1;
return op2;
}
else
{
other = nullptr;
return nullptr;
}
}
//------------------------------------------------------------------------
// genUnspillLocal: Reload a register candidate local into a register, if needed.
//
// Arguments:
// varNum - The variable number of the local to be reloaded (unspilled).
// It may be a local field.
// type - The type of the local.
// lclNode - The node being unspilled. Note that for a multi-reg local,
// the gtLclNum will be that of the parent struct.
// regNum - The register that 'varNum' should be loaded to.
// reSpill - True if it will be immediately spilled after use.
// isLastUse - True if this is a last use of 'varNum'.
//
// Notes:
// The caller must have determined that this local needs to be unspilled.
void CodeGen::genUnspillLocal(
unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum, bool reSpill, bool isLastUse)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
inst_set_SV_var(lclNode);
instruction ins = ins_Load(type, compiler->isSIMDTypeLocalAligned(varNum));
GetEmitter()->emitIns_R_S(ins, emitTypeSize(type), regNum, varNum, 0);
// TODO-Review: We would like to call:
// genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(tree));
// instead of the following code, but this ends up hitting this assert:
// assert((regSet.GetMaskVars() & regMask) == 0);
// due to issues with LSRA resolution moves.
// So, just force it for now. This probably indicates a condition that creates a GC hole!
//
// Extra note: I think we really want to call something like gcInfo.gcUpdateForRegVarMove,
// because the variable is not really going live or dead, but that method is somewhat poorly
// factored because it, in turn, updates rsMaskVars which is part of RegSet not GCInfo.
// TODO-Cleanup: This code exists in other CodeGen*.cpp files, and should be moved to CodeGenCommon.cpp.
// Don't update the variable's location if we are just re-spilling it again.
if (!reSpill)
{
varDsc->SetRegNum(regNum);
#ifdef USING_VARIABLE_LIVE_RANGE
// We want "VariableLiveRange" inclusive on the beginning and exclusive on the ending.
// For that reason we shouldn't report an update of the variable location if it is becoming dead
// on the same native offset.
if (!isLastUse)
{
// Report the home change for this variable
varLiveKeeper->siUpdateVariableLiveRange(varDsc, varNum);
}
#endif // USING_VARIABLE_LIVE_RANGE
if (!varDsc->IsAlwaysAliveInMemory())
{
#ifdef DEBUG
if (VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tRemoving V%02u from gcVarPtrSetCur\n", varNum);
}
#endif // DEBUG
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tV%02u in reg ", varNum);
varDsc->PrintVarReg();
printf(" is becoming live ");
compiler->printTreeID(lclNode);
printf("\n");
}
#endif // DEBUG
regSet.AddMaskVars(genGetRegMask(varDsc));
}
gcInfo.gcMarkRegPtrVal(regNum, type);
}
//------------------------------------------------------------------------
// genUnspillRegIfNeeded: Reload a MultiReg source value into a register, if needed
//
// Arguments:
// tree - the MultiReg node of interest.
// multiRegIndex - the index of the value to reload, if needed.
//
// Notes:
// It must *not* be a GT_LCL_VAR (those are handled separately).
// In the normal case, the value will be reloaded into the register it
// was originally computed into. However, if that register is not available,
// the register allocator will have allocated a different register, and
// inserted a GT_RELOAD to indicate the register into which it should be
// reloaded.
//
void CodeGen::genUnspillRegIfNeeded(GenTree* tree, unsigned multiRegIndex)
{
GenTree* unspillTree = tree;
assert(unspillTree->IsMultiRegNode());
if (tree->gtOper == GT_RELOAD)
{
unspillTree = tree->AsOp()->gtOp1;
}
// In case of multi-reg node, GTF_SPILLED flag on it indicates that
// one or more of its result regs are spilled. Individual spill flags need to be
// queried to determine which specific result regs need to be unspilled.
if ((unspillTree->gtFlags & GTF_SPILLED) == 0)
{
return;
}
GenTreeFlags spillFlags = unspillTree->GetRegSpillFlagByIdx(multiRegIndex);
if ((spillFlags & GTF_SPILLED) == 0)
{
return;
}
regNumber dstReg = tree->GetRegByIndex(multiRegIndex);
if (dstReg == REG_NA)
{
assert(tree->IsCopyOrReload());
dstReg = unspillTree->GetRegByIndex(multiRegIndex);
}
if (tree->IsMultiRegLclVar())
{
GenTreeLclVar* lclNode = tree->AsLclVar();
unsigned fieldVarNum = compiler->lvaGetDesc(lclNode)->lvFieldLclStart + multiRegIndex;
bool reSpill = ((spillFlags & GTF_SPILL) != 0);
bool isLastUse = lclNode->IsLastUse(multiRegIndex);
genUnspillLocal(fieldVarNum, compiler->lvaGetDesc(fieldVarNum)->TypeGet(), lclNode, dstReg, reSpill, isLastUse);
}
else
{
var_types dstType = unspillTree->GetRegTypeByIndex(multiRegIndex);
regNumber unspillTreeReg = unspillTree->GetRegByIndex(multiRegIndex);
TempDsc* t = regSet.rsUnspillInPlace(unspillTree, unspillTreeReg, multiRegIndex);
emitAttr emitType = emitActualTypeSize(dstType);
GetEmitter()->emitIns_R_S(ins_Load(dstType), emitType, dstReg, t->tdTempNum(), 0);
regSet.tmpRlsTemp(t);
gcInfo.gcMarkRegPtrVal(dstReg, dstType);
}
}
//------------------------------------------------------------------------
// genUnspillRegIfNeeded: Reload the value into a register, if needed
//
// Arguments:
// tree - the node of interest.
//
// Notes:
// In the normal case, the value will be reloaded into the register it
// was originally computed into. However, if that register is not available,
// the register allocator will have allocated a different register, and
// inserted a GT_RELOAD to indicate the register into which it should be
// reloaded.
//
// A GT_RELOAD never has a reg candidate lclVar or multi-reg lclVar as its child.
// This is because register candidate locals always have distinct tree nodes
// for uses and definitions. (This is unlike non-register candidate locals which
// may be "defined" by a GT_LCL_VAR node that loads it into a register. It may
// then have a GT_RELOAD inserted if it needs a different register, though this
// is unlikely to happen except in stress modes.)
//
void CodeGen::genUnspillRegIfNeeded(GenTree* tree)
{
GenTree* unspillTree = tree;
if (tree->gtOper == GT_RELOAD)
{
unspillTree = tree->AsOp()->gtOp1;
}
if ((unspillTree->gtFlags & GTF_SPILLED) != 0)
{
if (genIsRegCandidateLocal(unspillTree))
{
// We never have a GT_RELOAD for this case.
assert(tree == unspillTree);
// Reset spilled flag, since we are going to load a local variable from its home location.
unspillTree->gtFlags &= ~GTF_SPILLED;
GenTreeLclVar* lcl = unspillTree->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
var_types spillType = varDsc->GetRegisterType(lcl);
assert(spillType != TYP_UNDEF);
// TODO-Cleanup: The following code could probably be further merged and cleaned up.
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
// Load local variable from its home location.
// In most cases the tree type will indicate the correct type to use for the load.
// However, if it is NOT a normalizeOnLoad lclVar (i.e. NOT a small int that always gets
// widened when loaded into a register), and its size is not the same as the actual register type
// of the lclVar, then we need to change the type of the tree node when loading.
// This situation happens due to "optimizations" that avoid a cast and
// simply retype the node when using long type lclVar as an int.
// While loading the int in that case would work for this use of the lclVar, if it is
// later used as a long, we will have incorrectly truncated the long.
// In the normalizeOnLoad case ins_Load will return an appropriate sign- or zero-
// extending load.
var_types lclActualType = varDsc->GetActualRegisterType();
assert(lclActualType != TYP_UNDEF);
if (spillType != lclActualType && !varTypeIsGC(spillType) && !varDsc->lvNormalizeOnLoad())
{
assert(!varTypeIsGC(varDsc));
spillType = lclActualType;
}
#elif defined(TARGET_ARM)
// No normalizing for ARM
#else
NYI("Unspilling not implemented for this target architecture.");
#endif
bool reSpill = ((unspillTree->gtFlags & GTF_SPILL) != 0);
bool isLastUse = lcl->IsLastUse(0);
genUnspillLocal(lcl->GetLclNum(), spillType, lcl->AsLclVar(), tree->GetRegNum(), reSpill, isLastUse);
}
else if (unspillTree->IsMultiRegLclVar())
{
// We never have a GT_RELOAD for this case.
assert(tree == unspillTree);
GenTreeLclVar* lclNode = unspillTree->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
unsigned regCount = varDsc->lvFieldCnt;
for (unsigned i = 0; i < regCount; ++i)
{
GenTreeFlags spillFlags = lclNode->GetRegSpillFlagByIdx(i);
if ((spillFlags & GTF_SPILLED) != 0)
{
regNumber reg = lclNode->GetRegNumByIdx(i);
unsigned fieldVarNum = varDsc->lvFieldLclStart + i;
bool reSpill = ((spillFlags & GTF_SPILL) != 0);
bool isLastUse = lclNode->IsLastUse(i);
genUnspillLocal(fieldVarNum, compiler->lvaGetDesc(fieldVarNum)->TypeGet(), lclNode, reg, reSpill,
isLastUse);
}
}
}
else if (unspillTree->IsMultiRegNode())
{
// Here we may have a GT_RELOAD, and we will need to use that node ('tree') to
// do the unspilling if needed. However, that tree doesn't have the register
// count, so we use 'unspillTree' for that.
unsigned regCount = unspillTree->GetMultiRegCount(compiler);
for (unsigned i = 0; i < regCount; ++i)
{
genUnspillRegIfNeeded(tree, i);
}
unspillTree->gtFlags &= ~GTF_SPILLED;
}
else
{
// Here we may have a GT_RELOAD.
// The spill temp allocated for it is associated with the original tree that defined the
// register that it was spilled from.
// So we use 'unspillTree' to recover that spill temp.
TempDsc* t = regSet.rsUnspillInPlace(unspillTree, unspillTree->GetRegNum());
emitAttr emitType = emitActualTypeSize(unspillTree->TypeGet());
// Reload into the register specified by 'tree' which may be a GT_RELOAD.
regNumber dstReg = tree->GetRegNum();
GetEmitter()->emitIns_R_S(ins_Load(unspillTree->gtType), emitType, dstReg, t->tdTempNum(), 0);
regSet.tmpRlsTemp(t);
unspillTree->gtFlags &= ~GTF_SPILLED;
gcInfo.gcMarkRegPtrVal(dstReg, unspillTree->TypeGet());
}
}
}
//------------------------------------------------------------------------
// genCopyRegIfNeeded: Copy the given node into the specified register
//
// Arguments:
// node - The node that has been evaluated (consumed).
// needReg - The register in which its value is needed.
//
// Notes:
// This must be a node that has a register.
//
void CodeGen::genCopyRegIfNeeded(GenTree* node, regNumber needReg)
{
assert((node->GetRegNum() != REG_NA) && (needReg != REG_NA));
assert(!node->isUsedFromSpillTemp());
inst_Mov(node->TypeGet(), needReg, node->GetRegNum(), /* canSkip */ true);
}
// Do liveness update for a subnode that is being consumed by codegen,
// including the logic for reloading it if needed, and also take care
// of locating the value in the desired register.
void CodeGen::genConsumeRegAndCopy(GenTree* node, regNumber needReg)
{
if (needReg == REG_NA)
{
return;
}
genConsumeReg(node);
genCopyRegIfNeeded(node, needReg);
}
// Check that registers are consumed in the right order for the current node being generated.
#ifdef DEBUG
void CodeGen::genNumberOperandUse(GenTree* const operand, int& useNum) const
{
assert(operand != nullptr);
// Ignore argument placeholders.
if (operand->OperGet() == GT_ARGPLACE)
{
return;
}
assert(operand->gtUseNum == -1);
if (!operand->isContained() && !operand->IsCopyOrReload())
{
operand->gtUseNum = useNum;
useNum++;
}
else
{
for (GenTree* op : operand->Operands())
{
genNumberOperandUse(op, useNum);
}
}
}
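//------------------------------------------------------------------------
// genCheckConsumeNode: Verify that 'node' has not already been consumed and that
//    nodes are consumed in the order assigned by genNumberOperandUse, then mark
//    the node as consumed. DEBUG only.
//
// Arguments:
//    node - the node being consumed
//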
void CodeGen::genCheckConsumeNode(GenTree* const node)
{
assert(node != nullptr);
if (verbose)
{
if (node->gtUseNum == -1)
{
// nothing wrong if the node was not consumed
}
else if ((node->gtDebugFlags & GTF_DEBUG_NODE_CG_CONSUMED) != 0)
{
printf("Node was consumed twice:\n");
compiler->gtDispTree(node, nullptr, nullptr, true);
}
else if ((lastConsumedNode != nullptr) && (node->gtUseNum < lastConsumedNode->gtUseNum))
{
printf("Nodes were consumed out-of-order:\n");
compiler->gtDispTree(lastConsumedNode, nullptr, nullptr, true);
compiler->gtDispTree(node, nullptr, nullptr, true);
}
}
assert((node->OperGet() == GT_CATCH_ARG) || ((node->gtDebugFlags & GTF_DEBUG_NODE_CG_CONSUMED) == 0));
assert((lastConsumedNode == nullptr) || (node->gtUseNum == -1) || (node->gtUseNum > lastConsumedNode->gtUseNum));
node->gtDebugFlags |= GTF_DEBUG_NODE_CG_CONSUMED;
lastConsumedNode = node;
}
#endif // DEBUG
//--------------------------------------------------------------------
// genConsumeReg: Do liveness update for a single register of a multireg child node
// that is being consumed by codegen.
//
// Arguments:
// tree - GenTree node
// multiRegIndex - The index of the register to be consumed
//
// Return Value:
// Returns the reg number for the given multiRegIndex.
//
regNumber CodeGen::genConsumeReg(GenTree* tree, unsigned multiRegIndex)
{
regNumber reg = tree->GetRegByIndex(multiRegIndex);
if (tree->OperIs(GT_COPY))
{
reg = genRegCopy(tree, multiRegIndex);
}
else if (reg == REG_NA)
{
assert(tree->OperIs(GT_RELOAD));
reg = tree->gtGetOp1()->GetRegByIndex(multiRegIndex);
assert(reg != REG_NA);
}
genUnspillRegIfNeeded(tree, multiRegIndex);
// UpdateLifeFieldVar() will return true if local var should be spilled.
if (tree->IsMultiRegLclVar() && treeLifeUpdater->UpdateLifeFieldVar(tree->AsLclVar(), multiRegIndex))
{
GenTreeLclVar* lcl = tree->AsLclVar();
genSpillLocal(lcl->GetLclNum(), lcl->GetFieldTypeByIndex(compiler, multiRegIndex), lcl,
lcl->GetRegByIndex(multiRegIndex));
}
if (tree->gtSkipReloadOrCopy()->OperIs(GT_LCL_VAR))
{
assert(compiler->lvaEnregMultiRegVars);
GenTreeLclVar* lcl = tree->gtSkipReloadOrCopy()->AsLclVar();
assert(lcl->IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
assert(varDsc->lvPromoted);
assert(multiRegIndex < varDsc->lvFieldCnt);
unsigned fieldVarNum = varDsc->lvFieldLclStart + multiRegIndex;
LclVarDsc* fldVarDsc = compiler->lvaGetDesc(fieldVarNum);
assert(fldVarDsc->lvLRACandidate);
if (fldVarDsc->GetRegNum() == REG_STK)
{
// We have loaded this into a register only temporarily
gcInfo.gcMarkRegSetNpt(genRegMask(reg));
}
else if (lcl->IsLastUse(multiRegIndex))
{
gcInfo.gcMarkRegSetNpt(genRegMask(fldVarDsc->GetRegNum()));
}
}
else
{
gcInfo.gcMarkRegSetNpt(tree->gtGetRegMask());
}
return reg;
}
//--------------------------------------------------------------------
// genConsumeReg: Do liveness update for a subnode that is being
// consumed by codegen.
//
// Arguments:
// tree - GenTree node
//
// Return Value:
// Returns the reg number of tree.
// In case of multi-reg call node returns the first reg number
// of the multi-reg return.
//
regNumber CodeGen::genConsumeReg(GenTree* tree)
{
if (tree->OperGet() == GT_COPY)
{
genRegCopy(tree);
}
// Handle the case where we have a lclVar that needs to be copied before use (i.e. because it
// interferes with one of the other sources (or the target, if it's a "delayed use" register)).
// TODO-Cleanup: This is a special copyReg case in LSRA - consider eliminating these and
// always using GT_COPY to make the lclVar location explicit.
// Note that we have to do this before calling genUpdateLife because otherwise if we spill it
// the lvRegNum will be set to REG_STK and we will lose track of what register currently holds
// the lclVar (normally when a lclVar is spilled it is then used from its former register
// location, which matches the GetRegNum() on the node).
// (Note that it doesn't matter if we call this before or after genUnspillRegIfNeeded
// because if it's on the stack it will always get reloaded into tree->GetRegNum()).
if (genIsRegCandidateLocal(tree))
{
GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
if (varDsc->GetRegNum() != REG_STK)
{
var_types regType = varDsc->GetRegisterType(lcl);
inst_Mov(regType, tree->GetRegNum(), varDsc->GetRegNum(), /* canSkip */ true);
}
}
genUnspillRegIfNeeded(tree);
// genUpdateLife() will also spill local var if marked as GTF_SPILL by calling CodeGen::genSpillVar
genUpdateLife(tree);
// there are three cases where consuming a reg means clearing the bit in the live mask
// 1. it was not produced by a local
// 2. it was produced by a local that is going dead
// 3. it was produced by a local that does not live in that reg (like one allocated on the stack)
if (genIsRegCandidateLocal(tree))
{
assert(tree->gtHasReg(compiler));
GenTreeLclVarCommon* lcl = tree->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
assert(varDsc->lvLRACandidate);
if (varDsc->GetRegNum() == REG_STK)
{
// We have loaded this into a register only temporarily
gcInfo.gcMarkRegSetNpt(genRegMask(tree->GetRegNum()));
}
else if ((tree->gtFlags & GTF_VAR_DEATH) != 0)
{
gcInfo.gcMarkRegSetNpt(genRegMask(varDsc->GetRegNum()));
}
}
else if (tree->gtSkipReloadOrCopy()->IsMultiRegLclVar())
{
assert(compiler->lvaEnregMultiRegVars);
GenTreeLclVar* lcl = tree->gtSkipReloadOrCopy()->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
unsigned firstFieldVarNum = varDsc->lvFieldLclStart;
for (unsigned i = 0; i < varDsc->lvFieldCnt; ++i)
{
LclVarDsc* fldVarDsc = compiler->lvaGetDesc(firstFieldVarNum + i);
assert(fldVarDsc->lvLRACandidate);
regNumber reg;
if (tree->OperIs(GT_COPY, GT_RELOAD) && (tree->AsCopyOrReload()->GetRegByIndex(i) != REG_NA))
{
reg = tree->AsCopyOrReload()->GetRegByIndex(i);
}
else
{
reg = lcl->AsLclVar()->GetRegNumByIdx(i);
}
if (fldVarDsc->GetRegNum() == REG_STK)
{
// We have loaded this into a register only temporarily
gcInfo.gcMarkRegSetNpt(genRegMask(reg));
}
else if (lcl->IsLastUse(i))
{
gcInfo.gcMarkRegSetNpt(genRegMask(fldVarDsc->GetRegNum()));
}
}
}
else
{
gcInfo.gcMarkRegSetNpt(tree->gtGetRegMask());
}
genCheckConsumeNode(tree);
return tree->GetRegNum();
}
// Do liveness update for an address tree: one of GT_LEA, GT_LCL_VAR, or GT_CNS_INT (for call indirect).
void CodeGen::genConsumeAddress(GenTree* addr)
{
if (!addr->isContained())
{
genConsumeReg(addr);
}
else if (addr->OperGet() == GT_LEA)
{
genConsumeAddrMode(addr->AsAddrMode());
}
}
// do liveness update for a subnode that is being consumed by codegen
void CodeGen::genConsumeAddrMode(GenTreeAddrMode* addr)
{
genConsumeOperands(addr);
}
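//------------------------------------------------------------------------
// genConsumeRegs: Do liveness update for the register(s) consumed by 'tree'.
//
// Arguments:
//    tree - the node being consumed. It may be contained, in which case its
//           sources (e.g. the address of a contained indirection, or the
//           operands of a contained operator) are consumed recursively instead.
//
// Return Value:
//    None.
//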
void CodeGen::genConsumeRegs(GenTree* tree)
{
#if !defined(TARGET_64BIT)
if (tree->OperGet() == GT_LONG)
{
genConsumeRegs(tree->gtGetOp1());
genConsumeRegs(tree->gtGetOp2());
return;
}
#endif // !defined(TARGET_64BIT)
if (tree->isUsedFromSpillTemp())
{
// spill temps are un-tracked and hence no need to update life
}
else if (tree->isContained())
{
if (tree->OperIsIndir())
{
genConsumeAddress(tree->AsIndir()->Addr());
}
else if (tree->OperIs(GT_LEA))
{
genConsumeAddress(tree);
}
#ifdef TARGET_ARM64
else if (tree->OperIs(GT_BFIZ))
{
// Can be contained as part of LEA on ARM64
GenTreeCast* cast = tree->gtGetOp1()->AsCast();
assert(cast->isContained());
genConsumeAddress(cast->CastOp());
}
#endif
else if (tree->OperIsLocalRead())
{
// A contained lcl var must be living on the stack and marked as reg optional, or must not be a
// register candidate.
unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
noway_assert(varDsc->GetRegNum() == REG_STK);
noway_assert(tree->IsRegOptional() || !varDsc->lvLRACandidate);
// Update the life of the lcl var.
genUpdateLife(tree);
}
#ifdef TARGET_XARCH
#ifdef FEATURE_HW_INTRINSICS
else if (tree->OperIs(GT_HWINTRINSIC))
{
// Only load/store HW intrinsics can be contained (and the address may also be contained).
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(tree->AsHWIntrinsic()->GetHWIntrinsicId());
assert((category == HW_Category_MemoryLoad) || (category == HW_Category_MemoryStore));
size_t numArgs = tree->AsHWIntrinsic()->GetOperandCount();
genConsumeAddress(tree->AsHWIntrinsic()->Op(1));
if (category == HW_Category_MemoryStore)
{
assert(numArgs == 2);
GenTree* op2 = tree->AsHWIntrinsic()->Op(2);
assert(op2->isContained());
genConsumeReg(op2);
}
else
{
assert(numArgs == 1);
}
}
#endif // FEATURE_HW_INTRINSICS
#endif // TARGET_XARCH
else if (tree->OperIs(GT_BITCAST, GT_NEG, GT_CAST, GT_LSH))
{
genConsumeRegs(tree->gtGetOp1());
}
else if (tree->OperIs(GT_MUL))
{
genConsumeRegs(tree->gtGetOp1());
genConsumeRegs(tree->gtGetOp2());
}
else
{
#ifdef FEATURE_SIMD
// An (in)equality operation that produces a bool result, when compared
// against Vector zero, marks its Vector zero operand as contained.
assert(tree->OperIsLeaf() || tree->IsSIMDZero() || tree->IsVectorZero());
#else
assert(tree->OperIsLeaf());
#endif
}
}
else
{
genConsumeReg(tree);
}
}
//------------------------------------------------------------------------
// genConsumeOperands: Do liveness update for the operands of a unary or binary tree
//
// Arguments:
// tree - the GenTreeOp whose operands will have their liveness updated.
//
// Return Value:
// None.
//
void CodeGen::genConsumeOperands(GenTreeOp* tree)
{
GenTree* firstOp = tree->gtOp1;
GenTree* secondOp = tree->gtOp2;
if (firstOp != nullptr)
{
genConsumeRegs(firstOp);
}
if (secondOp != nullptr)
{
genConsumeRegs(secondOp);
}
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// genConsumeOperands: Do liveness update for the operands of a multi-operand node,
// currently GT_SIMD or GT_HWINTRINSIC
//
// Arguments:
// tree - the GenTreeMultiOp whose operands will have their liveness updated.
//
// Return Value:
// None.
//
void CodeGen::genConsumeMultiOpOperands(GenTreeMultiOp* tree)
{
for (GenTree* operand : tree->Operands())
{
genConsumeRegs(operand);
}
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if FEATURE_PUT_STRUCT_ARG_STK
//------------------------------------------------------------------------
// genConsumePutStructArgStk: Do liveness update for the operands of a PutArgStk node.
// Also loads in the right register the addresses of the
// src/dst for rep mov operation.
//
// Arguments:
// putArgNode - the PUTARG_STK tree.
// dstReg - the dstReg for the rep move operation.
// srcReg - the srcReg for the rep move operation.
// sizeReg - the sizeReg for the rep move operation.
//
// Return Value:
// None.
//
// Notes:
// sizeReg can be REG_NA when this function is used to consume the dstReg and srcReg
// for copying a struct with references onto the stack.
// The source address/offset is determined from the address on the GT_OBJ node, while
// the destination address is the address contained in 'm_stkArgVarNum' plus the offset
// provided in the 'putArgNode'.
// m_stkArgVarNum must be set to the varnum for the local used for placing the "by-value" args on the stack.
void CodeGen::genConsumePutStructArgStk(GenTreePutArgStk* putArgNode,
regNumber dstReg,
regNumber srcReg,
regNumber sizeReg)
{
// The putArgNode children are always contained. We should not consume any registers.
assert(putArgNode->gtGetOp1()->isContained());
// Get the source address.
GenTree* src = putArgNode->gtGetOp1();
assert(varTypeIsStruct(src));
assert((src->gtOper == GT_OBJ) || ((src->gtOper == GT_IND && varTypeIsSIMD(src))));
GenTree* srcAddr = src->gtGetOp1();
assert(dstReg != REG_NA);
assert(srcReg != REG_NA);
// Consume the registers only if they are not contained or set to REG_NA.
if (srcAddr->GetRegNum() != REG_NA)
{
genConsumeReg(srcAddr);
}
// If the op1 is already in the dstReg - nothing to do.
// Otherwise load the op1 (GT_ADDR) into the dstReg to copy the struct on the stack by value.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
assert(dstReg != REG_SPBASE);
inst_Mov(TYP_I_IMPL, dstReg, REG_SPBASE, /* canSkip */ false);
#else // !TARGET_X86
GenTree* dstAddr = putArgNode;
if (dstAddr->GetRegNum() != dstReg)
{
// Generate an LEA instruction to load the stack address of the outgoing var + SlotNum offset (or the
// incoming arg area for tail calls) into RDI.
// Destination is always local (on the stack) - use EA_PTRSIZE.
assert(m_stkArgVarNum != BAD_VAR_NUM);
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, dstReg, m_stkArgVarNum, putArgNode->getArgOffset());
}
#endif // !TARGET_X86
if (srcAddr->OperIsLocalAddr())
{
// The OperLocalAddr is always contained.
assert(srcAddr->isContained());
const GenTreeLclVarCommon* lclNode = srcAddr->AsLclVarCommon();
// Generate LEA instruction to load the LclVar address in RSI.
// Source is known to be on the stack. Use EA_PTRSIZE.
unsigned int offset = lclNode->GetLclOffs();
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, srcReg, lclNode->GetLclNum(), offset);
}
else
{
assert(srcAddr->GetRegNum() != REG_NA);
// Source is not known to be on the stack. Use EA_BYREF.
GetEmitter()->emitIns_Mov(INS_mov, EA_BYREF, srcReg, srcAddr->GetRegNum(), /* canSkip */ true);
}
if (sizeReg != REG_NA)
{
unsigned size = putArgNode->GetStackByteSize();
inst_RV_IV(INS_mov, sizeReg, size, EA_PTRSIZE);
}
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
#if FEATURE_ARG_SPLIT
//------------------------------------------------------------------------
// genConsumeArgSplitStruct: Consume register(s) in Call node to set split struct argument.
//
// Arguments:
// putArgNode - the PUTARG_SPLIT tree.
//
// Return Value:
// None.
//
void CodeGen::genConsumeArgSplitStruct(GenTreePutArgSplit* putArgNode)
{
assert(putArgNode->OperGet() == GT_PUTARG_SPLIT);
assert(putArgNode->gtHasReg(compiler));
genUnspillRegIfNeeded(putArgNode);
gcInfo.gcMarkRegSetNpt(putArgNode->gtGetRegMask());
genCheckConsumeNode(putArgNode);
}
#endif // FEATURE_ARG_SPLIT
//------------------------------------------------------------------------
// genPutArgStkFieldList: Generate code for a putArgStk whose source is a GT_FIELD_LIST
//
// Arguments:
// putArgStk - The putArgStk node
// outArgVarNum - The lclVar num for the argument
//
// Notes:
// The x86 version of this is in codegenxarch.cpp, and doesn't take an
// outArgVarNum, as it pushes its args onto the stack.
//
#ifndef TARGET_X86
void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArgVarNum)
{
assert(putArgStk->gtOp1->OperIs(GT_FIELD_LIST));
// Evaluate each of the GT_FIELD_LIST items into their register
// and store their register into the outgoing argument area.
const unsigned argOffset = putArgStk->getArgOffset();
for (GenTreeFieldList::Use& use : putArgStk->gtOp1->AsFieldList()->Uses())
{
GenTree* nextArgNode = use.GetNode();
genConsumeReg(nextArgNode);
regNumber reg = nextArgNode->GetRegNum();
var_types type = use.GetType();
unsigned thisFieldOffset = argOffset + use.GetOffset();
// Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
// argument area.
#if defined(FEATURE_SIMD) && defined(TARGET_ARM64)
// storing of TYP_SIMD12 (i.e. Vector3) argument.
if (compMacOsArm64Abi() && (type == TYP_SIMD12))
{
// Need an additional integer register to extract upper 4 bytes from data.
regNumber tmpReg = nextArgNode->GetSingleTempReg();
GetEmitter()->emitStoreSIMD12ToLclOffset(outArgVarNum, thisFieldOffset, reg, tmpReg);
}
else
#endif // FEATURE_SIMD
{
emitAttr attr = emitTypeSize(type);
GetEmitter()->emitIns_S_R(ins_Store(type), attr, reg, outArgVarNum, thisFieldOffset);
}
// We can't write beyond the arg area unless this is a tail call, in which case we use
// the first stack arg as the base of the incoming arg area.
#ifdef DEBUG
unsigned areaSize = compiler->lvaLclSize(outArgVarNum);
#if FEATURE_FASTTAILCALL
if (putArgStk->gtCall->IsFastTailCall())
{
areaSize = compiler->info.compArgStackSize;
}
#endif
assert((thisFieldOffset + genTypeSize(type)) <= areaSize);
#endif
}
}
#endif // !TARGET_X86
//------------------------------------------------------------------------
// genSetBlockSize: Ensure that the block size is in the given register
//
// Arguments:
// blkNode - The block node
// sizeReg - The register into which the block's size should go
//
void CodeGen::genSetBlockSize(GenTreeBlk* blkNode, regNumber sizeReg)
{
if (sizeReg != REG_NA)
{
unsigned blockSize = blkNode->Size();
if (!blkNode->OperIs(GT_STORE_DYN_BLK))
{
assert((blkNode->gtRsvdRegs & genRegMask(sizeReg)) != 0);
instGen_Set_Reg_To_Imm(EA_4BYTE, sizeReg, blockSize);
}
else
{
GenTree* sizeNode = blkNode->AsStoreDynBlk()->gtDynamicSize;
inst_Mov(sizeNode->TypeGet(), sizeReg, sizeNode->GetRegNum(), /* canSkip */ true);
}
}
}
//------------------------------------------------------------------------
// genConsumeBlockSrc: Consume the source address register of a block node, if any.
//
// Arguments:
// blkNode - The block node
void CodeGen::genConsumeBlockSrc(GenTreeBlk* blkNode)
{
GenTree* src = blkNode->Data();
if (blkNode->OperIsCopyBlkOp())
{
// For a CopyBlk we need the address of the source.
assert(src->isContained());
if (src->OperGet() == GT_IND)
{
src = src->AsOp()->gtOp1;
}
else
{
// This must be a local.
// For this case, there is no source address register, as it is a
// stack-based address.
assert(src->OperIsLocal());
return;
}
}
else
{
if (src->OperIsInitVal())
{
src = src->gtGetOp1();
}
}
genConsumeReg(src);
}
//------------------------------------------------------------------------
// genSetBlockSrc: Ensure that the block source is in its allocated register.
//
// Arguments:
// blkNode - The block node
// srcReg - The register in which to set the source (address or init val).
//
void CodeGen::genSetBlockSrc(GenTreeBlk* blkNode, regNumber srcReg)
{
GenTree* src = blkNode->Data();
if (blkNode->OperIsCopyBlkOp())
{
// For a CopyBlk we need the address of the source.
if (src->OperGet() == GT_IND)
{
src = src->AsOp()->gtOp1;
}
else
{
// This must be a local struct.
// Load its address into srcReg.
inst_RV_TT(INS_lea, srcReg, src, 0, EA_BYREF);
return;
}
}
else
{
if (src->OperIsInitVal())
{
src = src->gtGetOp1();
}
}
genCopyRegIfNeeded(src, srcReg);
}
//------------------------------------------------------------------------
// genConsumeBlockOp: Ensure that the block's operands are enregistered
// as needed.
// Arguments:
// blkNode - The block node
//
// Notes:
// This ensures that the operands are consumed in the proper order to
// obey liveness modeling.
void CodeGen::genConsumeBlockOp(GenTreeBlk* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg)
{
// We have to consume the registers, and perform any copies, in the actual execution order: dst, src, size.
//
// Note that the register allocator ensures that the registers ON THE NODES will not interfere
// with one another if consumed (i.e. reloaded or moved to their ASSIGNED reg) in execution order.
// Further, it ensures that they will not interfere with one another if they are then copied
// to the REQUIRED register (if a fixed register requirement) in execution order. This requires,
// then, that we first consume all the operands, then do any necessary moves.
GenTree* const dstAddr = blkNode->Addr();
// First, consume all the sources in order, and verify that registers have been allocated appropriately,
// based on the 'gtBlkOpKind'.
// The destination is always in a register; 'genConsumeReg' asserts that.
genConsumeReg(dstAddr);
// The source may be a local or in a register; 'genConsumeBlockSrc' will check that.
genConsumeBlockSrc(blkNode);
// 'genSetBlockSize' (called below) will ensure that a register has been reserved as needed
// in the case where the size is a constant (i.e. it is not GT_STORE_DYN_BLK).
if (blkNode->OperGet() == GT_STORE_DYN_BLK)
{
genConsumeReg(blkNode->AsStoreDynBlk()->gtDynamicSize);
}
// Next, perform any necessary moves.
genCopyRegIfNeeded(dstAddr, dstReg);
genSetBlockSrc(blkNode, srcReg);
genSetBlockSize(blkNode, sizeReg);
}
//-------------------------------------------------------------------------
// genSpillLocal: Generate the actual spill of a local var.
//
// Arguments:
// varNum - The variable number of the local to be spilled.
// It may be a local field.
// type - The type of the local.
// lclNode - The node being spilled. Note that for a multi-reg local,
// the gtLclNum will be that of the parent struct.
// regNum - The register that 'varNum' is currently in.
//
// Return Value:
// None.
//
void CodeGen::genSpillLocal(unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(!varDsc->lvNormalizeOnStore() || (type == varDsc->GetActualRegisterType()));
// We have a register candidate local that is marked with GTF_SPILL.
// This flag generally means that we need to spill this local.
// The exception is a use of an EH/spill-at-single-def var that is being "spilled"
// to the stack, indicated by GTF_SPILL (note that all EH lclVar defs are always
// spilled, i.e. write-thru; likewise, single-def vars are spilled at their definitions).
// An EH or single-def var use is always valid on the stack (so we don't need to actually spill it),
// but the GTF_SPILL flag records the fact that the register value is going dead.
if (((lclNode->gtFlags & GTF_VAR_DEF) != 0) || (!varDsc->IsAlwaysAliveInMemory()))
{
// Store local variable to its home location.
// Ensure that lclVar stores are typed correctly.
GetEmitter()->emitIns_S_R(ins_Store(type, compiler->isSIMDTypeLocalAligned(varNum)), emitTypeSize(type), regNum,
varNum, 0);
}
}
//-------------------------------------------------------------------------
// genProduceReg: do liveness update for register produced by the current
// node in codegen after code has been emitted for it.
//
// Arguments:
// tree - Gentree node
//
// Return Value:
// None.
void CodeGen::genProduceReg(GenTree* tree)
{
#ifdef DEBUG
assert((tree->gtDebugFlags & GTF_DEBUG_NODE_CG_PRODUCED) == 0);
tree->gtDebugFlags |= GTF_DEBUG_NODE_CG_PRODUCED;
#endif
if (tree->gtFlags & GTF_SPILL)
{
// Code for GT_COPY node gets generated as part of consuming regs by its parent.
// A GT_COPY node in turn produces reg result and it should never be marked to
// spill.
//
// Similarly GT_RELOAD node gets generated as part of consuming regs by its
// parent and should never be marked for spilling.
noway_assert(!tree->IsCopyOrReload());
if (genIsRegCandidateLocal(tree))
{
GenTreeLclVar* lclNode = tree->AsLclVar();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
const unsigned varNum = lclNode->GetLclNum();
const var_types spillType = varDsc->GetRegisterType(lclNode);
genSpillLocal(varNum, spillType, lclNode, tree->GetRegNum());
}
else if (tree->IsMultiRegLclVar())
{
assert(compiler->lvaEnregMultiRegVars);
GenTreeLclVar* lclNode = tree->AsLclVar();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
const unsigned regCount = lclNode->GetFieldCount(compiler);
for (unsigned i = 0; i < regCount; ++i)
{
GenTreeFlags flags = lclNode->GetRegSpillFlagByIdx(i);
if ((flags & GTF_SPILL) != 0)
{
const regNumber reg = lclNode->GetRegNumByIdx(i);
const unsigned fieldVarNum = varDsc->lvFieldLclStart + i;
const var_types spillType = compiler->lvaGetDesc(fieldVarNum)->GetRegisterType();
genSpillLocal(fieldVarNum, spillType, lclNode, reg);
}
}
}
else
{
// In case of multi-reg call node, spill flag on call node
// indicates that one or more of its allocated regs need to
// be spilled. Call node needs to be further queried to
// know which of its result regs needs to be spilled.
if (tree->IsMultiRegCall())
{
GenTreeCall* call = tree->AsCall();
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
const unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
GenTreeFlags flags = call->GetRegSpillFlagByIdx(i);
if ((flags & GTF_SPILL) != 0)
{
regNumber reg = call->GetRegNumByIdx(i);
regSet.rsSpillTree(reg, call, i);
gcInfo.gcMarkRegSetNpt(genRegMask(reg));
}
}
}
#if FEATURE_ARG_SPLIT
else if (tree->OperIsPutArgSplit())
{
assert(compFeatureArgSplit());
GenTreePutArgSplit* argSplit = tree->AsPutArgSplit();
unsigned regCount = argSplit->gtNumRegs;
for (unsigned i = 0; i < regCount; ++i)
{
GenTreeFlags flags = argSplit->GetRegSpillFlagByIdx(i);
if ((flags & GTF_SPILL) != 0)
{
regNumber reg = argSplit->GetRegNumByIdx(i);
regSet.rsSpillTree(reg, argSplit, i);
gcInfo.gcMarkRegSetNpt(genRegMask(reg));
}
}
}
#ifdef TARGET_ARM
else if (compFeatureArgSplit() && tree->OperIsMultiRegOp())
{
GenTreeMultiRegOp* multiReg = tree->AsMultiRegOp();
unsigned regCount = multiReg->GetRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
GenTreeFlags flags = multiReg->GetRegSpillFlagByIdx(i);
if ((flags & GTF_SPILL) != 0)
{
regNumber reg = multiReg->GetRegNumByIdx(i);
regSet.rsSpillTree(reg, multiReg, i);
gcInfo.gcMarkRegSetNpt(genRegMask(reg));
}
}
}
#endif // TARGET_ARM
#endif // FEATURE_ARG_SPLIT
else
{
regSet.rsSpillTree(tree->GetRegNum(), tree);
gcInfo.gcMarkRegSetNpt(genRegMask(tree->GetRegNum()));
}
tree->gtFlags |= GTF_SPILLED;
tree->gtFlags &= ~GTF_SPILL;
return;
}
}
// Update variable liveness after the instruction has been emitted
genUpdateLife(tree);
// If we've produced a register, mark it as a pointer, as needed.
if (tree->gtHasReg(compiler))
{
// We only mark the register in the following cases:
// 1. It is not a register candidate local. In this case, we're producing a
// register from a local, but the local is not a register candidate. Thus,
// we must be loading it as a temp register, and any "last use" flag on
// the register wouldn't be relevant.
// 2. The register candidate local is going dead. There's no point to mark
// the register as live, with a GC pointer, if the variable is dead.
if (!genIsRegCandidateLocal(tree) || ((tree->gtFlags & GTF_VAR_DEATH) == 0))
{
// Multi-reg nodes will produce more than one register result.
// Mark all the regs produced by the node.
if (tree->IsMultiRegCall())
{
const GenTreeCall* call = tree->AsCall();
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
const unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = call->GetRegNumByIdx(i);
var_types type = retTypeDesc->GetReturnRegType(i);
gcInfo.gcMarkRegPtrVal(reg, type);
}
}
else if (tree->IsCopyOrReloadOfMultiRegCall())
{
// we should never see reload of multi-reg call here
// because GT_RELOAD gets generated in reg consuming path.
noway_assert(tree->OperGet() == GT_COPY);
// A multi-reg GT_COPY node produces those regs to which
// copy has taken place.
const GenTreeCopyOrReload* copy = tree->AsCopyOrReload();
const GenTreeCall* call = copy->gtGetOp1()->AsCall();
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
const unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc->GetReturnRegType(i);
regNumber toReg = copy->GetRegNumByIdx(i);
if (toReg != REG_NA)
{
gcInfo.gcMarkRegPtrVal(toReg, type);
}
}
}
else if (tree->IsMultiRegLclVar())
{
assert(compiler->lvaEnregMultiRegVars);
GenTreeLclVar* lclNode = tree->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
unsigned regCount = varDsc->lvFieldCnt;
for (unsigned i = 0; i < regCount; i++)
{
if (!lclNode->IsLastUse(i))
{
regNumber reg = lclNode->GetRegByIndex(i);
if (reg != REG_NA)
{
var_types type = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i)->TypeGet();
gcInfo.gcMarkRegPtrVal(reg, type);
}
}
}
}
else
{
gcInfo.gcMarkRegPtrVal(tree->GetRegNum(), tree->TypeGet());
}
}
}
}
// transfer gc/byref status of src reg to dst reg
void CodeGen::genTransferRegGCState(regNumber dst, regNumber src)
{
regMaskTP srcMask = genRegMask(src);
regMaskTP dstMask = genRegMask(dst);
if (gcInfo.gcRegGCrefSetCur & srcMask)
{
gcInfo.gcMarkRegSetGCref(dstMask);
}
else if (gcInfo.gcRegByrefSetCur & srcMask)
{
gcInfo.gcMarkRegSetByref(dstMask);
}
else
{
gcInfo.gcMarkRegSetNpt(dstMask);
}
}
// generates an ip-relative call or indirect call via reg ('call reg')
// pass in 'addr' for a relative call or 'base' for an indirect register call
// methHnd - optional, only used for pretty printing
// retSize - emitter type of return for GC purposes, should be EA_BYREF, EA_GCREF, or EA_PTRSIZE(not GC)
//
// clang-format off
void CodeGen::genEmitCall(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
void* addr
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
regNumber base,
bool isJump)
{
#if !defined(TARGET_X86)
int argSize = 0;
#endif // !defined(TARGET_X86)
// This should have been put in volatile registers to ensure it does not
// get overwritten by the epilog sequence during a tailcall.
noway_assert(!isJump || (base == REG_NA) || ((RBM_INT_CALLEE_TRASH & genRegMask(base)) != 0));
GetEmitter()->emitIns_Call(emitter::EmitCallType(callType),
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
addr,
argSize,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di, base, REG_NA, 0, 0, isJump);
}
// clang-format on
// generates an indirect call via addressing mode (call []) given an indir node
// methHnd - optional, only used for pretty printing
// retSize - emitter type of return for GC purposes, should be EA_BYREF, EA_GCREF, or EA_PTRSIZE(not GC)
//
// clang-format off
void CodeGen::genEmitCallIndir(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
GenTreeIndir* indir
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
bool isJump)
{
#if !defined(TARGET_X86)
int argSize = 0;
#endif // !defined(TARGET_X86)
regNumber iReg = (indir->Base() != nullptr) ? indir->Base()->GetRegNum() : REG_NA;
regNumber xReg = (indir->Index() != nullptr) ? indir->Index()->GetRegNum() : REG_NA;
// These should have been put in volatile registers to ensure they do not
// get overwritten by the epilog sequence during a tailcall.
noway_assert(!isJump || (iReg == REG_NA) || ((RBM_CALLEE_TRASH & genRegMask(iReg)) != 0));
noway_assert(!isJump || (xReg == REG_NA) || ((RBM_CALLEE_TRASH & genRegMask(xReg)) != 0));
GetEmitter()->emitIns_Call(emitter::EmitCallType(callType),
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
argSize,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di,
iReg,
xReg,
indir->Scale(),
indir->Offset(),
isJump);
}
// clang-format on
//------------------------------------------------------------------------
// genCodeForCast: Generates the code for GT_CAST.
//
// Arguments:
// tree - the GT_CAST node.
//
void CodeGen::genCodeForCast(GenTreeOp* tree)
{
assert(tree->OperIs(GT_CAST));
var_types targetType = tree->TypeGet();
if (varTypeIsFloating(targetType) && varTypeIsFloating(tree->gtOp1))
{
// Casts float/double <--> double/float
genFloatToFloatCast(tree);
}
else if (varTypeIsFloating(tree->gtOp1))
{
// Casts float/double --> int32/int64
genFloatToIntCast(tree);
}
else if (varTypeIsFloating(targetType))
{
// Casts int32/uint32/int64/uint64 --> float/double
genIntToFloatCast(tree);
}
#ifndef TARGET_64BIT
else if (varTypeIsLong(tree->gtOp1))
{
genLongToIntCast(tree);
}
#endif // !TARGET_64BIT
else
{
// Casts int <--> int
genIntToIntCast(tree->AsCast());
}
// The per-case functions call genProduceReg()
}
CodeGen::GenIntCastDesc::GenIntCastDesc(GenTreeCast* cast)
{
const var_types srcType = genActualType(cast->gtGetOp1()->TypeGet());
const bool srcUnsigned = cast->IsUnsigned();
const unsigned srcSize = genTypeSize(srcType);
const var_types castType = cast->gtCastType;
const bool castUnsigned = varTypeIsUnsigned(castType);
const unsigned castSize = genTypeSize(castType);
const var_types dstType = genActualType(cast->TypeGet());
const unsigned dstSize = genTypeSize(dstType);
const bool overflow = cast->gtOverflow();
assert((srcSize == 4) || (srcSize == genTypeSize(TYP_I_IMPL)));
assert((dstSize == 4) || (dstSize == genTypeSize(TYP_I_IMPL)));
assert(dstSize == genTypeSize(genActualType(castType)));
if (castSize < 4) // Cast to small int type
{
if (overflow)
{
m_checkKind = CHECK_SMALL_INT_RANGE;
m_checkSrcSize = srcSize;
// Since these are small int types we can compute the min and max
// values of the castType without risk of integer overflow.
const int castNumBits = (castSize * 8) - (castUnsigned ? 0 : 1);
m_checkSmallIntMax = (1 << castNumBits) - 1;
m_checkSmallIntMin = (castUnsigned | srcUnsigned) ? 0 : (-m_checkSmallIntMax - 1);
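// For example, a cast to TYP_BYTE (castSize 1, signed) gives castNumBits == 7, so for a
// signed source the range checked is [-128, 127]; a cast to TYP_USHORT (castSize 2,
// unsigned) gives castNumBits == 16 and the range [0, 65535].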
m_extendKind = COPY;
m_extendSrcSize = dstSize;
}
else
{
m_checkKind = CHECK_NONE;
// Casting to a small type really means widening from that small type to INT/LONG.
m_extendKind = castUnsigned ? ZERO_EXTEND_SMALL_INT : SIGN_EXTEND_SMALL_INT;
m_extendSrcSize = castSize;
}
}
#ifdef TARGET_64BIT
// castType cannot be (U)LONG on 32 bit targets, such casts should have been decomposed.
// srcType cannot be a small int type since it's the "actual type" of the cast operand.
// This means that widening casts do not occur on 32 bit targets.
else if (castSize > srcSize) // (U)INT to (U)LONG widening cast
{
assert((srcSize == 4) && (castSize == 8));
if (overflow && !srcUnsigned && castUnsigned)
{
// Widening from INT to ULONG, check if the value is positive
m_checkKind = CHECK_POSITIVE;
m_checkSrcSize = 4;
// This is the only overflow checking cast that requires changing the
// source value (by zero extending), all others copy the value as is.
assert((srcType == TYP_INT) && (castType == TYP_ULONG));
m_extendKind = ZERO_EXTEND_INT;
m_extendSrcSize = 4;
}
else
{
m_checkKind = CHECK_NONE;
m_extendKind = srcUnsigned ? ZERO_EXTEND_INT : SIGN_EXTEND_INT;
m_extendSrcSize = 4;
}
}
else if (castSize < srcSize) // (U)LONG to (U)INT narrowing cast
{
assert((srcSize == 8) && (castSize == 4));
if (overflow)
{
if (castUnsigned) // (U)LONG to UINT cast
{
m_checkKind = CHECK_UINT_RANGE;
}
else if (srcUnsigned) // ULONG to INT cast
{
m_checkKind = CHECK_POSITIVE_INT_RANGE;
}
else // LONG to INT cast
{
m_checkKind = CHECK_INT_RANGE;
}
m_checkSrcSize = 8;
}
else
{
m_checkKind = CHECK_NONE;
}
m_extendKind = COPY;
m_extendSrcSize = 4;
}
#endif
else // if (castSize == srcSize) // Sign changing or same type cast
{
assert(castSize == srcSize);
if (overflow && (srcUnsigned != castUnsigned))
{
m_checkKind = CHECK_POSITIVE;
m_checkSrcSize = srcSize;
}
else
{
m_checkKind = CHECK_NONE;
}
m_extendKind = COPY;
m_extendSrcSize = srcSize;
}
}
#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------
// genStoreLongLclVar: Generate code to store a non-enregistered long lclVar
//
// Arguments:
// treeNode - A TYP_LONG lclVar node.
//
// Return Value:
// None.
//
// Assumptions:
// 'treeNode' must be a TYP_LONG lclVar node for a lclVar that has NOT been promoted.
// Its operand must be a GT_LONG node.
//
void CodeGen::genStoreLongLclVar(GenTree* treeNode)
{
emitter* emit = GetEmitter();
GenTreeLclVarCommon* lclNode = treeNode->AsLclVarCommon();
unsigned lclNum = lclNode->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
assert(varDsc->TypeGet() == TYP_LONG);
assert(!varDsc->lvPromoted);
GenTree* op1 = treeNode->AsOp()->gtOp1;
// A GT_LONG is always contained, so it cannot have RELOAD or COPY inserted between it and its consumer,
// but a MUL_LONG may.
noway_assert(op1->OperIs(GT_LONG) || op1->gtSkipReloadOrCopy()->OperIs(GT_MUL_LONG));
genConsumeRegs(op1);
if (op1->OperGet() == GT_LONG)
{
GenTree* loVal = op1->gtGetOp1();
GenTree* hiVal = op1->gtGetOp2();
noway_assert((loVal->GetRegNum() != REG_NA) && (hiVal->GetRegNum() != REG_NA));
emit->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, loVal->GetRegNum(), lclNum, 0);
emit->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, hiVal->GetRegNum(), lclNum, genTypeSize(TYP_INT));
}
else
{
assert((op1->gtSkipReloadOrCopy()->gtFlags & GTF_MUL_64RSLT) != 0);
// This is either a multi-reg MUL_LONG, or a multi-reg reload or copy.
assert(op1->IsMultiRegNode() && (op1->GetMultiRegCount(compiler) == 2));
// Stack store
emit->emitIns_S_R(ins_Store(TYP_INT), emitTypeSize(TYP_INT), op1->GetRegByIndex(0), lclNum, 0);
emit->emitIns_S_R(ins_Store(TYP_INT), emitTypeSize(TYP_INT), op1->GetRegByIndex(1), lclNum,
genTypeSize(TYP_INT));
}
}
#endif // !defined(TARGET_64BIT)
//------------------------------------------------------------------------
// genCodeForJumpTrue: Generate code for a GT_JTRUE node.
//
// Arguments:
// jtrue - The node
//
void CodeGen::genCodeForJumpTrue(GenTreeOp* jtrue)
{
assert(compiler->compCurBB->bbJumpKind == BBJ_COND);
assert(jtrue->OperIs(GT_JTRUE));
GenTreeOp* relop = jtrue->gtGetOp1()->AsOp();
GenCondition condition = GenCondition::FromRelop(relop);
if (condition.PreferSwap())
{
condition = GenCondition::Swap(condition);
}
#if defined(TARGET_XARCH)
if ((condition.GetCode() == GenCondition::FNEU) &&
(relop->gtGetOp1()->GetRegNum() == relop->gtGetOp2()->GetRegNum()) &&
!relop->gtGetOp1()->isUsedFromSpillTemp() && !relop->gtGetOp2()->isUsedFromSpillTemp())
{
// For floating point, `x != x` is a common way of
// checking for NaN. So, in the case where both
// operands are the same, we can optimize codegen
// to only do a single check.
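        // (UCOMISS/UCOMISD sets PF when the compare is unordered, i.e. when either operand
        // is NaN, so checking PF alone is sufficient here.)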
condition = GenCondition(GenCondition::P);
}
if (relop->MarkedForSignJumpOpt())
{
// If relop was previously marked for a signed jump check optimization because of SF flag
// reuse, replace jge/jl with jns/js.
assert(relop->OperGet() == GT_LT || relop->OperGet() == GT_GE);
condition = (relop->OperGet() == GT_LT) ? GenCondition(GenCondition::S) : GenCondition(GenCondition::NS);
}
#endif
inst_JCC(condition, compiler->compCurBB->bbJumpDest);
}
//------------------------------------------------------------------------
// genCodeForJcc: Generate code for a GT_JCC node.
//
// Arguments:
// jcc - The node
//
void CodeGen::genCodeForJcc(GenTreeCC* jcc)
{
assert(compiler->compCurBB->bbJumpKind == BBJ_COND);
assert(jcc->OperIs(GT_JCC));
inst_JCC(jcc->gtCondition, compiler->compCurBB->bbJumpDest);
}
//------------------------------------------------------------------------
// inst_JCC: Generate a conditional branch instruction sequence.
//
// Arguments:
// condition - The branch condition
// target - The basic block to jump to when the condition is true
//
void CodeGen::inst_JCC(GenCondition condition, BasicBlock* target)
{
const GenConditionDesc& desc = GenConditionDesc::Get(condition);
if (desc.oper == GT_NONE)
{
inst_JMP(desc.jumpKind1, target);
}
else if (desc.oper == GT_OR)
{
inst_JMP(desc.jumpKind1, target);
inst_JMP(desc.jumpKind2, target);
}
else // if (desc.oper == GT_AND)
{
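        // Both conditions must hold, so fall past the branch as soon as the first one fails.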
BasicBlock* labelNext = genCreateTempLabel();
inst_JMP(emitter::emitReverseJumpKind(desc.jumpKind1), labelNext);
inst_JMP(desc.jumpKind2, target);
genDefineTempLabel(labelNext);
}
}
//------------------------------------------------------------------------
// genCodeForSetcc: Generate code for a GT_SETCC node.
//
// Arguments:
// setcc - The node
//
void CodeGen::genCodeForSetcc(GenTreeCC* setcc)
{
assert(setcc->OperIs(GT_SETCC));
inst_SETCC(setcc->gtCondition, setcc->TypeGet(), setcc->GetRegNum());
genProduceReg(setcc);
}
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/codegenxarch.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Amd64/x86 Code Generator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#pragma warning(disable : 4310) // cast truncates constant value - happens for (int8_t)0xb1
#endif
#ifdef TARGET_XARCH
#include "emit.h"
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "gcinfoencoder.h"
#include "patchpointinfo.h"
//---------------------------------------------------------------------
// genSetGSSecurityCookie: Set the "GS" security cookie in the prolog.
//
// Arguments:
// initReg - register to use as a scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->getNeedsGSSecurityCookie())
{
return;
}
if (compiler->opts.IsOSR() && compiler->info.compPatchpointInfo->HasSecurityCookie())
{
// Security cookie is on original frame and was initialized there.
return;
}
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
noway_assert(compiler->gsGlobalSecurityCookieVal != 0);
#ifdef TARGET_AMD64
if ((size_t)(int)compiler->gsGlobalSecurityCookieVal != compiler->gsGlobalSecurityCookieVal)
{
// initReg = #GlobalSecurityCookieVal64; [frame.GSSecurityCookie] = initReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
*pInitRegZeroed = false;
}
else
#endif
{
// mov dword ptr [frame.GSSecurityCookie], #GlobalSecurityCookieVal
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
(int)compiler->gsGlobalSecurityCookieVal);
}
}
else
{
// Always use EAX on x86 and x64
// On x64, if we're not moving into RAX, and the address isn't RIP relative, we can't encode it.
// mov eax, dword ptr [compiler->gsGlobalSecurityCookieAddr]
// mov dword ptr [frame.GSSecurityCookie], eax
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_EAX, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
regSet.verifyRegUsed(REG_EAX);
GetEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, REG_EAX, compiler->lvaGSSecurityCookie, 0);
if (initReg == REG_EAX)
{
*pInitRegZeroed = false;
}
}
}
/*****************************************************************************
*
 * Generate code to check that the GS cookie wasn't trashed by a buffer
* overrun. If pushReg is true, preserve all registers around code sequence.
* Otherwise ECX could be modified.
*
* Implementation Note: pushReg = true, in case of tail calls.
*/
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
// Make sure that EAX is reported as live GC-ref so that any GC that kicks in while
// executing GS cookie check will not collect the object pointed to by EAX.
//
// For Amd64 System V, a two-register-returned struct could be returned in RAX and RDX
// In such case make sure that the correct GC-ness of RDX is reported as well, so
// a GC object pointed by RDX will not be collected.
if (!pushReg)
{
// Handle multi-reg return type values
if (compiler->compMethodReturnsMultiRegRetType())
{
ReturnTypeDesc retTypeDesc;
if (varTypeIsLong(compiler->info.compRetNativeType))
{
retTypeDesc.InitializeLongReturnType();
}
else // we must have a struct return type
{
retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass,
compiler->info.compCallConv);
}
const unsigned regCount = retTypeDesc.GetReturnRegCount();
            // Only the x86 and x64 Unix ABIs allow multi-reg return, and the
// number of result regs should be equal to MAX_RET_REG_COUNT.
assert(regCount == MAX_RET_REG_COUNT);
for (unsigned i = 0; i < regCount; ++i)
{
gcInfo.gcMarkRegPtrVal(retTypeDesc.GetABIReturnReg(i), retTypeDesc.GetReturnRegType(i));
}
}
else if (compiler->compMethodReturnsRetBufAddr())
{
// This is for returning in an implicit RetBuf.
// If the address of the buffer is returned in REG_INTRET, mark the content of INTRET as ByRef.
// In case the return is in an implicit RetBuf, the native return type should be a struct
assert(varTypeIsStruct(compiler->info.compRetNativeType));
gcInfo.gcMarkRegPtrVal(REG_INTRET, TYP_BYREF);
}
// ... all other cases.
else
{
#ifdef TARGET_AMD64
// For x64, structs that are not returned in registers are always
// returned in implicit RetBuf. If we reached here, we should not have
// a RetBuf and the return type should not be a struct.
assert(compiler->info.compRetBuffArg == BAD_VAR_NUM);
assert(!varTypeIsStruct(compiler->info.compRetNativeType));
#endif // TARGET_AMD64
// For x86 Windows we can't make such assertions since we generate code for returning of
// the RetBuf in REG_INTRET only when the ProfilerHook is enabled. Otherwise
// compRetNativeType could be TYP_STRUCT.
gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
}
}
regNumber regGSCheck;
regMaskTP regMaskGSCheck = RBM_NONE;
if (!pushReg)
{
// Non-tail call: we can use any callee trash register that is not
// a return register or contain 'this' pointer (keep alive this), since
// we are generating GS cookie check after a GT_RETURN block.
// Note: On Amd64 System V RDX is an arg register - REG_ARG_2 - as well
// as return register for two-register-returned structs.
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaGetDesc(compiler->info.compThisArg)->lvIsInReg() &&
(compiler->lvaGetDesc(compiler->info.compThisArg)->GetRegNum() == REG_ARG_0))
{
regGSCheck = REG_ARG_1;
}
else
{
regGSCheck = REG_ARG_0;
}
}
else
{
#ifdef TARGET_X86
// It doesn't matter which register we pick, since we're going to save and restore it
// around the check.
// TODO-CQ: Can we optimize the choice of register to avoid doing the push/pop sometimes?
regGSCheck = REG_EAX;
regMaskGSCheck = RBM_EAX;
#else // !TARGET_X86
        // Jmp calls: the method handle is specified, and the JIT uses it to query the VM for the
        // entry point address; hence this can be neither a VSD call nor a PInvoke calli with a cookie
        // parameter. Therefore, in the case of jmp calls it is safe to use R11.
regGSCheck = REG_R11;
#endif // !TARGET_X86
}
regMaskTP byrefPushedRegs = RBM_NONE;
regMaskTP norefPushedRegs = RBM_NONE;
regMaskTP pushedRegs = RBM_NONE;
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
#if defined(TARGET_AMD64)
// If GS cookie value fits within 32-bits we can use 'cmp mem64, imm32'.
// Otherwise, load the value into a reg and use 'cmp mem64, reg64'.
if ((int)compiler->gsGlobalSecurityCookieVal != (ssize_t)compiler->gsGlobalSecurityCookieVal)
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, regGSCheck, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
else
#endif // defined(TARGET_AMD64)
{
assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
(int)compiler->gsGlobalSecurityCookieVal);
}
}
else
{
// Ngen case - GS cookie value needs to be accessed through an indirection.
pushedRegs = genPushRegs(regMaskGSCheck, &byrefPushedRegs, &norefPushedRegs);
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSCheck, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
GetEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regGSCheck, regGSCheck, 0);
GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
BasicBlock* gsCheckBlk = genCreateTempLabel();
inst_JMP(EJ_je, gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
genPopRegs(pushedRegs, byrefPushedRegs, norefPushedRegs);
}
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
#if defined(FEATURE_EH_FUNCLETS)
// Generate a call to the finally, like this:
// mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym
// call finally-funclet
// jmp finally-return // Only for non-retless finally calls
// The jmp can be a NOP if we're going to the next block.
// If we're generating code for the main function (not a funclet), and there is no localloc,
// then RSP at this point is the same value as that stored in the PSPSym. So just copy RSP
// instead of loading the PSPSym in this case, or if PSPSym is not used (CoreRT ABI).
if ((compiler->lvaPSPSym == BAD_VAR_NUM) ||
(!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT)))
{
#ifndef UNIX_X86_ABI
inst_Mov(TYP_I_IMPL, REG_ARG_0, REG_SPBASE, /* canSkip */ false);
#endif // !UNIX_X86_ABI
}
else
{
GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
}
GetEmitter()->emitIns_J(INS_call, block->bbJumpDest);
if (block->bbFlags & BBF_RETLESS_CALL)
{
// We have a retless call, and the last instruction generated was a call.
// If the next block is in a different EH region (or is the end of the code
// block), then we need to generate a breakpoint here (since it will never
// get executed) to get proper unwind behavior.
if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
{
instGen(INS_BREAKPOINT); // This should never get executed
}
}
else
{
// TODO-Linux-x86: Do we need to handle the GC information for this NOP or JMP specially, as is done for other
// architectures?
#ifndef JIT32_GCENCODER
// Because of the way the flowgraph is connected, the liveness info for this one instruction
// after the call is not (can not be) correct in cases where a variable has a last use in the
// handler. So turn off GC reporting for this single instruction.
GetEmitter()->emitDisableGC();
#endif // JIT32_GCENCODER
// Now go to where the finally funclet needs to return to.
if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
{
// Fall-through.
// TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly
// to the next instruction? This would depend on stack walking from within the finally
// handler working without this instruction being in this special EH region.
instGen(INS_nop);
}
else
{
inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
}
#ifndef JIT32_GCENCODER
GetEmitter()->emitEnableGC();
#endif // JIT32_GCENCODER
}
#else // !FEATURE_EH_FUNCLETS
// If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot
// corresponding to the finally's nesting level. When invoked in response to an exception, the
// EE does this.
//
// We have a BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
//
// We will emit :
// mov [ebp - (n + 1)], 0
// mov [ebp - n ], 0xFC
// push &step
// jmp finallyBlock
// ...
// step:
// mov [ebp - n ], 0
// jmp leaveTarget
// ...
// leaveTarget:
noway_assert(isFramePointerUsed());
// Get the nesting level which contains the finally
unsigned finallyNesting = 0;
compiler->fgGetNestingLevel(block, &finallyNesting);
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
unsigned curNestingSlotOffs;
curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));
// Zero out the slot for the next nesting level
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar,
curNestingSlotOffs - TARGET_POINTER_SIZE, 0);
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, LCL_FINALLY_MARK);
// Now push the address where the finally funclet should return to directly.
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
GetEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
}
else
{
// EE expects a DWORD, so we provide 0
inst_IV(INS_push_hide, 0);
}
// Jump to the finally BB
inst_JMP(EJ_jmp, block->bbJumpDest);
#endif // !FEATURE_EH_FUNCLETS
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
// jump target using bbJumpDest - that is already used to point
// to the finally block. So just skip past the BBJ_ALWAYS unless the
// block is RETLESS.
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
block = block->bbNext;
}
return block;
}
#if defined(FEATURE_EH_FUNCLETS)
void CodeGen::genEHCatchRet(BasicBlock* block)
{
// Set RAX to the address the VM should return to after the catch.
// Generate a RIP-relative
// lea reg, [rip + disp32] ; the RIP is implicit
// which will be position-independent.
GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
}
#else // !FEATURE_EH_FUNCLETS
void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block)
{
// The last statement of the block must be a GT_RETFILT, which has already been generated.
assert(block->lastNode() != nullptr);
assert(block->lastNode()->OperGet() == GT_RETFILT);
if (block->bbJumpKind == BBJ_EHFINALLYRET)
{
assert(block->lastNode()->AsOp()->gtOp1 == nullptr); // op1 == nullptr means endfinally
// Return using a pop-jmp sequence. As the "try" block calls
// the finally with a jmp, this leaves the x86 call-ret stack
// balanced in the normal flow of path.
noway_assert(isFramePointerRequired());
inst_RV(INS_pop_hide, REG_EAX, TYP_I_IMPL);
inst_RV(INS_i_jmp, REG_EAX, TYP_I_IMPL);
}
else
{
assert(block->bbJumpKind == BBJ_EHFILTERRET);
// The return value has already been computed.
instGen_Return(0);
}
}
#endif // !FEATURE_EH_FUNCLETS
// Move an immediate value into an integer register
void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags))
{
// reg cannot be a FP register
assert(!genIsValidFloatReg(reg));
emitAttr origAttr = size;
if (!compiler->opts.compReloc)
{
// Strip any reloc flags from size if we aren't doing relocs
size = EA_REMOVE_FLG(size, EA_CNS_RELOC_FLG | EA_DSP_RELOC_FLG);
}
if ((imm == 0) && !EA_IS_RELOC(size))
{
instGen_Set_Reg_To_Zero(size, reg, flags);
}
else
{
// Only use lea if the original was relocatable. Otherwise we can get spurious
// instruction selection due to different memory placement at runtime.
if (EA_IS_RELOC(origAttr) && genDataIndirAddrCanBeEncodedAsPCRelOffset(imm))
{
// We will use lea so displacement and not immediate will be relocatable
size = EA_SET_FLG(EA_REMOVE_FLG(size, EA_CNS_RELOC_FLG), EA_DSP_RELOC_FLG);
GetEmitter()->emitIns_R_AI(INS_lea, size, reg, imm);
}
else
{
GetEmitter()->emitIns_R_I(INS_mov, size, reg, imm DEBUGARG(gtFlags));
}
}
regSet.verifyRegUsed(reg);
}
/***********************************************************************************
*
* Generate code to set a register 'targetReg' of type 'targetType' to the constant
* specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
* genProduceReg() on the target register.
*/
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree)
{
switch (tree->gtOper)
{
case GT_CNS_INT:
{
// relocatable values tend to come down as a CNS_INT of native int type
// so the line between these two opcodes is kind of blurry
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t cnsVal = con->IconValue();
emitAttr attr = emitActualTypeSize(targetType);
// Currently this cannot be done for all handles due to
// https://github.com/dotnet/runtime/issues/60712. However, it is
// also unclear whether we unconditionally want to use rip-relative
// lea instructions when not necessary. While a mov is larger, on
// many Intel CPUs rip-relative lea instructions have higher
// latency.
if (con->ImmedValNeedsReloc(compiler))
{
attr = EA_SET_FLG(attr, EA_CNS_RELOC_FLG);
}
if (targetType == TYP_BYREF)
{
attr = EA_SET_FLG(attr, EA_BYREF_FLG);
}
instGen_Set_Reg_To_Imm(attr, targetReg, cnsVal, INS_FLAGS_DONT_CARE DEBUGARG(0) DEBUGARG(tree->gtFlags));
regSet.verifyRegUsed(targetReg);
}
break;
case GT_CNS_DBL:
{
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(targetType);
double constValue = tree->AsDblCon()->gtDconVal;
// Make sure we use "xorps reg, reg" only for +ve zero constant (0.0) and not for -ve zero (-0.0)
if (*(__int64*)&constValue == 0)
{
// A faster/smaller way to generate 0
emit->emitIns_R_R(INS_xorps, size, targetReg, targetReg);
}
else
{
CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(constValue, size);
emit->emitIns_R_C(ins_Load(targetType), size, targetReg, hnd, 0);
}
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genCodeForNegNot: Produce code for a GT_NEG/GT_NOT node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForNegNot(GenTree* tree)
{
assert(tree->OperIs(GT_NEG, GT_NOT));
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
if (varTypeIsFloating(targetType))
{
assert(tree->gtOper == GT_NEG);
genSSE2BitwiseOp(tree);
}
else
{
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
inst_RV(ins, targetReg, targetType);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForBswap: Produce code for a GT_BSWAP / GT_BSWAP16 node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForBswap(GenTree* tree)
{
// TODO: If we're swapping immediately after a read from memory or immediately before
// a write to memory, use the MOVBE instruction instead of the BSWAP instruction if
// the platform supports it.
assert(tree->OperIs(GT_BSWAP, GT_BSWAP16));
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
if (tree->OperIs(GT_BSWAP))
{
// 32-bit and 64-bit byte swaps use "bswap reg"
inst_RV(INS_bswap, targetReg, targetType);
}
else
{
// 16-bit byte swaps use "ror reg.16, 8"
inst_RV_IV(INS_ror_N, targetReg, 8 /* val */, emitAttr::EA_2BYTE);
}
genProduceReg(tree);
}
// Produce code for a GT_INC_SATURATE node.
void CodeGen::genCodeForIncSaturate(GenTree* tree)
{
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
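    // Add 1, then subtract the carry: if the increment wrapped to zero the sbb restores
    // the all-ones value, saturating the result at the maximum instead of wrapping.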
inst_RV_IV(INS_add, targetReg, 1, emitActualTypeSize(targetType));
inst_RV_IV(INS_sbb, targetReg, 0, emitActualTypeSize(targetType));
genProduceReg(tree);
}
// Generate code to get the high N bits of a N*N=2N bit multiplication result
void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
assert(!treeNode->gtOverflowEx());
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(treeNode);
GenTree* op1 = treeNode->AsOp()->gtOp1;
GenTree* op2 = treeNode->AsOp()->gtOp2;
// to get the high bits of the multiply, we are constrained to using the
// 1-op form: RDX:RAX = RAX * rm
// The 3-op form (Rx=Ry*Rz) does not support it.
genConsumeOperands(treeNode->AsOp());
GenTree* regOp = op1;
GenTree* rmOp = op2;
// Set rmOp to the memory operand (if any)
if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->GetRegNum() == REG_RAX)))
{
regOp = op2;
rmOp = op1;
}
assert(regOp->isUsedFromReg());
// Setup targetReg when neither of the source operands was a matching register
inst_Mov(targetType, REG_RAX, regOp->GetRegNum(), /* canSkip */ true);
instruction ins;
if ((treeNode->gtFlags & GTF_UNSIGNED) == 0)
{
ins = INS_imulEAX;
}
else
{
ins = INS_mulEAX;
}
emit->emitInsBinary(ins, size, treeNode, rmOp);
// Move the result to the desired register, if necessary
if (treeNode->OperGet() == GT_MULHI)
{
inst_Mov(targetType, targetReg, REG_RDX, /* canSkip */ true);
}
genProduceReg(treeNode);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genCodeForLongUMod: Generate code for a tree of the form
// `(umod (gt_long x y) (const int))`
//
// Arguments:
// node - the node for which to generate code
//
void CodeGen::genCodeForLongUMod(GenTreeOp* node)
{
assert(node != nullptr);
assert(node->OperGet() == GT_UMOD);
assert(node->TypeGet() == TYP_INT);
GenTreeOp* const dividend = node->gtOp1->AsOp();
assert(dividend->OperGet() == GT_LONG);
assert(varTypeIsLong(dividend));
genConsumeOperands(node);
GenTree* const dividendLo = dividend->gtOp1;
GenTree* const dividendHi = dividend->gtOp2;
assert(dividendLo->isUsedFromReg());
assert(dividendHi->isUsedFromReg());
GenTree* const divisor = node->gtOp2;
assert(divisor->gtSkipReloadOrCopy()->OperGet() == GT_CNS_INT);
assert(divisor->gtSkipReloadOrCopy()->isUsedFromReg());
assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal >= 2);
assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal <= 0x3fffffff);
// dividendLo must be in RAX; dividendHi must be in RDX
genCopyRegIfNeeded(dividendLo, REG_EAX);
genCopyRegIfNeeded(dividendHi, REG_EDX);
    // At this point, EDX:EAX contains the 64-bit dividend and op2->GetRegNum()
// contains the 32bit divisor. We want to generate the following code:
//
// cmp edx, divisor->GetRegNum()
// jb noOverflow
//
// mov temp, eax
// mov eax, edx
// xor edx, edx
// div divisor->GetRegNum()
// mov eax, temp
//
// noOverflow:
// div divisor->GetRegNum()
//
// This works because (a * 2^32 + b) % c = ((a % c) * 2^32 + b) % c.
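    // That is, if the high word is >= the divisor we first reduce it modulo the divisor,
    // so the subsequent 64/32 division cannot overflow.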
BasicBlock* const noOverflow = genCreateTempLabel();
// cmp edx, divisor->GetRegNum()
// jb noOverflow
inst_RV_RV(INS_cmp, REG_EDX, divisor->GetRegNum());
inst_JMP(EJ_jb, noOverflow);
// mov temp, eax
// mov eax, edx
// xor edx, edx
// div divisor->GetRegNum()
// mov eax, temp
const regNumber tempReg = node->GetSingleTempReg();
inst_Mov(TYP_INT, tempReg, REG_EAX, /* canSkip */ false);
inst_Mov(TYP_INT, REG_EAX, REG_EDX, /* canSkip */ false);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
inst_RV(INS_div, divisor->GetRegNum(), TYP_INT);
inst_Mov(TYP_INT, REG_EAX, tempReg, /* canSkip */ false);
// noOverflow:
// div divisor->GetRegNum()
genDefineTempLabel(noOverflow);
inst_RV(INS_div, divisor->GetRegNum(), TYP_INT);
const regNumber targetReg = node->GetRegNum();
inst_Mov(TYP_INT, targetReg, REG_RDX, /* canSkip */ true);
genProduceReg(node);
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// genCodeForDivMod: Generate code for a DIV or MOD operation.
//
// Arguments:
// treeNode - the node to generate the code for
//
void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
{
assert(treeNode->OperIs(GT_DIV, GT_UDIV, GT_MOD, GT_UMOD));
GenTree* dividend = treeNode->gtOp1;
#ifdef TARGET_X86
if (varTypeIsLong(dividend->TypeGet()))
{
genCodeForLongUMod(treeNode);
return;
}
#endif // TARGET_X86
GenTree* divisor = treeNode->gtOp2;
genTreeOps oper = treeNode->OperGet();
emitAttr size = emitTypeSize(treeNode);
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
// Node's type must be int/native int, small integer types are not
// supported and floating point types are handled by genCodeForBinary.
assert(varTypeIsIntOrI(targetType));
// dividend is in a register.
assert(dividend->isUsedFromReg());
genConsumeOperands(treeNode->AsOp());
// dividend must be in RAX
genCopyRegIfNeeded(dividend, REG_RAX);
// zero or sign extend rax to rdx
if (oper == GT_UMOD || oper == GT_UDIV ||
(dividend->IsIntegralConst() && (dividend->AsIntConCommon()->IconValue() > 0)))
{
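        // For unsigned div/mod, or a dividend that is a known positive constant, simply
        // zeroing RDX is sufficient, avoiding the cdq sign extension below.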
instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
}
else
{
emit->emitIns(INS_cdq, size);
        // the cdq instruction writes RDX, so clear the gcInfo for RDX
gcInfo.gcMarkRegSetNpt(RBM_RDX);
}
// Perform the 'targetType' (64-bit or 32-bit) divide instruction
instruction ins;
if (oper == GT_UMOD || oper == GT_UDIV)
{
ins = INS_div;
}
else
{
ins = INS_idiv;
}
emit->emitInsBinary(ins, size, treeNode, divisor);
// DIV/IDIV instructions always store the quotient in RAX and the remainder in RDX.
// Move the result to the desired register, if necessary
if (oper == GT_DIV || oper == GT_UDIV)
{
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
}
else
{
assert((oper == GT_MOD) || (oper == GT_UMOD));
inst_Mov(targetType, targetReg, REG_RDX, /* canSkip */ true);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForBinary: Generate code for many binary arithmetic operators
//
// Arguments:
// treeNode - The binary operation for which we are generating code.
//
// Return Value:
// None.
//
// Notes:
// Integer MUL and DIV variants have special constraints on x64 so are not handled here.
// See the assert below for the operators that are handled.
void CodeGen::genCodeForBinary(GenTreeOp* treeNode)
{
#ifdef DEBUG
bool isValidOper = treeNode->OperIs(GT_ADD, GT_SUB);
if (varTypeIsFloating(treeNode->TypeGet()))
{
isValidOper |= treeNode->OperIs(GT_MUL, GT_DIV);
}
else
{
isValidOper |= treeNode->OperIs(GT_AND, GT_OR, GT_XOR);
#ifndef TARGET_64BIT
isValidOper |= treeNode->OperIs(GT_ADD_LO, GT_ADD_HI, GT_SUB_LO, GT_SUB_HI);
#endif
}
assert(isValidOper);
#endif
genConsumeOperands(treeNode);
const genTreeOps oper = treeNode->OperGet();
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
// Commutative operations can mark op1 as contained or reg-optional to generate "op reg, memop/immed"
if (!op1->isUsedFromReg())
{
assert(treeNode->OperIsCommutative());
assert(op1->isMemoryOp() || op1->IsLocal() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() ||
op1->IsRegOptional());
op1 = treeNode->gtGetOp2();
op2 = treeNode->gtGetOp1();
}
instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
noway_assert(targetReg != REG_NA);
regNumber op1reg = op1->isUsedFromReg() ? op1->GetRegNum() : REG_NA;
regNumber op2reg = op2->isUsedFromReg() ? op2->GetRegNum() : REG_NA;
if (varTypeIsFloating(treeNode->TypeGet()))
{
// floating-point addition, subtraction, multiplication, and division
// all have RMW semantics if VEX support is not available
bool isRMW = !compiler->canUseVexEncoding();
inst_RV_RV_TT(ins, emitTypeSize(treeNode), targetReg, op1reg, op2, isRMW);
genProduceReg(treeNode);
return;
}
GenTree* dst;
GenTree* src;
// This is the case of reg1 = reg1 op reg2
// We're ready to emit the instruction without any moves
if (op1reg == targetReg)
{
dst = op1;
src = op2;
}
// We have reg1 = reg2 op reg1
// In order for this operation to be correct
    // we need op to be a commutative operation so
// we can convert it into reg1 = reg1 op reg2 and emit
// the same code as above
else if (op2reg == targetReg)
{
noway_assert(GenTree::OperIsCommutative(oper));
dst = op2;
src = op1;
}
// now we know there are 3 different operands so attempt to use LEA
else if (oper == GT_ADD && !varTypeIsFloating(treeNode) && !treeNode->gtOverflowEx() // LEA does not set flags
&& (op2->isContainedIntOrIImmed() || op2->isUsedFromReg()) && !treeNode->gtSetFlags())
{
if (op2->isContainedIntOrIImmed())
{
emit->emitIns_R_AR(INS_lea, emitTypeSize(treeNode), targetReg, op1reg,
(int)op2->AsIntConCommon()->IconValue());
}
else
{
assert(op2reg != REG_NA);
emit->emitIns_R_ARX(INS_lea, emitTypeSize(treeNode), targetReg, op1reg, op2reg, 1, 0);
}
genProduceReg(treeNode);
return;
}
// dest, op1 and op2 registers are different:
// reg3 = reg1 op reg2
// We can implement this by issuing a mov:
// reg3 = reg1
// reg3 = reg3 op reg2
else
{
var_types op1Type = op1->TypeGet();
inst_Mov(op1Type, targetReg, op1reg, /* canSkip */ false);
regSet.verifyRegUsed(targetReg);
gcInfo.gcMarkRegPtrVal(targetReg, op1Type);
dst = treeNode;
src = op2;
}
// try to use an inc or dec
if (oper == GT_ADD && !varTypeIsFloating(treeNode) && src->isContainedIntOrIImmed() && !treeNode->gtOverflowEx())
{
if (src->IsIntegralConst(1))
{
emit->emitIns_R(INS_inc, emitTypeSize(treeNode), targetReg);
genProduceReg(treeNode);
return;
}
else if (src->IsIntegralConst(-1))
{
emit->emitIns_R(INS_dec, emitTypeSize(treeNode), targetReg);
genProduceReg(treeNode);
return;
}
}
regNumber r = emit->emitInsBinary(ins, emitTypeSize(treeNode), dst, src);
noway_assert(r == targetReg);
if (treeNode->gtOverflowEx())
{
#if !defined(TARGET_64BIT)
assert(oper == GT_ADD || oper == GT_SUB || oper == GT_ADD_HI || oper == GT_SUB_HI);
#else
assert(oper == GT_ADD || oper == GT_SUB);
#endif
genCheckOverflow(treeNode);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForMul: Generate code for a MUL operation.
//
// Arguments:
// treeNode - the node to generate the code for
//
void CodeGen::genCodeForMul(GenTreeOp* treeNode)
{
assert(treeNode->OperIs(GT_MUL));
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
// Node's type must be int or long (only on x64), small integer types are not
// supported and floating point types are handled by genCodeForBinary.
assert(varTypeIsIntOrI(targetType));
instruction ins;
emitAttr size = emitTypeSize(treeNode);
bool isUnsignedMultiply = ((treeNode->gtFlags & GTF_UNSIGNED) != 0);
bool requiresOverflowCheck = treeNode->gtOverflowEx();
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
// there are 3 forms of x64 multiply:
// 1-op form with 128 result: RDX:RAX = RAX * rm
// 2-op form: reg *= rm
// 3-op form: reg = rm * imm
genConsumeOperands(treeNode);
// This matches the 'mul' lowering in Lowering::SetMulOpCounts()
//
// immOp :: Only one operand can be an immediate
// rmOp :: Only one operand can be a memory op.
// regOp :: A register op (especially the operand that matches 'targetReg')
// (can be nullptr when we have both a memory op and an immediate op)
GenTree* immOp = nullptr;
GenTree* rmOp = op1;
GenTree* regOp;
if (op2->isContainedIntOrIImmed())
{
immOp = op2;
}
else if (op1->isContainedIntOrIImmed())
{
immOp = op1;
rmOp = op2;
}
if (immOp != nullptr)
{
// CQ: When possible use LEA for mul by imm 3, 5 or 9
ssize_t imm = immOp->AsIntConCommon()->IconValue();
if (!requiresOverflowCheck && rmOp->isUsedFromReg() && ((imm == 3) || (imm == 5) || (imm == 9)))
{
// We will use the LEA instruction to perform this multiply
// Note that an LEA with base=x, index=x and scale=(imm-1) computes x*imm when imm=3,5 or 9.
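            // For example, imm == 5 emits "lea targetReg, [rmOp + 4*rmOp]".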
unsigned int scale = (unsigned int)(imm - 1);
GetEmitter()->emitIns_R_ARX(INS_lea, size, targetReg, rmOp->GetRegNum(), rmOp->GetRegNum(), scale, 0);
}
else if (!requiresOverflowCheck && rmOp->isUsedFromReg() && (imm == genFindLowestBit(imm)) && (imm != 0))
{
// Use shift for constant multiply when legal
uint64_t zextImm = static_cast<uint64_t>(static_cast<size_t>(imm));
unsigned int shiftAmount = genLog2(zextImm);
// Copy reg src to dest register
inst_Mov(targetType, targetReg, rmOp->GetRegNum(), /* canSkip */ true);
inst_RV_SH(INS_shl, size, targetReg, shiftAmount);
}
else
{
// use the 3-op form with immediate
ins = GetEmitter()->inst3opImulForReg(targetReg);
emit->emitInsBinary(ins, size, rmOp, immOp);
}
}
else // we have no contained immediate operand
{
regOp = op1;
rmOp = op2;
regNumber mulTargetReg = targetReg;
if (isUnsignedMultiply && requiresOverflowCheck)
{
ins = INS_mulEAX;
mulTargetReg = REG_RAX;
}
else
{
ins = INS_imul;
}
// Set rmOp to the memory operand (if any)
// or set regOp to the op2 when it has the matching target register for our multiply op
//
if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->GetRegNum() == mulTargetReg)))
{
regOp = op2;
rmOp = op1;
}
assert(regOp->isUsedFromReg());
// Setup targetReg when neither of the source operands was a matching register
inst_Mov(targetType, mulTargetReg, regOp->GetRegNum(), /* canSkip */ true);
emit->emitInsBinary(ins, size, treeNode, rmOp);
// Move the result to the desired register, if necessary
if (ins == INS_mulEAX)
{
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
}
}
if (requiresOverflowCheck)
{
// Overflow checking is only used for non-floating point types
noway_assert(!varTypeIsFloating(treeNode));
genCheckOverflow(treeNode);
}
genProduceReg(treeNode);
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// genSIMDSplitReturn: Generates code for returning a fixed-size SIMD type that lives
// in a single register, but is returned in multiple registers.
//
// Arguments:
// src - The source of the return
// retTypeDesc - The return type descriptor.
//
void CodeGen::genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc)
{
assert(varTypeIsSIMD(src));
assert(src->isUsedFromReg());
    // This is the case where the operand is in a single reg but needs to be
    // returned in multiple ABI return registers.
regNumber opReg = src->GetRegNum();
regNumber reg0 = retTypeDesc->GetABIReturnReg(0);
regNumber reg1 = retTypeDesc->GetABIReturnReg(1);
assert((reg0 != REG_NA) && (reg1 != REG_NA) && (opReg != REG_NA));
const bool srcIsFloatReg = genIsValidFloatReg(opReg);
const bool dstIsFloatReg = genIsValidFloatReg(reg0);
assert(srcIsFloatReg);
#ifdef TARGET_AMD64
assert(src->TypeIs(TYP_SIMD16));
assert(srcIsFloatReg == dstIsFloatReg);
if (opReg != reg0 && opReg != reg1)
{
// Operand reg is different from return regs.
// Copy opReg to reg0 and let it to be handled by one of the
// two cases below.
inst_Mov(TYP_SIMD16, reg0, opReg, /* canSkip */ false);
opReg = reg0;
}
if (opReg == reg0)
{
assert(opReg != reg1);
// reg1 = opReg.
inst_Mov(TYP_SIMD16, reg1, opReg, /* canSkip */ false);
}
else
{
assert(opReg == reg1);
// reg0 = opReg.
inst_Mov(TYP_SIMD16, reg0, opReg, /* canSkip */ false);
}
    // reg0 already has the required 8 bytes in bit positions [63:0].
    // Swap the upper and lower 8 bytes of reg1 so that the desired 8 bytes end up in bit positions [63:0].
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, reg1, reg1, 0x01);
#else // TARGET_X86
assert(src->TypeIs(TYP_SIMD8));
assert(srcIsFloatReg != dstIsFloatReg);
assert((reg0 == REG_EAX) && (reg1 == REG_EDX));
// reg0 = opReg[31:0]
inst_Mov(TYP_INT, reg0, opReg, /* canSkip */ false);
    // reg1 = opReg[63:32]
if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
inst_RV_TT_IV(INS_pextrd, EA_4BYTE, reg1, src, 1);
}
else
{
        int8_t shuffleMask = 1; // we only need [63:32]->[31:0], the rest is not read.
inst_RV_TT_IV(INS_pshufd, EA_8BYTE, opReg, src, shuffleMask);
inst_Mov(TYP_INT, reg1, opReg, /* canSkip */ false);
}
#endif // TARGET_X86
}
#endif // FEATURE_SIMD
#if defined(TARGET_X86)
//------------------------------------------------------------------------
// genFloatReturn: Generates code for float return statement for x86.
//
// Note: treeNode's and op1's registers are already consumed.
//
// Arguments:
// treeNode - The GT_RETURN or GT_RETFILT tree node with float type.
//
// Return Value:
// None
//
void CodeGen::genFloatReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
assert(varTypeIsFloating(treeNode));
GenTree* op1 = treeNode->gtGetOp1();
// Spill the return value register from an XMM register to the stack, then load it on the x87 stack.
// If it already has a home location, use that. Otherwise, we need a temp.
if (genIsRegCandidateLocal(op1) && compiler->lvaGetDesc(op1->AsLclVarCommon())->lvOnFrame)
{
if (compiler->lvaGetDesc(op1->AsLclVarCommon())->GetRegNum() != REG_STK)
{
op1->gtFlags |= GTF_SPILL;
inst_TT_RV(ins_Store(op1->gtType, compiler->isSIMDTypeLocalAligned(op1->AsLclVarCommon()->GetLclNum())),
emitTypeSize(op1->TypeGet()), op1, op1->GetRegNum());
}
// Now, load it to the fp stack.
GetEmitter()->emitIns_S(INS_fld, emitTypeSize(op1), op1->AsLclVarCommon()->GetLclNum(), 0);
}
else
{
// Spill the value, which should be in a register, then load it to the fp stack.
// TODO-X86-CQ: Deal with things that are already in memory (don't call genConsumeReg yet).
op1->gtFlags |= GTF_SPILL;
regSet.rsSpillTree(op1->GetRegNum(), op1);
op1->gtFlags |= GTF_SPILLED;
op1->gtFlags &= ~GTF_SPILL;
TempDsc* t = regSet.rsUnspillInPlace(op1, op1->GetRegNum());
inst_FS_ST(INS_fld, emitActualTypeSize(op1->gtType), t, 0);
op1->gtFlags &= ~GTF_SPILLED;
regSet.tmpRlsTemp(t);
}
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT/GT_TEST_EQ/GT_TEST_NE/GT_CMP node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForCompare(GenTreeOp* tree)
{
assert(tree->OperIs(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE, GT_CMP));
// TODO-XArch-CQ: Check if we can use the currently set flags.
// TODO-XArch-CQ: Check for the case where we can simply transfer the carry bit to a register
// (signed < or >= where targetReg != REG_NA)
GenTree* op1 = tree->gtOp1;
var_types op1Type = op1->TypeGet();
if (varTypeIsFloating(op1Type))
{
genCompareFloat(tree);
}
else
{
genCompareInt(tree);
}
}
//------------------------------------------------------------------------
// genCodeForBT: Generates code for a GT_BT node.
//
// Arguments:
// tree - The node.
//
void CodeGen::genCodeForBT(GenTreeOp* bt)
{
assert(bt->OperIs(GT_BT));
GenTree* op1 = bt->gtGetOp1();
GenTree* op2 = bt->gtGetOp2();
var_types type = genActualType(op1->TypeGet());
assert(op1->isUsedFromReg() && op2->isUsedFromReg());
assert((genTypeSize(type) >= genTypeSize(TYP_INT)) && (genTypeSize(type) <= genTypeSize(TYP_I_IMPL)));
genConsumeOperands(bt);
// Note that the emitter doesn't fully support INS_bt, it only supports the reg,reg
// form and encodes the registers in reverse order. To get the correct order we need
// to reverse the operands when calling emitIns_R_R.
GetEmitter()->emitIns_R_R(INS_bt, emitTypeSize(type), op2->GetRegNum(), op1->GetRegNum());
}
// clang-format off
const CodeGen::GenConditionDesc CodeGen::GenConditionDesc::map[32]
{
{ }, // NONE
{ }, // 1
{ EJ_jl }, // SLT
{ EJ_jle }, // SLE
{ EJ_jge }, // SGE
{ EJ_jg }, // SGT
{ EJ_js }, // S
{ EJ_jns }, // NS
{ EJ_je }, // EQ
{ EJ_jne }, // NE
{ EJ_jb }, // ULT
{ EJ_jbe }, // ULE
{ EJ_jae }, // UGE
{ EJ_ja }, // UGT
{ EJ_jb }, // C
{ EJ_jae }, // NC
// Floating point compare instructions (UCOMISS, UCOMISD etc.) set the condition flags as follows:
// ZF PF CF Meaning
// ---------------------
// 1 1 1 Unordered
// 0 0 0 Greater
// 0 0 1 Less Than
// 1 0 0 Equal
//
// Since ZF and CF are also set when the result is unordered, in some cases we first need to check
// PF before checking ZF/CF. In general, ordered conditions will result in a jump only if PF is not
// set and unordered conditions will result in a jump only if PF is set.
{ EJ_jnp, GT_AND, EJ_je }, // FEQ
{ EJ_jne }, // FNE
{ EJ_jnp, GT_AND, EJ_jb }, // FLT
{ EJ_jnp, GT_AND, EJ_jbe }, // FLE
{ EJ_jae }, // FGE
{ EJ_ja }, // FGT
{ EJ_jo }, // O
{ EJ_jno }, // NO
{ EJ_je }, // FEQU
{ EJ_jp, GT_OR, EJ_jne }, // FNEU
{ EJ_jb }, // FLTU
{ EJ_jbe }, // FLEU
{ EJ_jp, GT_OR, EJ_jae }, // FGEU
{ EJ_jp, GT_OR, EJ_ja }, // FGTU
{ EJ_jp }, // P
{ EJ_jnp }, // NP
};
// clang-format on
//------------------------------------------------------------------------
// inst_SETCC: Generate code to set a register to 0 or 1 based on a condition.
//
// Arguments:
// condition - The condition
// type - The type of the value to be produced
// dstReg - The destination register to be set to 1 or 0
//
void CodeGen::inst_SETCC(GenCondition condition, var_types type, regNumber dstReg)
{
assert(varTypeIsIntegral(type));
assert(genIsValidIntReg(dstReg) && isByteReg(dstReg));
const GenConditionDesc& desc = GenConditionDesc::Get(condition);
inst_SET(desc.jumpKind1, dstReg);
if (desc.oper != GT_NONE)
{
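        // For compound conditions the first setcc decides the result whenever it short-circuits
        // (true for GT_OR, false for GT_AND); otherwise the second setcc overwrites it.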
BasicBlock* labelNext = genCreateTempLabel();
inst_JMP((desc.oper == GT_OR) ? desc.jumpKind1 : emitter::emitReverseJumpKind(desc.jumpKind1), labelNext);
inst_SET(desc.jumpKind2, dstReg);
genDefineTempLabel(labelNext);
}
if (!varTypeIsByte(type))
{
GetEmitter()->emitIns_Mov(INS_movzx, EA_1BYTE, dstReg, dstReg, /* canSkip */ false);
}
}
//------------------------------------------------------------------------
// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node.
//
// Arguments:
// tree - the GT_RETURNTRAP node
//
void CodeGen::genCodeForReturnTrap(GenTreeOp* tree)
{
assert(tree->OperGet() == GT_RETURNTRAP);
// this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
// based on the contents of 'data'
GenTree* data = tree->gtOp1;
genConsumeRegs(data);
GenTreeIntCon cns = intForm(TYP_INT, 0);
cns.SetContained();
GetEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
BasicBlock* skipLabel = genCreateTempLabel();
inst_JMP(EJ_je, skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
regNumber tmpReg = tree->GetSingleTempReg(RBM_ALLINT);
assert(genIsValidIntReg(tmpReg));
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN, tmpReg);
genDefineTempLabel(skipLabel);
}
/*****************************************************************************
*
* Generate code for a single node in the tree.
* Preconditions: All operands have been evaluated
*
*/
void CodeGen::genCodeForTreeNode(GenTree* treeNode)
{
regNumber targetReg;
#if !defined(TARGET_64BIT)
if (treeNode->TypeGet() == TYP_LONG)
{
// All long enregistered nodes will have been decomposed into their
// constituent lo and hi nodes.
targetReg = REG_NA;
}
else
#endif // !defined(TARGET_64BIT)
{
targetReg = treeNode->GetRegNum();
}
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
#ifdef DEBUG
// Validate that all the operands for the current node are consumed in order.
// This is important because LSRA ensures that any necessary copies will be
// handled correctly.
lastConsumedNode = nullptr;
if (compiler->verbose)
{
unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
compiler->gtDispLIRNode(treeNode, "Generating: ");
}
#endif // DEBUG
// Is this a node whose value is already in a register? LSRA denotes this by
// setting the GTF_REUSE_REG_VAL flag.
if (treeNode->IsReuseRegVal())
{
// For now, this is only used for constant nodes.
assert((treeNode->OperIsConst()));
JITDUMP(" TreeNode is marked ReuseReg\n");
return;
}
// contained nodes are part of their parents for codegen purposes
// ex : immediates, most LEAs
if (treeNode->isContained())
{
return;
}
switch (treeNode->gtOper)
{
#ifndef JIT32_GCENCODER
case GT_START_NONGC:
GetEmitter()->emitDisableGC();
break;
#endif // !defined(JIT32_GCENCODER)
case GT_START_PREEMPTGC:
// Kill callee saves GC registers, and create a label
// so that information gets propagated to the emitter.
gcInfo.gcMarkRegSetNpt(RBM_INT_CALLEE_SAVED);
genDefineTempLabel(genCreateTempLabel());
break;
case GT_PROF_HOOK:
#ifdef PROFILING_SUPPORTED
// We should be seeing this only if profiler hook is needed
noway_assert(compiler->compIsProfilerHookNeeded());
// Right now this node is used only for tail calls. In future if
// we intend to use it for Enter or Leave hooks, add a data member
// to this node indicating the kind of profiler hook. For example,
// helper number can be used.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
break;
case GT_LCLHEAP:
genLclHeap(treeNode);
break;
case GT_CNS_INT:
#ifdef TARGET_X86
assert(!treeNode->IsIconHandle(GTF_ICON_TLS_HDL));
#endif // TARGET_X86
FALLTHROUGH;
case GT_CNS_DBL:
genSetRegToConst(targetReg, targetType, treeNode);
genProduceReg(treeNode);
break;
case GT_NOT:
case GT_NEG:
genCodeForNegNot(treeNode);
break;
case GT_BSWAP:
case GT_BSWAP16:
genCodeForBswap(treeNode);
break;
case GT_DIV:
if (varTypeIsFloating(treeNode->TypeGet()))
{
genCodeForBinary(treeNode->AsOp());
break;
}
FALLTHROUGH;
case GT_MOD:
case GT_UMOD:
case GT_UDIV:
genCodeForDivMod(treeNode->AsOp());
break;
case GT_OR:
case GT_XOR:
case GT_AND:
assert(varTypeIsIntegralOrI(treeNode));
FALLTHROUGH;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif // !defined(TARGET_64BIT)
case GT_ADD:
case GT_SUB:
genCodeForBinary(treeNode->AsOp());
break;
case GT_MUL:
if (varTypeIsFloating(treeNode->TypeGet()))
{
genCodeForBinary(treeNode->AsOp());
break;
}
genCodeForMul(treeNode->AsOp());
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
genCodeForShift(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LSH_HI:
case GT_RSH_LO:
genCodeForShiftLong(treeNode);
break;
#endif // !defined(TARGET_64BIT)
case GT_CAST:
genCodeForCast(treeNode->AsOp());
break;
case GT_BITCAST:
genCodeForBitCast(treeNode->AsOp());
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
genCodeForLclAddr(treeNode);
break;
case GT_LCL_FLD:
genCodeForLclFld(treeNode->AsLclFld());
break;
case GT_LCL_VAR:
genCodeForLclVar(treeNode->AsLclVar());
break;
case GT_STORE_LCL_FLD:
genCodeForStoreLclFld(treeNode->AsLclFld());
break;
case GT_STORE_LCL_VAR:
genCodeForStoreLclVar(treeNode->AsLclVar());
break;
case GT_RETFILT:
case GT_RETURN:
genReturn(treeNode);
break;
case GT_LEA:
// If we are here, it is the case where there is an LEA that cannot be folded into a parent instruction.
genLeaInstruction(treeNode->AsAddrMode());
break;
case GT_INDEX_ADDR:
genCodeForIndexAddr(treeNode->AsIndexAddr());
break;
case GT_IND:
genCodeForIndir(treeNode->AsIndir());
break;
case GT_INC_SATURATE:
genCodeForIncSaturate(treeNode);
break;
case GT_MULHI:
#ifdef TARGET_X86
case GT_MUL_LONG:
#endif
genCodeForMulHi(treeNode->AsOp());
break;
case GT_INTRINSIC:
genIntrinsic(treeNode);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
genSIMDIntrinsic(treeNode->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
genHWIntrinsic(treeNode->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
case GT_CKFINITE:
genCkfinite(treeNode);
break;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
case GT_CMP:
genCodeForCompare(treeNode->AsOp());
break;
case GT_JTRUE:
genCodeForJumpTrue(treeNode->AsOp());
break;
case GT_JCC:
genCodeForJcc(treeNode->AsCC());
break;
case GT_SETCC:
genCodeForSetcc(treeNode->AsCC());
break;
case GT_BT:
genCodeForBT(treeNode->AsOp());
break;
case GT_RETURNTRAP:
genCodeForReturnTrap(treeNode->AsOp());
break;
case GT_STOREIND:
genCodeForStoreInd(treeNode->AsStoreInd());
break;
case GT_COPY:
// This is handled at the time we call genConsumeReg() on the GT_COPY
break;
case GT_FIELD_LIST:
// Should always be marked contained.
assert(!"LIST, FIELD_LIST nodes should always be marked contained.");
break;
case GT_SWAP:
genCodeForSwap(treeNode->AsOp());
break;
case GT_PUTARG_STK:
genPutArgStk(treeNode->AsPutArgStk());
break;
case GT_PUTARG_REG:
genPutArgReg(treeNode->AsOp());
break;
case GT_CALL:
genCall(treeNode->AsCall());
break;
case GT_JMP:
genJmpMethod(treeNode);
break;
case GT_LOCKADD:
genCodeForLockAdd(treeNode->AsOp());
break;
case GT_XCHG:
case GT_XADD:
genLockedInstructions(treeNode->AsOp());
break;
case GT_XORR:
case GT_XAND:
NYI("Interlocked.Or and Interlocked.And aren't implemented for x86 yet.");
break;
case GT_MEMORYBARRIER:
{
CodeGen::BarrierKind barrierKind =
treeNode->gtFlags & GTF_MEMORYBARRIER_LOAD ? BARRIER_LOAD_ONLY : BARRIER_FULL;
instGen_MemoryBarrier(barrierKind);
break;
}
case GT_CMPXCHG:
genCodeForCmpXchg(treeNode->AsCmpXchg());
break;
case GT_RELOAD:
// do nothing - reload is just a marker.
// The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
// into the register specified in this node.
break;
case GT_NOP:
break;
case GT_KEEPALIVE:
genConsumeRegs(treeNode->AsOp()->gtOp1);
break;
case GT_NO_OP:
GetEmitter()->emitIns_Nop(1);
break;
case GT_BOUNDS_CHECK:
genRangeCheck(treeNode);
break;
case GT_PHYSREG:
genCodeForPhysReg(treeNode->AsPhysReg());
break;
case GT_NULLCHECK:
genCodeForNullCheck(treeNode->AsIndir());
break;
case GT_CATCH_ARG:
noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
/* Catch arguments get passed in a register. genCodeForBBlist()
would have marked it as holding a GC object, but not used. */
noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
genConsumeReg(treeNode);
break;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
// Have to clear the ShadowSP of the nesting level which encloses the finally. Generates:
// mov dword ptr [ebp-0xC], 0 // for some slot of the ShadowSP local var
size_t finallyNesting;
finallyNesting = treeNode->AsVal()->gtVal1;
noway_assert(treeNode->AsVal()->gtVal1 < compiler->compHndBBtabCount);
noway_assert(finallyNesting < compiler->compHndBBtabCount);
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
TARGET_POINTER_SIZE); // below doesn't underflow.
filterEndOffsetSlotOffs =
(unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
size_t curNestingSlotOffs;
curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, (unsigned)curNestingSlotOffs,
0);
break;
#endif // !FEATURE_EH_FUNCLETS
case GT_PINVOKE_PROLOG:
noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
#ifdef PSEUDORANDOM_NOP_INSERTION
// the runtime side requires the codegen here to be consistent
emit->emitDisableRandomNops();
#endif // PSEUDORANDOM_NOP_INSERTION
break;
case GT_LABEL:
genPendingCallLabel = genCreateTempLabel();
emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, genPendingCallLabel, treeNode->GetRegNum());
break;
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
case GT_JMPTABLE:
genJumpTable(treeNode);
break;
case GT_SWITCH_TABLE:
genTableBasedSwitch(treeNode);
break;
case GT_ARR_INDEX:
genCodeForArrIndex(treeNode->AsArrIndex());
break;
case GT_ARR_OFFSET:
genCodeForArrOffset(treeNode->AsArrOffs());
break;
case GT_CLS_VAR_ADDR:
emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->AsClsVar()->gtClsVarHnd, 0);
genProduceReg(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LONG:
assert(treeNode->isUsedFromReg());
genConsumeRegs(treeNode);
break;
#endif
case GT_IL_OFFSET:
// Do nothing; these nodes are simply markers for debug info.
break;
default:
{
#ifdef DEBUG
char message[256];
_snprintf_s(message, ArrLen(message), _TRUNCATE, "NYI: Unimplemented node type %s\n",
GenTree::OpName(treeNode->OperGet()));
NYIRAW(message);
#endif
assert(!"Unknown node in codegen");
}
break;
}
}
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------
// genMultiRegStoreToSIMDLocal: store multi-reg value to a single-reg SIMD local
//
// Arguments:
// lclNode - GenTreeLclVar of GT_STORE_LCL_VAR
//
// Return Value:
// None
//
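// Notes:
//    This path currently only handles a SIMD value returned from a call in two registers;
//    the single-register case is handled as a regular STORE_LCL_VAR (see the asserts below).
//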
void CodeGen::genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode)
{
assert(varTypeIsSIMD(lclNode));
regNumber dst = lclNode->GetRegNum();
GenTree* op1 = lclNode->gtGetOp1();
GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
unsigned regCount =
actualOp1->IsMultiRegLclVar() ? actualOp1->AsLclVar()->GetFieldCount(compiler) : actualOp1->GetMultiRegCount();
assert(op1->IsMultiRegNode());
genConsumeRegs(op1);
// Right now the only enregistrable structs supported are SIMD types.
// They are only returned in 1 or 2 registers - the 1 register case is
// handled as a regular STORE_LCL_VAR.
// This case is always a call (AsCall() will assert if it is not).
GenTreeCall* call = actualOp1->AsCall();
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
assert(retTypeDesc->GetReturnRegCount() == MAX_RET_REG_COUNT);
assert(regCount == 2);
regNumber targetReg = lclNode->GetRegNum();
regNumber reg0 = call->GetRegNumByIdx(0);
regNumber reg1 = call->GetRegNumByIdx(1);
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
// that need to be copied or reloaded.
regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(0);
if (reloadReg != REG_NA)
{
reg0 = reloadReg;
}
reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(1);
if (reloadReg != REG_NA)
{
reg1 = reloadReg;
}
}
#ifdef UNIX_AMD64_ABI
assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(0)));
assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(1)));
// This is a case where the two 8-bytes that comprise the operand are in
// two different xmm registers and need to be assembled into a single
// xmm register.
if (targetReg != reg0 && targetReg != reg1)
{
// targetReg = reg0;
// targetReg[127:64] = reg1[127:64]
inst_Mov(TYP_DOUBLE, targetReg, reg0, /* canSkip */ false);
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
}
else if (targetReg == reg0)
{
// (elided) targetReg = reg0
// targetReg[127:64] = reg1[127:64]
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
}
else
{
assert(targetReg == reg1);
// We need two shuffles to achieve this
// First:
// targetReg[63:0] = targetReg[63:0]
// targetReg[127:64] = reg0[63:0]
//
// Second:
// targetReg[63:0] = targetReg[127:64]
// targetReg[127:64] = targetReg[63:0]
//
// Essentially copy low 8-bytes from reg0 to high 8-bytes of targetReg
// and next swap low and high 8-bytes of targetReg to have them
// rearranged in the right order.
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg0, 0x00);
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, targetReg, 0x01);
}
genProduceReg(lclNode);
#elif defined(TARGET_X86)
if (TargetOS::IsWindows)
{
assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(0)));
assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(1)));
assert(lclNode->TypeIs(TYP_SIMD8));
// This is a case where a SIMD8 struct is returned in [EAX, EDX]
// and needs to be assembled into a single xmm register.
// Note we can't check reg0==EAX, reg1==EDX because they could already have been moved.
inst_Mov(TYP_FLOAT, targetReg, reg0, /* canSkip */ false);
const emitAttr size = emitTypeSize(TYP_SIMD8);
if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
GetEmitter()->emitIns_SIMD_R_R_R_I(INS_pinsrd, size, targetReg, targetReg, reg1, 1);
}
else
{
regNumber tempXmm = lclNode->GetSingleTempReg();
assert(tempXmm != targetReg);
inst_Mov(TYP_FLOAT, tempXmm, reg1, /* canSkip */ false);
GetEmitter()->emitIns_SIMD_R_R_R(INS_punpckldq, size, targetReg, targetReg, tempXmm);
}
genProduceReg(lclNode);
}
#elif defined(TARGET_AMD64)
assert(!TargetOS::IsWindows || !"Multireg store to SIMD reg not supported on Windows x64");
#else
#error Unsupported or unset target architecture
#endif
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genEstablishFramePointer: Set up the frame pointer by adding an offset to the stack pointer.
//
// Arguments:
// delta - the offset to add to the current stack pointer to establish the frame pointer
// reportUnwindData - true if establishing the frame pointer should be reported in the OS unwind data.
//
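// Notes:
//    Emits "mov rbp, rsp" when delta is zero and "lea rbp, [rsp+delta]" otherwise,
//    optionally reporting the new frame pointer to the unwinder.
//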
void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData)
{
assert(compiler->compGeneratingProlog);
if (delta == 0)
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, /* canSkip */ false);
#ifdef USING_SCOPE_INFO
psiMoveESPtoEBP();
#endif // USING_SCOPE_INFO
}
else
{
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
// We don't update prolog scope info (there is no function to handle lea), but that is currently dead code
// anyway.
}
if (reportUnwindData)
{
compiler->unwindSetFrameReg(REG_FPBASE, delta);
}
}
//------------------------------------------------------------------------
// genAllocLclFrame: Probe the stack and allocate the local stack frame - subtract from SP.
//
// Arguments:
// frameSize - the size of the stack frame being allocated.
// initReg - register to use as a scratch register.
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
// maskArgRegsLiveIn - incoming argument registers that are currently live.
//
// Return value:
// None
//
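// Notes:
//    The emitted code depends on frameSize: a single push for a register-sized frame,
//    a "sub sp, frameSize" (plus a trailing probe when needed) for frames smaller than a page,
//    and a call to the stack probe helper for larger frames.
//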
void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn)
{
assert(compiler->compGeneratingProlog);
if (frameSize == 0)
{
return;
}
const target_size_t pageSize = compiler->eeGetPageSize();
if (frameSize == REGSIZE_BYTES)
{
// Frame size is the same as register size.
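// A single push both allocates the slot and touches the new stack location, so no separate probe is needed.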
GetEmitter()->emitIns_R(INS_push, EA_PTRSIZE, REG_EAX);
compiler->unwindAllocStack(frameSize);
}
else if (frameSize < pageSize)
{
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, frameSize);
compiler->unwindAllocStack(frameSize);
const unsigned lastProbedLocToFinalSp = frameSize;
if (lastProbedLocToFinalSp + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize)
{
// The last probe is almost a full page above the final SP. If the next action on the stack might
// subtract from SP first, before touching the current SP, then we need to probe at the very bottom.
// This can happen on x86, for example, when we copy an argument to the stack using a
// "SUB ESP; REP MOV" strategy.
GetEmitter()->emitIns_R_AR(INS_test, EA_4BYTE, REG_EAX, REG_SPBASE, 0);
}
}
else
{
#ifdef TARGET_X86
int spOffset = -(int)frameSize;
if (compiler->info.compPublishStubParam)
{
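// Preserve the secret stub param register across the stack probe helper call below.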
GetEmitter()->emitIns_R(INS_push, EA_PTRSIZE, REG_SECRET_STUB_PARAM);
spOffset += REGSIZE_BYTES;
}
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_STACK_PROBE_HELPER_ARG, REG_SPBASE, spOffset);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_ARG);
genEmitHelperCall(CORINFO_HELP_STACK_PROBE, 0, EA_UNKNOWN);
if (compiler->info.compPublishStubParam)
{
GetEmitter()->emitIns_R(INS_pop, EA_PTRSIZE, REG_SECRET_STUB_PARAM);
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, frameSize);
}
else
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG, /* canSkip */ false);
}
#else // !TARGET_X86
static_assert_no_msg((RBM_STACK_PROBE_HELPER_ARG & (RBM_SECRET_STUB_PARAM | RBM_DEFAULT_HELPER_CALL_TARGET)) ==
RBM_NONE);
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_STACK_PROBE_HELPER_ARG, REG_SPBASE, -(int)frameSize);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_ARG);
genEmitHelperCall(CORINFO_HELP_STACK_PROBE, 0, EA_UNKNOWN);
if (initReg == REG_DEFAULT_HELPER_CALL_TARGET)
{
*pInitRegZeroed = false;
}
static_assert_no_msg((RBM_STACK_PROBE_HELPER_TRASH & RBM_STACK_PROBE_HELPER_ARG) == RBM_NONE);
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG, /* canSkip */ false);
#endif // !TARGET_X86
compiler->unwindAllocStack(frameSize);
if (initReg == REG_STACK_PROBE_HELPER_ARG)
{
*pInitRegZeroed = false;
}
}
#ifdef USING_SCOPE_INFO
if (!doubleAlignOrFramePointerUsed())
{
psiAdjustStackLevel(frameSize);
}
#endif // USING_SCOPE_INFO
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustment: add a specified constant value to the stack pointer.
// No probe is done.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
// We assert that the SP change is less than one page. If it's greater, you should have called a
// function that does a probe, which will in turn call this function.
assert((target_size_t)(-spDelta) <= compiler->eeGetPageSize());
#ifdef TARGET_X86
if (regTmp != REG_NA)
{
// For x86, some cases don't want to use "sub ESP" because we don't want the emitter to track the adjustment
// to ESP. So do the work in the temporary register instead.
// TODO-CQ: manipulate ESP directly, to share code, reduce #ifdefs, and improve CQ. This would require
// creating a way to temporarily turn off the emitter's tracking of ESP, maybe marking instrDescs as "don't
// track".
inst_Mov(TYP_I_IMPL, regTmp, REG_SPBASE, /* canSkip */ false);
inst_RV_IV(INS_sub, regTmp, (target_ssize_t)-spDelta, EA_PTRSIZE);
inst_Mov(TYP_I_IMPL, REG_SPBASE, regTmp, /* canSkip */ false);
}
else
#endif // TARGET_X86
{
inst_RV_IV(INS_sub, REG_SPBASE, (target_ssize_t)-spDelta, EA_PTRSIZE);
}
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentWithProbe: add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Should only be called as a helper for
// genStackPointerConstantAdjustmentLoopWithProbe.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero. If zero, the probe happens,
// but the stack pointer doesn't move.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp)
{
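// Probe the current page by touching [SP] before adjusting SP; the caller invokes this once per page.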
GetEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
genStackPointerConstantAdjustment(spDelta, regTmp);
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentLoopWithProbe: Add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Generates one probe per page, up to the total amount required.
// This will generate a sequence of probes in-line. It is required for the case where we need to expose
// (not hide) the stack level adjustment. We can't use the dynamic loop in that case, because the total
// stack adjustment would not be visible to the emitter. It would be possible to use this version for
// multiple hidden constant stack level adjustments but we don't do that currently (we use the loop
// version in genStackPointerDynamicAdjustmentWithProbe instead).
//
// Arguments:
// spDelta - the value to add to SP. Must be negative.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// Offset in bytes from SP to last probed address.
//
target_ssize_t CodeGen::genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
const target_size_t pageSize = compiler->eeGetPageSize();
ssize_t spRemainingDelta = spDelta;
do
{
ssize_t spOneDelta = -(ssize_t)min((target_size_t)-spRemainingDelta, pageSize);
genStackPointerConstantAdjustmentWithProbe(spOneDelta, regTmp);
spRemainingDelta -= spOneDelta;
} while (spRemainingDelta < 0);
// What offset from the final SP was the last probe? This depends on the fact that
// genStackPointerConstantAdjustmentWithProbe() probes first, then does "SUB SP".
target_size_t lastTouchDelta = (target_size_t)(-spDelta) % pageSize;
if ((lastTouchDelta == 0) || (lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize))
{
// The last probe is almost a full page above the final SP. If lastTouchDelta==0, then spDelta was an
// exact multiple of pageSize, which means we last probed exactly one page back. Otherwise, we probed
// the page, but very far from its end. If the next action on the stack might subtract from SP first,
// before touching the current SP, then we do one more probe at the very bottom. This can happen on
// x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV" strategy.
GetEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, 0);
lastTouchDelta = 0;
}
return lastTouchDelta;
}
//------------------------------------------------------------------------
// genStackPointerDynamicAdjustmentWithProbe: add a register value to the stack pointer,
// and probe the stack as appropriate.
//
// Note that for x86, we hide the ESP adjustment from the emitter. To do that, currently,
// requires a temporary register and extra code.
//
// Arguments:
// regSpDelta - the register value to add to SP. The value in this register must be negative.
// This register might be trashed.
// regTmp - an available temporary register. Will be trashed.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta, regNumber regTmp)
{
assert(regSpDelta != REG_NA);
assert(regTmp != REG_NA);
// Tickle the pages to ensure that ESP is always valid and is
// in sync with the "stack guard page". Note that in the worst
// case ESP is on the last byte of the guard page. Thus you must
// touch ESP-0 first not ESP-0x1000.
//
// Another subtlety is that you don't want ESP to be exactly on the
// boundary of the guard page because PUSH is predecrement, thus
// call setup would not touch the guard page but just beyond it.
//
// Note that we go through a few hoops so that ESP never points to
// illegal pages at any time during the tickling process
//
// add regSpDelta, ESP // reg now holds ultimate ESP
// jb loop // result is smaller than original ESP (no wrap around)
// xor regSpDelta, regSpDelta // Overflow, pick lowest possible number
// loop:
// test ESP, [ESP+0] // tickle the page
// mov regTmp, ESP
// sub regTmp, eeGetPageSize()
// mov ESP, regTmp
// cmp ESP, regSpDelta
// jae loop
// mov ESP, regSpDelta
BasicBlock* loop = genCreateTempLabel();
inst_RV_RV(INS_add, regSpDelta, REG_SPBASE, TYP_I_IMPL);
inst_JMP(EJ_jb, loop);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regSpDelta);
genDefineTempLabel(loop);
// Tickle the decremented value. Note that it must be done BEFORE the update of ESP since ESP might already
// be on the guard page. It is OK to leave the final value of ESP on the guard page.
GetEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
// Subtract a page from ESP. This is a trick to avoid the emitter trying to track the
// decrement of the ESP - we do the subtraction in another reg instead of adjusting ESP directly.
inst_Mov(TYP_I_IMPL, regTmp, REG_SPBASE, /* canSkip */ false);
inst_RV_IV(INS_sub, regTmp, compiler->eeGetPageSize(), EA_PTRSIZE);
inst_Mov(TYP_I_IMPL, REG_SPBASE, regTmp, /* canSkip */ false);
inst_RV_RV(INS_cmp, REG_SPBASE, regSpDelta, TYP_I_IMPL);
inst_JMP(EJ_jae, loop);
// Move the final value to ESP
inst_Mov(TYP_I_IMPL, REG_SPBASE, regSpDelta, /* canSkip */ false);
}
//------------------------------------------------------------------------
// genLclHeap: Generate code for localloc.
//
// Arguments:
// tree - the localloc tree to generate.
//
// Notes:
// Note that for x86, we don't track ESP movements while generating the localloc code.
// The ESP tracking is used to report stack pointer-relative GC info, which is not
// interesting while doing the localloc construction. Also, for functions with localloc,
// we have EBP frames, and EBP-relative locals, and ESP-relative accesses only for function
// call arguments.
//
// For x86, we store the ESP after the localloc is complete in the LocAllocSP
// variable. This variable is implicitly reported to the VM in the GC info (its position
// is defined by convention relative to other items), and is used by the GC to find the
// "base" stack pointer in functions with localloc.
//
void CodeGen::genLclHeap(GenTree* tree)
{
assert(tree->OperGet() == GT_LCLHEAP);
assert(compiler->compLocallocUsed);
GenTree* size = tree->AsOp()->gtOp1;
noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
regNumber targetReg = tree->GetRegNum();
regNumber regCnt = REG_NA;
var_types type = genActualType(size->gtType);
emitAttr easz = emitTypeSize(type);
BasicBlock* endLabel = nullptr;
target_ssize_t lastTouchDelta = (target_ssize_t)-1;
#ifdef DEBUG
genStackPointerCheck(compiler->opts.compStackCheckOnRet, compiler->lvaReturnSpCheck);
#endif
noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
noway_assert(genStackLevel == 0); // Can't have anything on the stack
target_size_t stackAdjustment = 0;
target_size_t locAllocStackOffset = 0;
// Compute the amount of memory to allocate, properly aligned to STACK_ALIGN.
size_t amount = 0;
if (size->IsCnsIntOrI())
{
// If size is a constant, then it must be contained.
assert(size->isContained());
// If amount is zero then return null in targetReg
amount = size->AsIntCon()->gtIconVal;
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
goto BAILOUT;
}
// 'amount' is the total number of bytes to localloc, rounded up to STACK_ALIGN
amount = AlignUp(amount, STACK_ALIGN);
}
else
{
// The localloc requested memory size is non-constant.
// Put the size value in targetReg. If it is zero, bail out by returning null in targetReg.
genConsumeRegAndCopy(size, targetReg);
endLabel = genCreateTempLabel();
GetEmitter()->emitIns_R_R(INS_test, easz, targetReg, targetReg);
inst_JMP(EJ_je, endLabel);
// Compute the size of the block to allocate and perform alignment.
// If compInitMem=true, we can reuse targetReg as regcnt,
// since we don't need any internal registers.
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
// Above, we put the size in targetReg. Now, copy it to our new temp register if necessary.
inst_Mov(size->TypeGet(), regCnt, targetReg, /* canSkip */ true);
}
// Round up the number of bytes to allocate to a STACK_ALIGN boundary. This is done
// by code like:
// add reg, 15
// and reg, -16
// However, in the initialized memory case, we need the count of STACK_ALIGN-sized
// elements, not a byte count, after the alignment. So instead of the "and", which
// becomes unnecessary, generate a shift, e.g.:
// add reg, 15
// shr reg, 4
inst_RV_IV(INS_add, regCnt, STACK_ALIGN - 1, emitActualTypeSize(type));
if (compiler->info.compInitMem)
{
// Convert the count from a count of bytes to a loop count. We will loop once per
// stack alignment size, so each loop will zero 4 bytes on Windows/x86, and 16 bytes
// on x64 and Linux/x86.
//
// Note that we zero a single reg-size word per iteration on x86, and 2 reg-size
// words per iteration on x64. We will shift off all the stack alignment bits
// added above, so there is no need for an 'and' instruction.
// --- shr regCnt, 2 (or 4) ---
inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_PTRSIZE, regCnt, STACK_ALIGN_SHIFT);
}
else
{
// Otherwise, mask off the low bits to align the byte count.
inst_RV_IV(INS_AND, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
}
}
bool initMemOrLargeAlloc; // Declaration must be separate from initialization to avoid clang compiler error.
initMemOrLargeAlloc = compiler->info.compInitMem || (amount >= compiler->eeGetPageSize()); // must be >= not >
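// When set, we take the slow path below: zero-initialized or page-sized (and larger)
// allocations cannot be done with a simple SP adjustment.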
#if FEATURE_FIXED_OUT_ARGS
// If we have an outgoing arg area then we must adjust the SP by popping off the
// outgoing arg area. We will restore it right before we return from this method.
//
// Localloc returns stack space that aligned to STACK_ALIGN bytes. The following
// are the cases that need to be handled:
// i) Method has out-going arg area.
// It is guaranteed that size of out-going arg area is STACK_ALIGN'ed (see fgMorphArgs).
// Therefore, we will pop off the out-going arg area from RSP before allocating the localloc space.
// ii) Method has no out-going arg area.
// Nothing to pop off from the stack.
if (compiler->lvaOutgoingArgSpaceSize > 0)
{
assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
// aligned
// If the localloc amount is a small enough constant, and we're not initializing the allocated
// memory, then don't bother popping off the outgoing arg space first; just allocate the amount
// of space needed by the allocation, and call the bottom part the new outgoing arg space.
if ((amount > 0) && !initMemOrLargeAlloc)
{
lastTouchDelta = genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)amount, REG_NA);
stackAdjustment = 0;
locAllocStackOffset = (target_size_t)compiler->lvaOutgoingArgSpaceSize;
goto ALLOC_DONE;
}
inst_RV_IV(INS_add, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize, EA_PTRSIZE);
stackAdjustment += (target_size_t)compiler->lvaOutgoingArgSpaceSize;
locAllocStackOffset = stackAdjustment;
}
#endif
if (size->IsCnsIntOrI())
{
// We should reach here only for non-zero, constant size allocations.
assert(amount > 0);
assert((amount % STACK_ALIGN) == 0);
assert((amount % REGSIZE_BYTES) == 0);
// For small allocations we will generate up to six inline "push 0" instructions
size_t cntRegSizedWords = amount / REGSIZE_BYTES;
if (compiler->info.compInitMem && (cntRegSizedWords <= 6))
{
for (; cntRegSizedWords != 0; cntRegSizedWords--)
{
inst_IV(INS_push_hide, 0); // push_hide means don't track the stack
}
lastTouchDelta = 0;
goto ALLOC_DONE;
}
#ifdef TARGET_X86
bool needRegCntRegister = true;
#else // !TARGET_X86
bool needRegCntRegister = initMemOrLargeAlloc;
#endif // !TARGET_X86
if (needRegCntRegister)
{
// If compInitMem=true, we can reuse targetReg as regcnt.
// Since size is a constant, regCnt is not yet initialized.
assert(regCnt == REG_NA);
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
}
}
if (!initMemOrLargeAlloc)
{
// Since the size is less than a page, and we don't need to zero init memory, simply adjust ESP.
// ESP might already be in the guard page, so we must touch it BEFORE
// the alloc, not after.
assert(amount < compiler->eeGetPageSize()); // must be < not <=
lastTouchDelta = genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)amount, regCnt);
goto ALLOC_DONE;
}
// else, "mov regCnt, amount"
if (compiler->info.compInitMem)
{
// When initializing memory, we want 'amount' to be the loop count.
assert((amount % STACK_ALIGN) == 0);
amount /= STACK_ALIGN;
}
instGen_Set_Reg_To_Imm(((size_t)(int)amount == amount) ? EA_4BYTE : EA_8BYTE, regCnt, amount);
}
if (compiler->info.compInitMem)
{
// At this point 'regCnt' is set to the number of loop iterations for this loop, if each
// iteration zeros (and subtracts from the stack pointer) STACK_ALIGN bytes.
// Since we have to zero out the allocated memory AND ensure that RSP is always valid
// by tickling the pages, we will just push 0's on the stack.
assert(genIsValidIntReg(regCnt));
// Loop:
BasicBlock* loop = genCreateTempLabel();
genDefineTempLabel(loop);
static_assert_no_msg((STACK_ALIGN % REGSIZE_BYTES) == 0);
unsigned const count = (STACK_ALIGN / REGSIZE_BYTES);
for (unsigned i = 0; i < count; i++)
{
inst_IV(INS_push_hide, 0); // --- push REG_SIZE bytes of 0
}
// Note that the stack must always be aligned to STACK_ALIGN bytes
// Decrement the loop counter and loop if not done.
inst_RV(INS_dec, regCnt, TYP_I_IMPL);
inst_JMP(EJ_jne, loop);
lastTouchDelta = 0;
}
else
{
// At this point 'regCnt' is set to the total number of bytes to localloc.
// Negate this value before calling the function to adjust the stack (which
// adds to ESP).
inst_RV(INS_NEG, regCnt, TYP_I_IMPL);
regNumber regTmp = tree->GetSingleTempReg();
genStackPointerDynamicAdjustmentWithProbe(regCnt, regTmp);
// lastTouchDelta is dynamic, and can be up to a page. So if we have outgoing arg space,
// we're going to assume the worst and probe.
}
ALLOC_DONE:
// Re-adjust SP to allocate out-going arg area. Note: this also requires probes, if we have
// a very large stack adjustment! For simplicity, we use the same function used elsewhere,
// which probes the current address before subtracting. We may end up probing multiple
// times relatively "nearby".
if (stackAdjustment > 0)
{
assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
assert(lastTouchDelta >= -1);
if ((lastTouchDelta == (target_ssize_t)-1) ||
(stackAdjustment + (target_size_t)lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES >
compiler->eeGetPageSize()))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)stackAdjustment, REG_NA);
}
else
{
genStackPointerConstantAdjustment(-(ssize_t)stackAdjustment, REG_NA);
}
}
// Return the stackalloc'ed address in result register.
// TargetReg = RSP + locAllocStackOffset
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, (int)locAllocStackOffset);
if (endLabel != nullptr)
{
genDefineTempLabel(endLabel);
}
BAILOUT:
#ifdef JIT32_GCENCODER
if (compiler->lvaLocAllocSPvar != BAD_VAR_NUM)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
}
#endif // JIT32_GCENCODER
#ifdef DEBUG
// Update local variable to reflect the new stack pointer.
if (compiler->opts.compStackCheckOnRet)
{
noway_assert(compiler->lvaReturnSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvOnFrame);
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnSpCheck, 0);
}
#endif
genProduceReg(tree);
}
void CodeGen::genCodeForStoreBlk(GenTreeBlk* storeBlkNode)
{
assert(storeBlkNode->OperIs(GT_STORE_OBJ, GT_STORE_DYN_BLK, GT_STORE_BLK));
if (storeBlkNode->OperIs(GT_STORE_OBJ))
{
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
assert(storeBlkNode->OperIsCopyBlkOp());
assert(storeBlkNode->AsObj()->GetLayout()->HasGCPtr());
genCodeForCpObj(storeBlkNode->AsObj());
return;
}
bool isCopyBlk = storeBlkNode->OperIsCopyBlkOp();
switch (storeBlkNode->gtBlkOpKind)
{
#ifdef TARGET_AMD64
case GenTreeBlk::BlkOpKindHelper:
assert(!storeBlkNode->gtBlkOpGcUnsafe);
if (isCopyBlk)
{
genCodeForCpBlkHelper(storeBlkNode);
}
else
{
genCodeForInitBlkHelper(storeBlkNode);
}
break;
#endif // TARGET_AMD64
case GenTreeBlk::BlkOpKindRepInstr:
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
if (isCopyBlk)
{
genCodeForCpBlkRepMovs(storeBlkNode);
}
else
{
genCodeForInitBlkRepStos(storeBlkNode);
}
break;
case GenTreeBlk::BlkOpKindUnroll:
if (isCopyBlk)
{
#ifndef JIT32_GCENCODER
if (storeBlkNode->gtBlkOpGcUnsafe)
{
GetEmitter()->emitDisableGC();
}
#endif
genCodeForCpBlkUnroll(storeBlkNode);
#ifndef JIT32_GCENCODER
if (storeBlkNode->gtBlkOpGcUnsafe)
{
GetEmitter()->emitEnableGC();
}
#endif
}
else
{
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
genCodeForInitBlkUnroll(storeBlkNode);
}
break;
default:
unreached();
}
}
//
//------------------------------------------------------------------------
// genCodeForInitBlkRepStos: Generate code for InitBlk using rep stos.
//
// Arguments:
// initBlkNode - The Block store for which we are generating code.
//
void CodeGen::genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode)
{
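// The destination address goes in RDI, the fill value in RAX, and the byte count in RCX;
// "rep stosb" then stores AL to [RDI], RCX times.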
genConsumeBlockOp(initBlkNode, REG_RDI, REG_RAX, REG_RCX);
instGen(INS_r_stosb);
}
//----------------------------------------------------------------------------------
// genCodeForInitBlkUnroll: Generate unrolled block initialization code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
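// Notes:
//    The fill value is broadcast into an XMM/YMM register and written with SIMD stores where
//    profitable; any remaining bytes are written with plain GP-register stores.
//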
void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
regNumber dstAddrIndexReg = REG_NA;
unsigned dstAddrIndexScale = 1;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = dstAddr->AsAddrMode();
if (addrMode->HasBase())
{
dstAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
dstAddrIndexReg = genConsumeReg(addrMode->Index());
dstAddrIndexScale = addrMode->GetScale();
}
dstOffset = addrMode->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
regNumber srcIntReg = REG_NA;
GenTree* src = node->Data();
if (src->OperIs(GT_INIT_VAL))
{
assert(src->isContained());
src = src->AsUnOp()->gtGetOp1();
}
unsigned size = node->GetLayout()->GetSize();
// An SSE mov that accesses data larger than 8 bytes may be implemented using
// multiple memory accesses. Hence, the JIT must not use such stores when
// INITBLK zeroes a struct that contains GC pointers and can be observed by
// other threads (i.e. when dstAddr is not an address of a local).
// For example, this can happen when initializing a struct field of an object.
const bool canUse16BytesSimdMov = !node->IsOnHeapAndContainsReferences();
#ifdef TARGET_AMD64
// On Amd64 the JIT will not use SIMD stores for such structs and instead
// will always allocate a GP register for src node.
const bool willUseSimdMov = canUse16BytesSimdMov && (size >= XMM_REGSIZE_BYTES);
#else
// On X86 the JIT will use movq for structs that are 16 bytes or larger
// since it is more beneficial than using two mov-s from a GP register.
const bool willUseSimdMov = (size >= 16);
#endif
if (!src->isContained())
{
srcIntReg = genConsumeReg(src);
}
else
{
// If src is contained then it must be 0.
assert(src->IsIntegralConst(0));
assert(willUseSimdMov);
#ifdef TARGET_AMD64
assert(size >= XMM_REGSIZE_BYTES);
#else
assert(size % 8 == 0);
#endif
}
emitter* emit = GetEmitter();
assert(size <= INT32_MAX);
assert(dstOffset < (INT32_MAX - static_cast<int>(size)));
if (willUseSimdMov)
{
regNumber srcXmmReg = node->GetSingleTempReg(RBM_ALLFLOAT);
unsigned regSize = (size >= YMM_REGSIZE_BYTES) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX)
? YMM_REGSIZE_BYTES
: XMM_REGSIZE_BYTES;
if (src->gtSkipReloadOrCopy()->IsIntegralConst(0))
{
// If the source is constant 0 then always use xorps, it's faster
// than copying the constant from a GPR to a XMM register.
emit->emitIns_R_R(INS_xorps, EA_ATTR(regSize), srcXmmReg, srcXmmReg);
}
else
{
emit->emitIns_Mov(INS_movd, EA_PTRSIZE, srcXmmReg, srcIntReg, /* canSkip */ false);
emit->emitIns_R_R(INS_punpckldq, EA_16BYTE, srcXmmReg, srcXmmReg);
#ifdef TARGET_X86
// For x86, we need one more to convert it from 8 bytes to 16 bytes.
emit->emitIns_R_R(INS_punpckldq, EA_16BYTE, srcXmmReg, srcXmmReg);
#endif
if (regSize == YMM_REGSIZE_BYTES)
{
// Extend the bytes in the lower lanes to the upper lanes
emit->emitIns_R_R_R_I(INS_vinsertf128, EA_32BYTE, srcXmmReg, srcXmmReg, srcXmmReg, 1);
}
}
instruction simdMov = simdUnalignedMovIns();
unsigned bytesWritten = 0;
while (bytesWritten < size)
{
#ifdef TARGET_X86
if (!canUse16BytesSimdMov || (bytesWritten + regSize > size))
{
simdMov = INS_movq;
regSize = 8;
}
#endif
if (bytesWritten + regSize > size)
{
assert(srcIntReg != REG_NA);
break;
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
dstOffset += regSize;
bytesWritten += regSize;
if (regSize == YMM_REGSIZE_BYTES && size - bytesWritten < YMM_REGSIZE_BYTES)
{
regSize = XMM_REGSIZE_BYTES;
}
}
size -= bytesWritten;
}
// Fill the remainder using normal stores.
#ifdef TARGET_AMD64
unsigned regSize = REGSIZE_BYTES;
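// Shrink regSize to the largest power-of-two store that still fits the remaining size.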
while (regSize > size)
{
regSize /= 2;
}
for (; size > regSize; size -= regSize, dstOffset += regSize)
{
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
if (size > 0)
{
unsigned shiftBack = regSize - size;
assert(shiftBack <= regSize);
dstOffset -= shiftBack;
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#else // TARGET_X86
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#endif
}
#ifdef TARGET_AMD64
//------------------------------------------------------------------------
// genCodeForInitBlkHelper - Generate code for an InitBlk node by means of the VM memset helper call
//
// Arguments:
// initBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForInitBlkHelper(GenTreeBlk* initBlkNode)
{
// Destination address goes in arg0, the fill value goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
}
#endif // TARGET_AMD64
#ifdef FEATURE_PUT_STRUCT_ARG_STK
//------------------------------------------------------------------------
// genCodeForLoadOffset: Generate code for a load from some address + offset.
//
// Arguments:
//    ins      - the load instruction to use
//    size     - the size of the load
//    dst      - the destination register
//    baseNode - tree node which can be either a local address or an arbitrary node
//    offset   - distance from the baseNode from which to load
//
void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* baseNode, unsigned offset)
{
emitter* emit = GetEmitter();
if (baseNode->OperIsLocalAddr())
{
const GenTreeLclVarCommon* lclVar = baseNode->AsLclVarCommon();
offset += lclVar->GetLclOffs();
emit->emitIns_R_S(ins, size, dst, lclVar->GetLclNum(), offset);
}
else
{
emit->emitIns_R_AR(ins, size, dst, baseNode->GetRegNum(), offset);
}
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
//----------------------------------------------------------------------------------
// genCodeForCpBlkUnroll - Generate unrolled block copy code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
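// Notes:
//    The copy is done with the widest profitable SIMD register first, then with progressively
//    smaller GP-register moves for the remaining tail bytes.
//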
void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
regNumber dstAddrIndexReg = REG_NA;
unsigned dstAddrIndexScale = 1;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = dstAddr->AsAddrMode();
if (addrMode->HasBase())
{
dstAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
dstAddrIndexReg = genConsumeReg(addrMode->Index());
dstAddrIndexScale = addrMode->GetScale();
}
dstOffset = addrMode->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
const GenTreeLclVarCommon* lclVar = dstAddr->AsLclVarCommon();
dstLclNum = lclVar->GetLclNum();
dstOffset = lclVar->GetLclOffs();
}
unsigned srcLclNum = BAD_VAR_NUM;
regNumber srcAddrBaseReg = REG_NA;
regNumber srcAddrIndexReg = REG_NA;
unsigned srcAddrIndexScale = 1;
int srcOffset = 0;
GenTree* src = node->Data();
assert(src->isContained());
if (src->OperIs(GT_LCL_VAR, GT_LCL_FLD))
{
srcLclNum = src->AsLclVarCommon()->GetLclNum();
srcOffset = src->AsLclVarCommon()->GetLclOffs();
}
else
{
assert(src->OperIs(GT_IND));
GenTree* srcAddr = src->AsIndir()->Addr();
if (!srcAddr->isContained())
{
srcAddrBaseReg = genConsumeReg(srcAddr);
}
else if (srcAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = srcAddr->AsAddrMode();
if (addrMode->HasBase())
{
srcAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
srcAddrIndexReg = genConsumeReg(addrMode->Index());
srcAddrIndexScale = addrMode->GetScale();
}
srcOffset = addrMode->Offset();
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(srcOffset < (INT32_MAX - static_cast<int>(size)));
assert(dstOffset < (INT32_MAX - static_cast<int>(size)));
if (size >= XMM_REGSIZE_BYTES)
{
regNumber tempReg = node->GetSingleTempReg(RBM_ALLFLOAT);
instruction simdMov = simdUnalignedMovIns();
// Get the largest SIMD register available if the size is large enough
unsigned regSize = (size >= YMM_REGSIZE_BYTES) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX)
? YMM_REGSIZE_BYTES
: XMM_REGSIZE_BYTES;
while (size >= regSize)
{
for (; size >= regSize; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(simdMov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(simdMov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(simdMov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(simdMov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
// Size is too large for YMM moves, try stepping down to XMM size to finish SIMD copies.
if (regSize == YMM_REGSIZE_BYTES)
{
regSize = XMM_REGSIZE_BYTES;
}
}
}
// Fill the remainder with normal loads/stores
if (size > 0)
{
regNumber tempReg = node->GetSingleTempReg(RBM_ALLINT);
#ifdef TARGET_AMD64
unsigned regSize = REGSIZE_BYTES;
while (regSize > size)
{
regSize /= 2;
}
for (; size > regSize; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
if (size > 0)
{
unsigned shiftBack = regSize - size;
assert(shiftBack <= regSize);
srcOffset -= shiftBack;
dstOffset -= shiftBack;
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#else // TARGET_X86
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#endif
}
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkRepMovs - Generate code for CpBlk by using rep movs
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode)
{
// Destination address goes in RDI, source address goes in RSI, and size goes in RCX.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(cpBlkNode, REG_RDI, REG_RSI, REG_RCX);
instGen(INS_r_movsb);
}
#ifdef FEATURE_PUT_STRUCT_ARG_STK
//------------------------------------------------------------------------
// CodeGen::genMove8IfNeeded: Conditionally move 8 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// longTmpReg - The tmp register to be used for the long value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (8 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// On x86, longTmpReg must be an xmm reg; on x64 it must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove8IfNeeded(unsigned size, regNumber longTmpReg, GenTree* srcAddr, unsigned offset)
{
#ifdef TARGET_X86
instruction longMovIns = INS_movq;
#else // !TARGET_X86
instruction longMovIns = INS_mov;
#endif // !TARGET_X86
if ((size & 8) != 0)
{
genCodeForLoadOffset(longMovIns, EA_8BYTE, longTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_LONG, longTmpReg, offset);
return 8;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove4IfNeeded: Conditionally move 4 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
//    intTmpReg  - The tmp register to be used for the int value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (4 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove4IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 4) != 0)
{
genCodeForLoadOffset(INS_mov, EA_4BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_INT, intTmpReg, offset);
return 4;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove2IfNeeded: Conditionally move 2 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
//    intTmpReg  - The tmp register to be used for the short value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (2 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove2IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 2) != 0)
{
genCodeForLoadOffset(INS_mov, EA_2BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_SHORT, intTmpReg, offset);
return 2;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove1IfNeeded: Conditionally move 1 byte of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
//    intTmpReg  - The tmp register to be used for the byte value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (1 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove1IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 1) != 0)
{
genCodeForLoadOffset(INS_mov, EA_1BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_BYTE, intTmpReg, offset);
return 1;
}
return 0;
}
//---------------------------------------------------------------------------------------------------------------//
// genStructPutArgUnroll: Generates code for passing a struct arg on stack by value using loop unrolling.
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// m_stkArgVarNum must be set to the base var number, relative to which the by-val struct will be copied to the
// stack.
//
// TODO-Amd64-Unix: Try to share code with copyblk.
// Need refactoring of copyblk before it could be used for putarg_stk.
// The difference for now is that a putarg_stk contains its children, while cpyblk does not.
// This creates differences in code. After some significant refactoring it could be reused.
//
void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode)
{
GenTree* src = putArgNode->AsOp()->gtOp1;
// We will never call this method for SIMD types, which are stored directly
// in genPutStructArgStk().
assert(src->isContained() && src->OperIs(GT_OBJ) && src->TypeIs(TYP_STRUCT));
assert(!src->AsObj()->GetLayout()->HasGCPtr());
#ifdef TARGET_X86
assert(!m_pushStkArg);
#endif
unsigned size = putArgNode->GetStackByteSize();
#ifdef TARGET_X86
assert((XMM_REGSIZE_BYTES <= size) && (size <= CPBLK_UNROLL_LIMIT));
#else // !TARGET_X86
assert(size <= CPBLK_UNROLL_LIMIT);
#endif // !TARGET_X86
if (src->AsOp()->gtOp1->isUsedFromReg())
{
genConsumeReg(src->AsOp()->gtOp1);
}
unsigned offset = 0;
regNumber xmmTmpReg = REG_NA;
regNumber intTmpReg = REG_NA;
regNumber longTmpReg = REG_NA;
if (size >= XMM_REGSIZE_BYTES)
{
xmmTmpReg = putArgNode->GetSingleTempReg(RBM_ALLFLOAT);
}
if ((size % XMM_REGSIZE_BYTES) != 0)
{
intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT);
}
#ifdef TARGET_X86
longTmpReg = xmmTmpReg;
#else
longTmpReg = intTmpReg;
#endif
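// On x86 the 8-byte tail is moved through an XMM register (movq); on x64 a GP register is used.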
// Let's use SSE2 to be able to do 16 bytes at a time with loads and stores.
size_t slots = size / XMM_REGSIZE_BYTES;
while (slots-- > 0)
{
// TODO: In the below code the load and store instructions are for 16 bytes, but the
// type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
// this probably needs to be changed.
// Load
genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmTmpReg, src->gtGetOp1(), offset);
// Store
genStoreRegToStackArg(TYP_STRUCT, xmmTmpReg, offset);
offset += XMM_REGSIZE_BYTES;
}
// Fill the remainder (15 bytes or less) if there's one.
if ((size % XMM_REGSIZE_BYTES) != 0)
{
offset += genMove8IfNeeded(size, longTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove4IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove2IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove1IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
assert(offset == size);
}
}
//------------------------------------------------------------------------
// genStructPutArgRepMovs: Generates code for passing a struct arg by value on stack using Rep Movs.
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Preconditions:
// m_stkArgVarNum must be set to the base var number, relative to which the by-val struct bits will go.
//
void CodeGen::genStructPutArgRepMovs(GenTreePutArgStk* putArgNode)
{
GenTree* src = putArgNode->gtGetOp1();
assert(src->TypeGet() == TYP_STRUCT);
assert(!src->AsObj()->GetLayout()->HasGCPtr());
// Make sure we got the arguments of the cpblk operation in the right registers, and that
// 'src' is contained as expected.
assert(putArgNode->gtRsvdRegs == (RBM_RDI | RBM_RCX | RBM_RSI));
assert(src->isContained());
genConsumePutStructArgStk(putArgNode, REG_RDI, REG_RSI, REG_RCX);
instGen(INS_r_movsb);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genStructPutArgPush: Generates code for passing a struct arg by value on stack using "push".
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// Used only on x86, in two cases:
// - Structs 4, 8, or 12 bytes in size (less than XMM_REGSIZE_BYTES, multiple of TARGET_POINTER_SIZE).
// - Structs that contain GC pointers - they are guaranteed to be sized correctly by the VM.
//
void CodeGen::genStructPutArgPush(GenTreePutArgStk* putArgNode)
{
// On x86, any struct that contains GC references must be stored to the stack using `push` instructions so
// that the emitter properly detects the need to update the method's GC information.
//
// Strictly speaking, it is only necessary to use "push" to store the GC references themselves, so for structs
// with large numbers of consecutive non-GC-ref-typed fields, we may be able to improve the code size in the
// future.
assert(m_pushStkArg);
GenTree* src = putArgNode->Data();
GenTree* srcAddr = putArgNode->Data()->AsObj()->Addr();
regNumber srcAddrReg = srcAddr->GetRegNum();
const bool srcAddrInReg = srcAddrReg != REG_NA;
unsigned srcLclNum = 0;
unsigned srcLclOffset = 0;
if (srcAddrInReg)
{
srcAddrReg = genConsumeReg(srcAddr);
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcLclOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
ClassLayout* layout = src->AsObj()->GetLayout();
const unsigned byteSize = putArgNode->GetStackByteSize();
assert((byteSize % TARGET_POINTER_SIZE == 0) && ((byteSize < XMM_REGSIZE_BYTES) || layout->HasGCPtr()));
const unsigned numSlots = byteSize / TARGET_POINTER_SIZE;
assert(putArgNode->gtNumSlots == numSlots);
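// Push the slots in reverse order so that the first slot of the struct ends up at the lowest stack address.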
for (int i = numSlots - 1; i >= 0; --i)
{
emitAttr slotAttr = emitTypeSize(layout->GetGCPtrType(i));
const unsigned byteOffset = i * TARGET_POINTER_SIZE;
if (srcAddrInReg)
{
GetEmitter()->emitIns_AR_R(INS_push, slotAttr, REG_NA, srcAddrReg, byteOffset);
}
else
{
GetEmitter()->emitIns_S(INS_push, slotAttr, srcLclNum, srcLclOffset + byteOffset);
}
AddStackLevel(TARGET_POINTER_SIZE);
}
}
#endif // TARGET_X86
#ifndef TARGET_X86
//------------------------------------------------------------------------
// genStructPutArgPartialRepMovs: Generates code for passing a struct arg by value on stack using
// a mix of pointer-sized stores, "movsq" and "rep movsd".
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// Used on non-x86 targets (Unix x64) for structs with GC pointers.
//
void CodeGen::genStructPutArgPartialRepMovs(GenTreePutArgStk* putArgNode)
{
// Consume these registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumePutStructArgStk(putArgNode, REG_RDI, REG_RSI, REG_NA);
GenTreeObj* src = putArgNode->gtGetOp1()->AsObj();
ClassLayout* layout = src->GetLayout();
const bool srcIsLocal = src->Addr()->OperIsLocalAddr();
const emitAttr srcAddrAttr = srcIsLocal ? EA_PTRSIZE : EA_BYREF;
#if DEBUG
unsigned numGCSlotsCopied = 0;
#endif // DEBUG
assert(layout->HasGCPtr());
const unsigned byteSize = putArgNode->GetStackByteSize();
assert(byteSize % TARGET_POINTER_SIZE == 0);
const unsigned numSlots = byteSize / TARGET_POINTER_SIZE;
assert(putArgNode->gtNumSlots == numSlots);
// No need to disable GC the way COPYOBJ does; here the refs are always copied with atomic, pointer-sized operations.
for (unsigned i = 0; i < numSlots;)
{
if (!layout->IsGCPtr(i))
{
// Let's see if we can use rep movsp (alias for movsd or movsq for 32 and 64 bits respectively)
// instead of a sequence of movsp instructions to save cycles and code size.
unsigned adjacentNonGCSlotCount = 0;
do
{
adjacentNonGCSlotCount++;
i++;
} while ((i < numSlots) && !layout->IsGCPtr(i));
// If we have a very small contiguous non-ref region, it's better just to
// emit a sequence of movsp instructions
if (adjacentNonGCSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
{
for (; adjacentNonGCSlotCount > 0; adjacentNonGCSlotCount--)
{
instGen(INS_movsp);
}
}
else
{
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, adjacentNonGCSlotCount);
instGen(INS_r_movsp);
}
}
else
{
// We have a GC (byref or ref) pointer
// TODO-Amd64-Unix: Here a better solution (for code size and CQ) would be to use movsp instruction,
// but the logic for emitting a GC info record is not available (it is internal for the emitter
// only.) See emitGCVarLiveUpd function. If we could call it separately, we could do
// instGen(INS_movsp); and emission of gc info.
var_types memType = layout->GetGCPtrType(i);
GetEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0);
genStoreRegToStackArg(memType, REG_RCX, i * TARGET_POINTER_SIZE);
#ifdef DEBUG
numGCSlotsCopied++;
#endif // DEBUG
i++;
if (i < numSlots)
{
// Source for the copy operation.
// If a LocalAddr, use EA_PTRSIZE - copy from stack.
// If not a LocalAddr, use EA_BYREF - the source location is not on the stack.
GetEmitter()->emitIns_R_I(INS_add, srcAddrAttr, REG_RSI, TARGET_POINTER_SIZE);
// Always copying to the stack - outgoing arg area
// (or the outgoing arg area of the caller for a tail call) - use EA_PTRSIZE.
GetEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_RDI, TARGET_POINTER_SIZE);
}
}
}
assert(numGCSlotsCopied == layout->GetGCPtrCount());
}
#endif // !TARGET_X86
//------------------------------------------------------------------------
// If any Vector3 args are on the stack and they are not pass-by-ref, the upper 32 bits
// must be cleared to zero. The native compiler doesn't clear the upper bits,
// and there is no way to know if the caller is native or not. So, the upper
// 32 bits of a Vector3 argument on the stack are always cleared to zero.
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void CodeGen::genClearStackVec3ArgUpperBits()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genClearStackVec3ArgUpperBits()\n");
}
#endif
assert(compiler->compGeneratingProlog);
for (unsigned varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(varDsc->lvIsParam);
// Does the var have a SIMD12 type?
if (varDsc->lvType != TYP_SIMD12)
{
continue;
}
if (!varDsc->lvIsRegArg)
{
// Clear the upper 32 bits by mov dword ptr [V_ARG_BASE+0xC], 0
GetEmitter()->emitIns_S_I(ins_Store(TYP_INT), EA_4BYTE, varNum, genTypeSize(TYP_FLOAT) * 3, 0);
}
else
{
// Assume that for x64 linux, an argument is fully in registers
// or fully on stack.
regNumber argReg = varDsc->GetOtherArgReg();
// Clear the upper 32 bits by two shift instructions.
// argReg = argReg << 96
GetEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
// argReg = argReg >> 96
GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
}
}
}
#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#endif // FEATURE_PUT_STRUCT_ARG_STK
//
// genCodeForCpObj - Generate code for CpObj nodes to copy structs that have interleaved
// GC pointers.
//
// Arguments:
// cpObjNode - the GT_STORE_OBJ
//
// Notes:
// This will generate a sequence of movsp instructions for the cases of non-gc members,
// and calls to the CORINFO_HELP_ASSIGN_BYREF helper otherwise.
// Note that movsp is an alias for movsd on x86 and movsq on x64.
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
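// For illustration, a rough sketch of the emitted code (assuming the destination is
// not on the stack and the non-gc run is below CPOBJ_NONGC_SLOTS_LIMIT) for a layout
// whose slots are { non-gc, non-gc, ref, non-gc }:
//     movsq                            ; two adjacent non-gc slots
//     movsq
//     call CORINFO_HELP_ASSIGN_BYREF   ; the gc slot; the helper also advances RSI/RDI
//     movsq
//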
void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
{
// Make sure we got the arguments of the cpobj operation in the right registers
GenTree* dstAddr = cpObjNode->Addr();
GenTree* source = cpObjNode->Data();
GenTree* srcAddr = nullptr;
var_types srcAddrType = TYP_BYREF;
bool dstOnStack = dstAddr->gtSkipReloadOrCopy()->OperIsLocalAddr();
#ifdef DEBUG
// If the GenTree node has data about GC pointers, this means we're dealing
// with CpObj, so this requires special logic.
assert(cpObjNode->GetLayout()->HasGCPtr());
// MovSp (alias for movsq on x64 and movsd on x86) instruction is used for copying non-gcref fields
// and it needs src = RSI and dst = RDI.
// Either these registers must not contain lclVars, or they must be dying or marked for spill.
// This is because these registers are incremented as we go through the struct.
if (!source->IsLocal())
{
assert(source->gtOper == GT_IND);
srcAddr = source->gtGetOp1();
GenTree* actualSrcAddr = srcAddr->gtSkipReloadOrCopy();
GenTree* actualDstAddr = dstAddr->gtSkipReloadOrCopy();
unsigned srcLclVarNum = BAD_VAR_NUM;
unsigned dstLclVarNum = BAD_VAR_NUM;
bool isSrcAddrLiveOut = false;
bool isDstAddrLiveOut = false;
if (genIsRegCandidateLocal(actualSrcAddr))
{
srcLclVarNum = actualSrcAddr->AsLclVarCommon()->GetLclNum();
isSrcAddrLiveOut = ((actualSrcAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
}
if (genIsRegCandidateLocal(actualDstAddr))
{
dstLclVarNum = actualDstAddr->AsLclVarCommon()->GetLclNum();
isDstAddrLiveOut = ((actualDstAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
}
assert((actualSrcAddr->GetRegNum() != REG_RSI) || !isSrcAddrLiveOut ||
((srcLclVarNum == dstLclVarNum) && !isDstAddrLiveOut));
assert((actualDstAddr->GetRegNum() != REG_RDI) || !isDstAddrLiveOut ||
((srcLclVarNum == dstLclVarNum) && !isSrcAddrLiveOut));
srcAddrType = srcAddr->TypeGet();
}
#endif // DEBUG
// Consume the operands and get them into the right registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumeBlockOp(cpObjNode, REG_RDI, REG_RSI, REG_NA);
gcInfo.gcMarkRegPtrVal(REG_RSI, srcAddrType);
gcInfo.gcMarkRegPtrVal(REG_RDI, dstAddr->TypeGet());
unsigned slots = cpObjNode->GetLayout()->GetSlotCount();
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
{
if (slots >= CPOBJ_NONGC_SLOTS_LIMIT)
{
// If the destination of the CpObj is on the stack, make sure we allocated
// RCX to emit the movsp (alias for movsd or movsq for 32 and 64 bits respectively).
assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, slots);
instGen(INS_r_movsp);
}
else
{
// For small structs, it's better to emit a sequence of movsp than to
// emit a rep movsp instruction.
while (slots > 0)
{
instGen(INS_movsp);
slots--;
}
}
}
else
{
ClassLayout* layout = cpObjNode->GetLayout();
unsigned gcPtrCount = layout->GetGCPtrCount();
unsigned i = 0;
while (i < slots)
{
if (!layout->IsGCPtr(i))
{
// Let's see if we can use rep movsp instead of a sequence of movsp instructions
// to save cycles and code size.
unsigned nonGcSlotCount = 0;
do
{
nonGcSlotCount++;
i++;
} while ((i < slots) && !layout->IsGCPtr(i));
// If we have a very small contiguous non-gc region, it's better just to
// emit a sequence of movsp instructions
if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
{
while (nonGcSlotCount > 0)
{
instGen(INS_movsp);
nonGcSlotCount--;
}
}
else
{
// Otherwise, we can save code-size and improve CQ by emitting
// rep movsp (alias for movsd/movsq for x86/x64)
assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
instGen(INS_r_movsp);
}
}
else
{
genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
gcPtrCount--;
i++;
}
}
assert(gcPtrCount == 0);
}
// Clear the gcInfo for RSI and RDI.
// While we normally update GC info prior to the last instruction that uses them,
// these actually live into the helper call.
gcInfo.gcMarkRegSetNpt(RBM_RSI);
gcInfo.gcMarkRegSetNpt(RBM_RDI);
}
#ifdef TARGET_AMD64
//----------------------------------------------------------------------------------
// genCodeForCpBlkHelper - Generate code for a CpBlk node by means of the VM memcpy helper call
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode)
{
// Destination address goes in arg0, source address goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
}
#endif // TARGET_AMD64
// generate code to do a switch statement based on a table of ip-relative offsets
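// A rough sketch of the emitted sequence (register names are illustrative only):
//     mov  ebx, dword ptr [rbx + 4*rax]   ; load the ip-relative offset from the jump table
//     lea  rcx, [reloc fgFirstBB]         ; absolute address the offsets are relative to
//     add  rbx, rcx
//     jmp  rbx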
void CodeGen::genTableBasedSwitch(GenTree* treeNode)
{
genConsumeOperands(treeNode->AsOp());
regNumber idxReg = treeNode->AsOp()->gtOp1->GetRegNum();
regNumber baseReg = treeNode->AsOp()->gtOp2->GetRegNum();
regNumber tmpReg = treeNode->GetSingleTempReg();
// load the ip-relative offset (which is relative to start of fgFirstBB)
GetEmitter()->emitIns_R_ARX(INS_mov, EA_4BYTE, baseReg, baseReg, idxReg, 4, 0);
// add it to the absolute address of fgFirstBB
GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, tmpReg);
GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, baseReg, tmpReg);
// jmp baseReg
GetEmitter()->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), baseReg);
}
// emits the table and an instruction to get the address of the first element
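// A minimal sketch of what this produces (label names are illustrative): a read-only
// data table such as
//     J_M003_DS04 LABEL DWORD
//         DD L_M003_BB01
//         DD L_M003_BB02
// plus an "lea targetReg, [reloc of the table]" in the code stream; genTableBasedSwitch
// then indexes into this table.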
void CodeGen::genJumpTable(GenTree* treeNode)
{
noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
assert(treeNode->OperGet() == GT_JMPTABLE);
unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
unsigned jmpTabOffs;
unsigned jmpTabBase;
jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
jmpTabOffs = 0;
JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
noway_assert(target->bbFlags & BBF_HAS_LABEL);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
GetEmitter()->emitDataGenData(i, target);
}
GetEmitter()->emitDataGenEnd();
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
GetEmitter()->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), treeNode->GetRegNum(),
compiler->eeFindJitDataOffs(jmpTabBase), 0);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForLockAdd: Generate code for a GT_LOCKADD node
//
// Arguments:
// node - the GT_LOCKADD node
//
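// A minimal sketch of the emitted code, assuming a 4-byte add of a contained
// immediate (e.g. an interlocked increment):
//     lock
//     add  dword ptr [addrReg], 1
//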
void CodeGen::genCodeForLockAdd(GenTreeOp* node)
{
assert(node->OperIs(GT_LOCKADD));
GenTree* addr = node->gtGetOp1();
GenTree* data = node->gtGetOp2();
emitAttr size = emitActualTypeSize(data->TypeGet());
assert(addr->isUsedFromReg());
assert(data->isUsedFromReg() || data->isContainedIntOrIImmed());
assert((size == EA_4BYTE) || (size == EA_PTRSIZE));
genConsumeOperands(node);
instGen(INS_lock);
if (data->isContainedIntOrIImmed())
{
int imm = static_cast<int>(data->AsIntCon()->IconValue());
assert(imm == data->AsIntCon()->IconValue());
GetEmitter()->emitIns_I_AR(INS_add, size, imm, addr->GetRegNum(), 0);
}
else
{
GetEmitter()->emitIns_AR_R(INS_add, size, data->GetRegNum(), addr->GetRegNum(), 0);
}
}
//------------------------------------------------------------------------
// genLockedInstructions: Generate code for a GT_XADD or GT_XCHG node.
//
// Arguments:
// node - the GT_XADD/XCHG node
//
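// A rough sketch of the emitted code for GT_XADD, assuming the node's register
// differs from the data register:
//     mov  targetReg, dataReg
//     lock
//     xadd [addrReg], targetReg        ; targetReg receives the original memory value
// For GT_XCHG an xchg is emitted instead; its lock prefix is implicit.
//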
void CodeGen::genLockedInstructions(GenTreeOp* node)
{
assert(node->OperIs(GT_XADD, GT_XCHG));
GenTree* addr = node->gtGetOp1();
GenTree* data = node->gtGetOp2();
emitAttr size = emitTypeSize(node->TypeGet());
assert(addr->isUsedFromReg());
assert(data->isUsedFromReg());
assert((size == EA_4BYTE) || (size == EA_PTRSIZE));
genConsumeOperands(node);
// If the destination register is different from the data register then we need
// to first move the data to the target register. Make sure we don't overwrite
// the address; the register allocator should have taken care of this.
assert((node->GetRegNum() != addr->GetRegNum()) || (node->GetRegNum() == data->GetRegNum()));
GetEmitter()->emitIns_Mov(INS_mov, size, node->GetRegNum(), data->GetRegNum(), /* canSkip */ true);
instruction ins = node->OperIs(GT_XADD) ? INS_xadd : INS_xchg;
// XCHG has an implied lock prefix when the first operand is a memory operand.
if (ins != INS_xchg)
{
instGen(INS_lock);
}
GetEmitter()->emitIns_AR_R(ins, size, node->GetRegNum(), addr->GetRegNum(), 0);
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForCmpXchg: Produce code for a GT_CMPXCHG node.
//
// Arguments:
// tree - the GT_CMPXCHG node
//
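// A rough sketch of the emitted code:
//     mov  rax, comparandReg            ; the comparand must live in RAX
//     lock
//     cmpxchg [locationReg], valueReg   ; on match, [location] = value
//     mov  targetReg, rax               ; the original memory value is returned in RAX
//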
void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* tree)
{
assert(tree->OperIs(GT_CMPXCHG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
GenTree* location = tree->gtOpLocation; // arg1
GenTree* value = tree->gtOpValue; // arg2
GenTree* comparand = tree->gtOpComparand; // arg3
assert(location->GetRegNum() != REG_NA && location->GetRegNum() != REG_RAX);
assert(value->GetRegNum() != REG_NA && value->GetRegNum() != REG_RAX);
genConsumeReg(location);
genConsumeReg(value);
genConsumeReg(comparand);
// comparand goes to RAX;
// Note that we must issue this move after the genConsumeRegs(), in case any of the above
// have a GT_COPY from RAX.
inst_Mov(comparand->TypeGet(), REG_RAX, comparand->GetRegNum(), /* canSkip */ true);
// location is Rm
instGen(INS_lock);
GetEmitter()->emitIns_AR_R(INS_cmpxchg, emitTypeSize(targetType), value->GetRegNum(), location->GetRegNum(), 0);
// Result is in RAX
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
genProduceReg(tree);
}
// generate code for BoundsCheck nodes
void CodeGen::genRangeCheck(GenTree* oper)
{
noway_assert(oper->OperIs(GT_BOUNDS_CHECK));
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
GenTree* arrIndex = bndsChk->GetIndex();
GenTree* arrLen = bndsChk->GetArrayLength();
GenTree * src1, *src2;
emitJumpKind jmpKind;
instruction cmpKind;
genConsumeRegs(arrIndex);
genConsumeRegs(arrLen);
if (arrIndex->IsIntegralConst(0) && arrLen->isUsedFromReg())
{
// arrIndex is 0 and arrLen is in a reg. In this case
// we can generate
// test reg, reg
// since arrLen is non-negative
src1 = arrLen;
src2 = arrLen;
jmpKind = EJ_je;
cmpKind = INS_test;
}
else if (arrIndex->isContainedIntOrIImmed())
{
// arrIndex is a contained constant. In this case
// we will generate one of the following
// cmp [mem], immed (if arrLen is a memory op)
// cmp reg, immed (if arrLen is in a reg)
//
// That is, arrLen cannot be a contained immed.
assert(!arrLen->isContainedIntOrIImmed());
src1 = arrLen;
src2 = arrIndex;
jmpKind = EJ_jbe;
cmpKind = INS_cmp;
}
else
{
// arrIndex could either be a contained memory op or a reg
// In this case we will generate one of the following
// cmp [mem], immed (if arrLen is a constant)
// cmp [mem], reg (if arrLen is in a reg)
// cmp reg, immed (if arrIndex is in a reg)
// cmp reg1, reg2 (if arrIndex is in reg1)
// cmp reg, [mem] (if arrLen is a memory op)
//
// That is, only one of arrIndex or arrLen can be a memory op.
assert(!arrIndex->isUsedFromMemory() || !arrLen->isUsedFromMemory());
src1 = arrIndex;
src2 = arrLen;
jmpKind = EJ_jae;
cmpKind = INS_cmp;
}
var_types bndsChkType = src2->TypeGet();
#if DEBUG
// Bounds checks can only be 32 or 64 bit sized comparisons.
assert(bndsChkType == TYP_INT || bndsChkType == TYP_LONG);
// The type of the bounds check should always be wide enough to compare against the index.
assert(emitTypeSize(bndsChkType) >= emitTypeSize(src1->TypeGet()));
#endif // DEBUG
GetEmitter()->emitInsBinary(cmpKind, emitTypeSize(bndsChkType), src1, src2);
genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
}
//---------------------------------------------------------------------
// genCodeForPhysReg - generate code for a GT_PHYSREG node
//
// Arguments
// tree - the GT_PHYSREG node
//
// Return value:
// None
//
void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree)
{
assert(tree->OperIs(GT_PHYSREG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
inst_Mov(targetType, targetReg, tree->gtSrcReg, /* canSkip */ true);
genTransferRegGCState(targetReg, tree->gtSrcReg);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genCodeForNullCheck - generate code for a GT_NULLCHECK node
//
// Arguments
// tree - the GT_NULLCHECK node
//
// Return value:
// None
//
void CodeGen::genCodeForNullCheck(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_NULLCHECK));
assert(tree->gtOp1->isUsedFromReg());
regNumber reg = genConsumeReg(tree->gtOp1);
GetEmitter()->emitIns_AR_R(INS_cmp, emitTypeSize(tree), reg, reg, 0);
}
//------------------------------------------------------------------------
// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
// producing the effective index by subtracting the lower bound.
//
// Arguments:
// arrIndex - the node for which we're generating code
//
// Return Value:
// None.
//
void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
GenTree* arrObj = arrIndex->ArrObj();
GenTree* indexNode = arrIndex->IndexExpr();
regNumber arrReg = genConsumeReg(arrObj);
regNumber indexReg = genConsumeReg(indexNode);
regNumber tgtReg = arrIndex->GetRegNum();
unsigned dim = arrIndex->gtCurrDim;
unsigned rank = arrIndex->gtArrRank;
var_types elemType = arrIndex->gtArrElemType;
noway_assert(tgtReg != REG_NA);
// Subtract the lower bound for this dimension.
// TODO-XArch-CQ: make this contained if it's an immediate that fits.
inst_Mov(indexNode->TypeGet(), tgtReg, indexReg, /* canSkip */ true);
GetEmitter()->emitIns_R_AR(INS_sub, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
compiler->eeGetMDArrayLowerBoundOffset(rank, dim));
GetEmitter()->emitIns_R_AR(INS_cmp, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
compiler->eeGetMDArrayLengthOffset(rank, dim));
genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL);
genProduceReg(arrIndex);
}
//------------------------------------------------------------------------
// genCodeForArrOffset: Generates code to compute the flattened array offset for
// one dimension of an array reference:
// result = (prevDimOffset * dimSize) + effectiveIndex
// where dimSize is obtained from the arrObj operand
//
// Arguments:
// arrOffset - the node for which we're generating code
//
// Return Value:
// None.
//
// Notes:
// dimSize and effectiveIndex are always non-negative, the former by design,
// and the latter because it has been normalized to be zero-based.
void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
{
GenTree* offsetNode = arrOffset->gtOffset;
GenTree* indexNode = arrOffset->gtIndex;
GenTree* arrObj = arrOffset->gtArrObj;
regNumber tgtReg = arrOffset->GetRegNum();
assert(tgtReg != REG_NA);
unsigned dim = arrOffset->gtCurrDim;
unsigned rank = arrOffset->gtArrRank;
var_types elemType = arrOffset->gtArrElemType;
// First, consume the operands in the correct order.
regNumber offsetReg = REG_NA;
regNumber tmpReg = REG_NA;
if (!offsetNode->IsIntegralConst(0))
{
offsetReg = genConsumeReg(offsetNode);
// We will use a temp register for the offset*scale+effectiveIndex computation.
tmpReg = arrOffset->GetSingleTempReg();
}
else
{
assert(offsetNode->isContained());
}
regNumber indexReg = genConsumeReg(indexNode);
// Although arrReg may not be used in the constant-index case, if we have generated
// the value into a register, we must consume it, otherwise we will fail to end the
// live range of the gc ptr.
// TODO-CQ: Currently arrObj will always have a register allocated to it.
// We could avoid allocating a register for it, which would be of value if the arrObj
// is an on-stack lclVar.
regNumber arrReg = REG_NA;
if (arrObj->gtHasReg())
{
arrReg = genConsumeReg(arrObj);
}
if (!offsetNode->IsIntegralConst(0))
{
assert(tmpReg != REG_NA);
assert(arrReg != REG_NA);
// Evaluate tgtReg = offsetReg*dim_size + indexReg.
// tmpReg is used to load dim_size and the result of the multiplication.
// Note that dim_size will never be negative.
GetEmitter()->emitIns_R_AR(INS_mov, emitActualTypeSize(TYP_INT), tmpReg, arrReg,
compiler->eeGetMDArrayLengthOffset(rank, dim));
inst_RV_RV(INS_imul, tmpReg, offsetReg);
if (tmpReg == tgtReg)
{
inst_RV_RV(INS_add, tmpReg, indexReg);
}
else
{
inst_Mov(TYP_I_IMPL, tgtReg, indexReg, /* canSkip */ true);
inst_RV_RV(INS_add, tgtReg, tmpReg);
}
}
else
{
inst_Mov(TYP_INT, tgtReg, indexReg, /* canSkip */ true);
}
genProduceReg(arrOffset);
}
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins;
// Operations on SIMD vectors shouldn't come this path
assert(!varTypeIsSIMD(type));
if (varTypeIsFloating(type))
{
return ins_MathOp(oper, type);
}
switch (oper)
{
case GT_ADD:
ins = INS_add;
break;
case GT_AND:
ins = INS_and;
break;
case GT_LSH:
ins = INS_shl;
break;
case GT_MUL:
ins = INS_imul;
break;
case GT_NEG:
ins = INS_neg;
break;
case GT_NOT:
ins = INS_not;
break;
case GT_OR:
ins = INS_or;
break;
case GT_ROL:
ins = INS_rol;
break;
case GT_ROR:
ins = INS_ror;
break;
case GT_RSH:
ins = INS_sar;
break;
case GT_RSZ:
ins = INS_shr;
break;
case GT_SUB:
ins = INS_sub;
break;
case GT_XOR:
ins = INS_xor;
break;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
ins = INS_add;
break;
case GT_ADD_HI:
ins = INS_adc;
break;
case GT_SUB_LO:
ins = INS_sub;
break;
case GT_SUB_HI:
ins = INS_sbb;
break;
case GT_LSH_HI:
ins = INS_shld;
break;
case GT_RSH_LO:
ins = INS_shrd;
break;
#endif // !defined(TARGET_64BIT)
default:
unreached();
break;
}
return ins;
}
//------------------------------------------------------------------------
// genCodeForShift: Generates the code sequence for a GenTree node that
// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
// b) The shift-by-amount in tree->AsOp()->gtOp2 is either a contained constant or
// it's a register-allocated expression. If it is in a register that is
// not RCX, it will be moved to RCX (so RCX better not be in use!).
//
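// For illustration, a sketch of the common cases (register names are illustrative):
//     shl  targetReg, 5               ; contained constant shift amount
//     add  targetReg, targetReg       ; X << 1 when the target and operand registers match
//     lea  targetReg, [op + op]       ; X << 1 when they differ
//     rorx targetReg, operandReg, n   ; 64-bit rotate by constant when BMI2 is available
//     shl  targetReg, cl              ; variable shift amount, which must live in RCX
//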
void CodeGen::genCodeForShift(GenTree* tree)
{
// Only the non-RMW case here.
assert(tree->OperIsShiftOrRotate());
assert(tree->AsOp()->gtOp1->isUsedFromReg());
assert(tree->GetRegNum() != REG_NA);
genConsumeOperands(tree->AsOp());
var_types targetType = tree->TypeGet();
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
GenTree* operand = tree->gtGetOp1();
regNumber operandReg = operand->GetRegNum();
GenTree* shiftBy = tree->gtGetOp2();
if (shiftBy->isContainedIntOrIImmed())
{
emitAttr size = emitTypeSize(tree);
// Optimize "X<<1" to "lea [reg+reg]" or "add reg, reg"
if (tree->OperIs(GT_LSH) && !tree->gtOverflowEx() && !tree->gtSetFlags() && shiftBy->IsIntegralConst(1))
{
if (tree->GetRegNum() == operandReg)
{
GetEmitter()->emitIns_R_R(INS_add, size, tree->GetRegNum(), operandReg);
}
else
{
GetEmitter()->emitIns_R_ARX(INS_lea, size, tree->GetRegNum(), operandReg, operandReg, 1, 0);
}
}
else
{
int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
#if defined(TARGET_64BIT)
// Try to emit rorx if BMI2 is available instead of mov+rol
// it makes sense only for 64bit integers
if ((genActualType(targetType) == TYP_LONG) && (tree->GetRegNum() != operandReg) &&
compiler->compOpportunisticallyDependsOn(InstructionSet_BMI2) && tree->OperIs(GT_ROL, GT_ROR) &&
(shiftByValue > 0) && (shiftByValue < 64))
{
const int value = tree->OperIs(GT_ROL) ? (64 - shiftByValue) : shiftByValue;
GetEmitter()->emitIns_R_R_I(INS_rorx, size, tree->GetRegNum(), operandReg, value);
genProduceReg(tree);
return;
}
#endif
// First, move the operand to the destination register and
// later on perform the shift in-place.
// (LSRA will try to avoid this situation through preferencing.)
inst_Mov(targetType, tree->GetRegNum(), operandReg, /* canSkip */ true);
inst_RV_SH(ins, size, tree->GetRegNum(), shiftByValue);
}
}
else
{
// We must have the number of bits to shift stored in ECX, since we constrained this node to
// sit in ECX. In case this didn't happen, LSRA expects the code generator to move it since it's a single
// register destination requirement.
genCopyRegIfNeeded(shiftBy, REG_RCX);
// The operand to be shifted must not be in ECX
noway_assert(operandReg != REG_RCX);
inst_Mov(targetType, tree->GetRegNum(), operandReg, /* canSkip */ true);
inst_RV(ins, tree->GetRegNum(), targetType);
}
genProduceReg(tree);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genCodeForShiftLong: Generates the code sequence for a GenTree node that
// represents a three operand bit shift or rotate operation (<<Hi, >>Lo).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
// b) The shift-by-amount in tree->AsOp()->gtOp2 is a contained constant
//
// TODO-X86-CQ: This only handles the case where the operand being shifted is in a register. We don't
// need sourceHi to be always in reg in case of GT_LSH_HI (because it could be moved from memory to
// targetReg if sourceHi is a memory operand). Similarly for GT_RSH_LO, sourceLo could be marked as
// contained memory-op. Even if not a memory-op, we could mark it as reg-optional.
//
void CodeGen::genCodeForShiftLong(GenTree* tree)
{
// Only the non-RMW case here.
genTreeOps oper = tree->OperGet();
assert(oper == GT_LSH_HI || oper == GT_RSH_LO);
GenTree* operand = tree->AsOp()->gtOp1;
assert(operand->OperGet() == GT_LONG);
assert(operand->AsOp()->gtOp1->isUsedFromReg());
assert(operand->AsOp()->gtOp2->isUsedFromReg());
GenTree* operandLo = operand->gtGetOp1();
GenTree* operandHi = operand->gtGetOp2();
regNumber regLo = operandLo->GetRegNum();
regNumber regHi = operandHi->GetRegNum();
genConsumeOperands(tree->AsOp());
var_types targetType = tree->TypeGet();
instruction ins = genGetInsForOper(oper, targetType);
GenTree* shiftBy = tree->gtGetOp2();
assert(shiftBy->isContainedIntOrIImmed());
unsigned int count = (unsigned int)shiftBy->AsIntConCommon()->IconValue();
regNumber regResult = (oper == GT_LSH_HI) ? regHi : regLo;
inst_Mov(targetType, tree->GetRegNum(), regResult, /* canSkip */ true);
if (oper == GT_LSH_HI)
{
inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->GetRegNum(), regLo, count);
}
else
{
assert(oper == GT_RSH_LO);
inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->GetRegNum(), regHi, count);
}
genProduceReg(tree);
}
#endif
//------------------------------------------------------------------------
// genMapShiftInsToShiftByConstantIns: Given a general shift/rotate instruction,
// map it to the specific x86/x64 shift opcode for a shift/rotate by a constant.
// X86/x64 has a special encoding for shift/rotate-by-constant-1.
//
// Arguments:
// ins: the base shift/rotate instruction
// shiftByValue: the constant value by which we are shifting/rotating
//
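// For example (illustration only): (INS_shl, 1) maps to INS_shl_1, which uses the short
// shift-by-one encoding, while (INS_shl, 5) maps to INS_shl_N, which encodes the count
// as an immediate byte.
//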
instruction CodeGen::genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue)
{
assert(ins == INS_rcl || ins == INS_rcr || ins == INS_rol || ins == INS_ror || ins == INS_shl || ins == INS_shr ||
ins == INS_sar);
// Which format should we use?
instruction shiftByConstantIns;
if (shiftByValue == 1)
{
// Use the shift-by-one format.
assert(INS_rcl + 1 == INS_rcl_1);
assert(INS_rcr + 1 == INS_rcr_1);
assert(INS_rol + 1 == INS_rol_1);
assert(INS_ror + 1 == INS_ror_1);
assert(INS_shl + 1 == INS_shl_1);
assert(INS_shr + 1 == INS_shr_1);
assert(INS_sar + 1 == INS_sar_1);
shiftByConstantIns = (instruction)(ins + 1);
}
else
{
// Use the shift-by-NNN format.
assert(INS_rcl + 2 == INS_rcl_N);
assert(INS_rcr + 2 == INS_rcr_N);
assert(INS_rol + 2 == INS_rol_N);
assert(INS_ror + 2 == INS_ror_N);
assert(INS_shl + 2 == INS_shl_N);
assert(INS_shr + 2 == INS_shr_N);
assert(INS_sar + 2 == INS_sar_N);
shiftByConstantIns = (instruction)(ins + 2);
}
return shiftByConstantIns;
}
//------------------------------------------------------------------------
// genCodeForShiftRMW: Generates the code sequence for a GT_STOREIND GenTree node that
// represents a RMW bit shift or rotate operation (<<, >>, >>>, rol, ror), for example:
// GT_STOREIND( AddressTree, GT_SHL( Ind ( AddressTree ), Operand ) )
//
// Arguments:
// storeIndNode: the GT_STOREIND node.
//
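// A rough sketch of the emitted code for [addr] = [addr] << amount:
//     shl  dword ptr [addrReg], 1     ; amount == 1, shift-by-one encoding
//     shl  dword ptr [addrReg], 3     ; other contained constants, shift-by-N encoding
//     shl  dword ptr [addrReg], cl    ; non-constant amount, which must live in RCX
//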
void CodeGen::genCodeForShiftRMW(GenTreeStoreInd* storeInd)
{
GenTree* data = storeInd->Data();
assert(data->OperIsShift() || data->OperIsRotate());
// This function only handles the RMW case.
assert(data->AsOp()->gtOp1->isUsedFromMemory());
assert(data->AsOp()->gtOp1->isIndir());
assert(Lowering::IndirsAreEquivalent(data->AsOp()->gtOp1, storeInd));
assert(data->GetRegNum() == REG_NA);
var_types targetType = data->TypeGet();
genTreeOps oper = data->OperGet();
instruction ins = genGetInsForOper(oper, targetType);
emitAttr attr = EA_ATTR(genTypeSize(targetType));
GenTree* shiftBy = data->AsOp()->gtOp2;
if (shiftBy->isContainedIntOrIImmed())
{
int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
ins = genMapShiftInsToShiftByConstantIns(ins, shiftByValue);
if (shiftByValue == 1)
{
// There is no source in this case, as the shift by count is embedded in the instruction opcode itself.
GetEmitter()->emitInsRMW(ins, attr, storeInd);
}
else
{
GetEmitter()->emitInsRMW(ins, attr, storeInd, shiftBy);
}
}
else
{
// We must have the number of bits to shift stored in ECX, since we constrained this node to
// sit in ECX. In case this didn't happen, LSRA expects the code generator to move it since it's a single
// register destination requirement.
genCopyRegIfNeeded(shiftBy, REG_RCX);
// The shiftBy operand is implicit, so call the unary version of emitInsRMW.
GetEmitter()->emitInsRMW(ins, attr, storeInd);
}
}
//------------------------------------------------------------------------
// genCodeForLclAddr: Generates the code for GT_LCL_FLD_ADDR/GT_LCL_VAR_ADDR.
//
// Arguments:
// tree - the node.
//
void CodeGen::genCodeForLclAddr(GenTree* tree)
{
assert(tree->OperIs(GT_LCL_FLD_ADDR, GT_LCL_VAR_ADDR));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
// Address of a local var.
noway_assert((targetType == TYP_BYREF) || (targetType == TYP_I_IMPL));
emitAttr size = emitTypeSize(targetType);
inst_RV_TT(INS_lea, targetReg, tree, 0, size);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclFld: Produce code for a GT_LCL_FLD node.
//
// Arguments:
// tree - the GT_LCL_FLD node
//
void CodeGen::genCodeForLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_LCL_FLD));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
noway_assert(targetReg != REG_NA);
#ifdef FEATURE_SIMD
// Loading of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genLoadLclTypeSIMD12(tree);
return;
}
#endif
noway_assert(targetType != TYP_STRUCT);
emitAttr size = emitTypeSize(targetType);
unsigned offs = tree->GetLclOffs();
unsigned varNum = tree->GetLclNum();
assert(varNum < compiler->lvaCount);
GetEmitter()->emitIns_R_S(ins_Load(targetType), size, targetReg, varNum, offs);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclVar: Produce code for a GT_LCL_VAR node.
//
// Arguments:
// tree - the GT_LCL_VAR node
//
void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
{
assert(tree->OperIs(GT_LCL_VAR));
// lcl_vars are not defs
assert((tree->gtFlags & GTF_VAR_DEF) == 0);
LclVarDsc* varDsc = compiler->lvaGetDesc(tree);
bool isRegCandidate = varDsc->lvIsRegCandidate();
// If this is a register candidate that has been spilled, genConsumeReg() will
// reload it at the point of use. Otherwise, if it's not in a register, we load it here.
if (!isRegCandidate && !tree->IsMultiReg() && !(tree->gtFlags & GTF_SPILLED))
{
#if defined(FEATURE_SIMD) && defined(TARGET_X86)
// Loading of TYP_SIMD12 (i.e. Vector3) variable
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadLclTypeSIMD12(tree);
return;
}
#endif // defined(FEATURE_SIMD) && defined(TARGET_X86)
var_types type = varDsc->GetRegisterType(tree);
GetEmitter()->emitIns_R_S(ins_Load(type, compiler->isSIMDTypeLocalAligned(tree->GetLclNum())),
emitTypeSize(type), tree->GetRegNum(), tree->GetLclNum(), 0);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForStoreLclFld: Produce code for a GT_STORE_LCL_FLD node.
//
// Arguments:
// tree - the GT_STORE_LCL_FLD node
//
void CodeGen::genCodeForStoreLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_STORE_LCL_FLD));
var_types targetType = tree->TypeGet();
GenTree* op1 = tree->gtGetOp1();
noway_assert(targetType != TYP_STRUCT);
#ifdef FEATURE_SIMD
// storing of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genStoreLclTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
assert(varTypeUsesFloatReg(targetType) == varTypeUsesFloatReg(op1));
assert(genTypeSize(genActualType(targetType)) == genTypeSize(genActualType(op1->TypeGet())));
genConsumeRegs(op1);
if (op1->OperIs(GT_BITCAST) && op1->isContained())
{
regNumber targetReg = tree->GetRegNum();
GenTree* bitCastSrc = op1->gtGetOp1();
var_types srcType = bitCastSrc->TypeGet();
noway_assert(!bitCastSrc->isContained());
if (targetReg == REG_NA)
{
unsigned lclNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
GetEmitter()->emitIns_S_R(ins_Store(srcType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), bitCastSrc->GetRegNum(), lclNum, tree->GetLclOffs());
varDsc->SetRegNum(REG_STK);
}
else
{
genBitCast(targetType, targetReg, srcType, bitCastSrc->GetRegNum());
}
}
else
{
GetEmitter()->emitInsBinary(ins_Store(targetType), emitTypeSize(tree), tree, op1);
}
// Updating variable liveness after instruction was emitted
genUpdateLife(tree);
}
//------------------------------------------------------------------------
// genCodeForStoreLclVar: Produce code for a GT_STORE_LCL_VAR node.
//
// Arguments:
// lclNode - the GT_STORE_LCL_VAR node
//
void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* lclNode)
{
assert(lclNode->OperIs(GT_STORE_LCL_VAR));
regNumber targetReg = lclNode->GetRegNum();
emitter* emit = GetEmitter();
GenTree* op1 = lclNode->gtGetOp1();
// Stores from a multi-reg source are handled separately.
if (op1->gtSkipReloadOrCopy()->IsMultiRegNode())
{
genMultiRegStoreToLocal(lclNode);
}
else
{
unsigned lclNum = lclNode->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
var_types targetType = varDsc->GetRegisterType(lclNode);
#ifdef DEBUG
var_types op1Type = op1->TypeGet();
if (op1Type == TYP_STRUCT)
{
assert(op1->IsLocal());
GenTreeLclVar* op1LclVar = op1->AsLclVar();
unsigned op1lclNum = op1LclVar->GetLclNum();
LclVarDsc* op1VarDsc = compiler->lvaGetDesc(op1lclNum);
op1Type = op1VarDsc->GetRegisterType(op1LclVar);
}
assert(varTypeUsesFloatReg(targetType) == varTypeUsesFloatReg(op1Type));
assert(!varTypeUsesFloatReg(targetType) || (emitTypeSize(targetType) == emitTypeSize(op1Type)));
#endif
#if !defined(TARGET_64BIT)
if (targetType == TYP_LONG)
{
genStoreLongLclVar(lclNode);
return;
}
#endif // !defined(TARGET_64BIT)
#ifdef FEATURE_SIMD
// Storing of a TYP_SIMD12 (i.e. Vector3) local
if (targetType == TYP_SIMD12)
{
genStoreLclTypeSIMD12(lclNode);
return;
}
#endif // FEATURE_SIMD
genConsumeRegs(op1);
if (op1->OperIs(GT_BITCAST) && op1->isContained())
{
GenTree* bitCastSrc = op1->gtGetOp1();
var_types srcType = bitCastSrc->TypeGet();
noway_assert(!bitCastSrc->isContained());
if (targetReg == REG_NA)
{
emit->emitIns_S_R(ins_Store(srcType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), bitCastSrc->GetRegNum(), lclNum, 0);
genUpdateLife(lclNode);
varDsc->SetRegNum(REG_STK);
}
else
{
genBitCast(targetType, targetReg, srcType, bitCastSrc->GetRegNum());
}
}
else if (targetReg == REG_NA)
{
// stack store
emit->emitInsStoreLcl(ins_Store(targetType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), lclNode);
varDsc->SetRegNum(REG_STK);
}
else
{
// Look for the case where we have a constant zero which we've marked for reuse,
// but which isn't actually in the register we want. In that case, it's better to create
// zero in the target register, because an xor is smaller than a copy. Note that we could
// potentially handle this in the register allocator, but we can't always catch it there
// because the target may not have a register allocated for it yet.
if (op1->isUsedFromReg() && (op1->GetRegNum() != targetReg) && (op1->IsIntegralConst(0) || op1->IsFPZero()))
{
op1->SetRegNum(REG_NA);
op1->ResetReuseRegVal();
op1->SetContained();
}
if (!op1->isUsedFromReg())
{
// Currently, we assume that the non-reg source of a GT_STORE_LCL_VAR writing to a register
// must be a constant. However, in the future we might want to support an operand used from
// memory. This is a bit tricky because we have to decide it can be used from memory before
// register allocation, and this would be a case where, once that's done, we need to mark
// that node as always requiring a register - which we always assume now anyway, but once
// we "optimize" that we'll have to take cases like this into account.
assert((op1->GetRegNum() == REG_NA) && op1->OperIsConst());
genSetRegToConst(targetReg, targetType, op1);
}
else
{
assert(targetReg == lclNode->GetRegNum());
assert(op1->GetRegNum() != REG_NA);
inst_Mov_Extend(targetType, /* srcInReg */ true, targetReg, op1->GetRegNum(), /* canSkip */ true,
emitTypeSize(targetType));
}
}
if (targetReg != REG_NA)
{
genProduceReg(lclNode);
}
}
}
//------------------------------------------------------------------------
// genCodeForIndexAddr: Produce code for a GT_INDEX_ADDR node.
//
// Arguments:
// tree - the GT_INDEX_ADDR node
//
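// A sketch of a typical emitted sequence (x64, 4-byte elements, range check enabled,
// TYP_INT index; register names and offsets are illustrative):
//     cmp  edx, dword ptr [rcx + lenOffset]
//     jae  RngChkFail
//     mov  r8d, edx                     ; widen the index to 64 bits
//     lea  rax, [rcx + r8*4 + elemOffset]
//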
void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node)
{
GenTree* const base = node->Arr();
GenTree* const index = node->Index();
const regNumber baseReg = genConsumeReg(base);
regNumber indexReg = genConsumeReg(index);
const regNumber dstReg = node->GetRegNum();
// NOTE: `genConsumeReg` marks the consumed register as not a GC pointer, as it assumes that the input registers
// die at the first instruction generated by the node. This is not the case for `INDEX_ADDR`, however, as the
// base register is multiply-used. As such, we need to mark the base register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(baseReg, base->TypeGet());
assert(varTypeIsIntegral(index->TypeGet()));
regNumber tmpReg = REG_NA;
#ifdef TARGET_64BIT
tmpReg = node->GetSingleTempReg();
#endif
// Generate the bounds check if necessary.
if ((node->gtFlags & GTF_INX_RNGCHK) != 0)
{
#ifdef TARGET_64BIT
// The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case that the index
// is a native int on a 64-bit platform, we will need to widen the array length and then compare.
if (index->TypeGet() == TYP_I_IMPL)
{
GetEmitter()->emitIns_R_AR(INS_mov, EA_4BYTE, tmpReg, baseReg, static_cast<int>(node->gtLenOffset));
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, indexReg, tmpReg);
}
else
#endif // TARGET_64BIT
{
GetEmitter()->emitIns_R_AR(INS_cmp, EA_4BYTE, indexReg, baseReg, static_cast<int>(node->gtLenOffset));
}
genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL, node->gtIndRngFailBB);
}
#ifdef TARGET_64BIT
if (index->TypeGet() != TYP_I_IMPL)
{
// LEA needs 64-bit operands so we need to widen the index if it's TYP_INT.
GetEmitter()->emitIns_Mov(INS_mov, EA_4BYTE, tmpReg, indexReg, /* canSkip */ false);
indexReg = tmpReg;
}
#endif // TARGET_64BIT
// Compute the address of the array element.
unsigned scale = node->gtElemSize;
switch (scale)
{
case 1:
case 2:
case 4:
case 8:
tmpReg = indexReg;
break;
default:
#ifdef TARGET_64BIT
// IMUL treats its immediate operand as signed so scale can't be larger than INT32_MAX.
// The VM doesn't allow such large array elements but let's be sure.
noway_assert(scale <= INT32_MAX);
#else // !TARGET_64BIT
tmpReg = node->GetSingleTempReg();
#endif // !TARGET_64BIT
GetEmitter()->emitIns_R_I(emitter::inst3opImulForReg(tmpReg), EA_PTRSIZE, indexReg,
static_cast<ssize_t>(scale));
scale = 1;
break;
}
GetEmitter()->emitIns_R_ARX(INS_lea, emitTypeSize(node->TypeGet()), dstReg, baseReg, tmpReg, scale,
static_cast<int>(node->gtElemOffset));
gcInfo.gcMarkRegSetNpt(base->gtGetRegMask());
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForIndir: Produce code for a GT_IND node.
//
// Arguments:
// tree - the GT_IND node
//
void CodeGen::genCodeForIndir(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_IND));
#ifdef FEATURE_SIMD
// Handling of Vector3 type values loaded through indirection.
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
var_types targetType = tree->TypeGet();
emitter* emit = GetEmitter();
GenTree* addr = tree->Addr();
if (addr->IsCnsIntOrI() && addr->IsIconHandle(GTF_ICON_TLS_HDL))
{
noway_assert(EA_ATTR(genTypeSize(targetType)) == EA_PTRSIZE);
emit->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, tree->GetRegNum(), FLD_GLOBAL_FS,
(int)addr->AsIntCon()->gtIconVal);
}
else
{
genConsumeAddress(addr);
emit->emitInsLoadInd(ins_Load(targetType), emitTypeSize(tree), tree->GetRegNum(), tree);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForStoreInd: Produce code for a GT_STOREIND node.
//
// Arguments:
// tree - the GT_STOREIND node
//
void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
{
assert(tree->OperIs(GT_STOREIND));
#ifdef FEATURE_SIMD
// Storing Vector3 of size 12 bytes through indirection
if (tree->TypeGet() == TYP_SIMD12)
{
genStoreIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
GenTree* data = tree->Data();
GenTree* addr = tree->Addr();
var_types targetType = tree->TypeGet();
assert(!varTypeIsFloating(targetType) || (genTypeSize(targetType) == genTypeSize(data->TypeGet())));
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(tree, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
{
// data and addr must be in registers.
// Consume both registers so that any copies of interfering registers are taken care of.
genConsumeOperands(tree);
if (genEmitOptimizedGCWriteBarrier(writeBarrierForm, addr, data))
{
return;
}
// At this point, we should not have any interference.
// That is, 'data' must not be in REG_ARG_0, as that is where 'addr' must go.
noway_assert(data->GetRegNum() != REG_ARG_0);
// addr goes in REG_ARG_0
genCopyRegIfNeeded(addr, REG_ARG_0);
// data goes in REG_ARG_1
genCopyRegIfNeeded(data, REG_ARG_1);
genGCWriteBarrier(tree, writeBarrierForm);
}
else
{
bool dataIsUnary = false;
bool isRMWMemoryOp = tree->IsRMWMemoryOp();
GenTree* rmwSrc = nullptr;
// We must consume the operands in the proper execution order, so that liveness is
// updated appropriately.
genConsumeAddress(addr);
// If tree represents a RMW memory op then its data is a non-leaf node marked as contained
// and non-indir operand of data is the source of RMW memory op.
if (isRMWMemoryOp)
{
assert(data->isContained() && !data->OperIsLeaf());
GenTree* rmwDst = nullptr;
dataIsUnary = (GenTree::OperIsUnary(data->OperGet()) != 0);
if (!dataIsUnary)
{
if (tree->IsRMWDstOp1())
{
rmwDst = data->gtGetOp1();
rmwSrc = data->gtGetOp2();
}
else
{
assert(tree->IsRMWDstOp2());
rmwDst = data->gtGetOp2();
rmwSrc = data->gtGetOp1();
}
genConsumeRegs(rmwSrc);
}
else
{
// *(p) = oper *(p): Here addr = p, rmwSrc = rmwDst = *(p) i.e. GT_IND(p)
// For unary RMW ops, src and dst of RMW memory op is the same. Lower
// clears operand counts on rmwSrc and we don't need to perform a
// genConsumeReg() on it.
assert(tree->IsRMWDstOp1());
rmwSrc = data->gtGetOp1();
rmwDst = data->gtGetOp1();
assert(rmwSrc->isUsedFromMemory());
}
assert(rmwSrc != nullptr);
assert(rmwDst != nullptr);
assert(Lowering::IndirsAreEquivalent(rmwDst, tree));
}
else
{
genConsumeRegs(data);
}
if (isRMWMemoryOp)
{
if (dataIsUnary)
{
// generate code for unary RMW memory ops like neg/not
GetEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree), tree);
}
else
{
if (data->OperIsShiftOrRotate())
{
// Generate code for shift RMW memory ops.
// The data address needs to be op1 (it must be [addr] = [addr] <shift> <amount>, not [addr] =
// <amount> <shift> [addr]).
assert(tree->IsRMWDstOp1());
assert(rmwSrc == data->gtGetOp2());
genCodeForShiftRMW(tree);
}
else if (data->OperGet() == GT_ADD && (rmwSrc->IsIntegralConst(1) || rmwSrc->IsIntegralConst(-1)))
{
// Generate "inc/dec [mem]" instead of "add/sub [mem], 1".
//
// Notes:
// 1) Global morph transforms GT_SUB(x, +/-1) into GT_ADD(x, -/+1).
// 2) TODO-AMD64: Debugger routine NativeWalker::Decode() runs into
// an assert while decoding ModR/M byte of "inc dword ptr [rax]".
// It is not clear whether Decode() can handle all possible
// addr modes with inc/dec. For this reason, inc/dec [mem]
// is not generated while generating debuggable code. Update
// the above if condition once Decode() routine is fixed.
assert(rmwSrc->isContainedIntOrIImmed());
instruction ins = rmwSrc->IsIntegralConst(1) ? INS_inc : INS_dec;
GetEmitter()->emitInsRMW(ins, emitTypeSize(tree), tree);
}
else
{
// generate code for remaining binary RMW memory ops like add/sub/and/or/xor
GetEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree),
tree, rmwSrc);
}
}
}
else
{
GetEmitter()->emitInsStoreInd(ins_Store(data->TypeGet()), emitTypeSize(tree), tree);
}
}
}
//------------------------------------------------------------------------
// genCodeForSwap: Produce code for a GT_SWAP node.
//
// Arguments:
// tree - the GT_SWAP node
//
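// The swap itself is a single "xchg reg1, reg2"; the rest of the work is keeping the
// lclVar-to-register mapping and the GC register sets consistent afterwards.
//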
void CodeGen::genCodeForSwap(GenTreeOp* tree)
{
assert(tree->OperIs(GT_SWAP));
// Swap is only supported for lclVar operands that are enregistered
// We do not consume or produce any registers. Both operands remain enregistered.
// However, the gc-ness may change.
assert(genIsRegCandidateLocal(tree->gtOp1) && genIsRegCandidateLocal(tree->gtOp2));
GenTreeLclVarCommon* lcl1 = tree->gtOp1->AsLclVarCommon();
LclVarDsc* varDsc1 = compiler->lvaGetDesc(lcl1);
var_types type1 = varDsc1->TypeGet();
GenTreeLclVarCommon* lcl2 = tree->gtOp2->AsLclVarCommon();
LclVarDsc* varDsc2 = compiler->lvaGetDesc(lcl2);
var_types type2 = varDsc2->TypeGet();
// We must have both int or both fp regs
assert(!varTypeUsesFloatReg(type1) || varTypeUsesFloatReg(type2));
// FP swap is not yet implemented (and should have NYI'd in LSRA)
assert(!varTypeUsesFloatReg(type1));
regNumber oldOp1Reg = lcl1->GetRegNum();
regMaskTP oldOp1RegMask = genRegMask(oldOp1Reg);
regNumber oldOp2Reg = lcl2->GetRegNum();
regMaskTP oldOp2RegMask = genRegMask(oldOp2Reg);
// We don't call genUpdateVarReg because we don't have a tree node with the new register.
varDsc1->SetRegNum(oldOp2Reg);
varDsc2->SetRegNum(oldOp1Reg);
// Do the xchg
emitAttr size = EA_PTRSIZE;
if (varTypeGCtype(type1) != varTypeGCtype(type2))
{
// If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
// Otherwise it will leave them alone, which is correct if they have the same GC-ness.
size = EA_GCREF;
}
inst_RV_RV(INS_xchg, oldOp1Reg, oldOp2Reg, TYP_I_IMPL, size);
// Update the gcInfo.
// Manually remove these regs for the gc sets (mostly to avoid confusing duplicative dump output)
gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
// gcMarkRegPtrVal will do the appropriate thing for non-gc types.
// It will also dump the updates.
gcInfo.gcMarkRegPtrVal(oldOp2Reg, type1);
gcInfo.gcMarkRegPtrVal(oldOp1Reg, type2);
}
//------------------------------------------------------------------------
// genEmitOptimizedGCWriteBarrier: Generate write barrier store using the optimized
// helper functions.
//
// Arguments:
// writeBarrierForm - the write barrier form to use
// addr - the address at which to do the store
// data - the data to store
//
// Return Value:
// true if an optimized write barrier form was used, false if not. If this
// function returns false, the caller must emit a "standard" write barrier.
bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data)
{
assert(writeBarrierForm != GCInfo::WBF_NoBarrier);
#if defined(TARGET_X86) && NOGC_WRITE_BARRIERS
if (!genUseOptimizedWriteBarriers(writeBarrierForm))
{
return false;
}
const static int regToHelper[2][8] = {
// If the target is known to be in managed memory
{
CORINFO_HELP_ASSIGN_REF_EAX, // EAX
CORINFO_HELP_ASSIGN_REF_ECX, // ECX
-1, // EDX (always the target address)
CORINFO_HELP_ASSIGN_REF_EBX, // EBX
-1, // ESP
CORINFO_HELP_ASSIGN_REF_EBP, // EBP
CORINFO_HELP_ASSIGN_REF_ESI, // ESI
CORINFO_HELP_ASSIGN_REF_EDI, // EDI
},
// Don't know if the target is in managed memory
{
CORINFO_HELP_CHECKED_ASSIGN_REF_EAX, // EAX
CORINFO_HELP_CHECKED_ASSIGN_REF_ECX, // ECX
-1, // EDX (always the target address)
CORINFO_HELP_CHECKED_ASSIGN_REF_EBX, // EBX
-1, // ESP
CORINFO_HELP_CHECKED_ASSIGN_REF_EBP, // EBP
CORINFO_HELP_CHECKED_ASSIGN_REF_ESI, // ESI
CORINFO_HELP_CHECKED_ASSIGN_REF_EDI, // EDI
},
};
noway_assert(regToHelper[0][REG_EAX] == CORINFO_HELP_ASSIGN_REF_EAX);
noway_assert(regToHelper[0][REG_ECX] == CORINFO_HELP_ASSIGN_REF_ECX);
noway_assert(regToHelper[0][REG_EBX] == CORINFO_HELP_ASSIGN_REF_EBX);
noway_assert(regToHelper[0][REG_ESP] == -1);
noway_assert(regToHelper[0][REG_EBP] == CORINFO_HELP_ASSIGN_REF_EBP);
noway_assert(regToHelper[0][REG_ESI] == CORINFO_HELP_ASSIGN_REF_ESI);
noway_assert(regToHelper[0][REG_EDI] == CORINFO_HELP_ASSIGN_REF_EDI);
noway_assert(regToHelper[1][REG_EAX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EAX);
noway_assert(regToHelper[1][REG_ECX] == CORINFO_HELP_CHECKED_ASSIGN_REF_ECX);
noway_assert(regToHelper[1][REG_EBX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBX);
noway_assert(regToHelper[1][REG_ESP] == -1);
noway_assert(regToHelper[1][REG_EBP] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBP);
noway_assert(regToHelper[1][REG_ESI] == CORINFO_HELP_CHECKED_ASSIGN_REF_ESI);
noway_assert(regToHelper[1][REG_EDI] == CORINFO_HELP_CHECKED_ASSIGN_REF_EDI);
regNumber reg = data->GetRegNum();
noway_assert((reg != REG_ESP) && (reg != REG_WRITE_BARRIER));
// Generate the following code:
// lea edx, addr
// call write_barrier_helper_reg
// addr goes in REG_ARG_0
genCopyRegIfNeeded(addr, REG_WRITE_BARRIER);
unsigned tgtAnywhere = 0;
if (writeBarrierForm != GCInfo::WBF_BarrierUnchecked)
{
tgtAnywhere = 1;
}
// We might want to call a modified version of genGCWriteBarrier() to get the benefit of
// the FEATURE_COUNT_GC_WRITE_BARRIERS code there, but that code doesn't look like it works
// with rationalized RyuJIT IR. So, for now, just emit the helper call directly here.
genEmitHelperCall(regToHelper[tgtAnywhere][reg],
0, // argSize
EA_PTRSIZE); // retSize
return true;
#else // !defined(TARGET_X86) || !NOGC_WRITE_BARRIERS
return false;
#endif // !defined(TARGET_X86) || !NOGC_WRITE_BARRIERS
}
// Produce code for a GT_CALL node
void CodeGen::genCall(GenTreeCall* call)
{
genAlignStackBeforeCall(call);
// all virtuals should have been expanded into a control expression
assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
// Insert a GS check if necessary
if (call->IsTailCallViaJitHelper())
{
if (compiler->getNeedsGSSecurityCookie())
{
#if FEATURE_FIXED_OUT_ARGS
// If either of the conditions below is true, we will need a temporary register in order to perform the GS
// cookie check. When FEATURE_FIXED_OUT_ARGS is disabled, we save and restore the temporary register using
// push/pop. When FEATURE_FIXED_OUT_ARGS is enabled, however, we need an alternative solution. For now,
// though, the tail prefix is ignored on all platforms that use fixed out args, so we should never hit this
// case.
assert(compiler->gsGlobalSecurityCookieAddr == nullptr);
assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
#endif
genEmitGSCookieCheck(true);
}
}
// Consume all the arg regs
for (GenTreeCall::Use& use : call->LateArgs())
{
GenTree* argNode = use.GetNode();
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
assert(curArgTabEntry);
if (curArgTabEntry->GetRegNum() == REG_STK)
{
continue;
}
#ifdef UNIX_AMD64_ABI
// Deal with multi register passed struct args.
if (argNode->OperGet() == GT_FIELD_LIST)
{
unsigned regIndex = 0;
for (GenTreeFieldList::Use& use : argNode->AsFieldList()->Uses())
{
GenTree* putArgRegNode = use.GetNode();
assert(putArgRegNode->gtOper == GT_PUTARG_REG);
regNumber argReg = curArgTabEntry->GetRegNum(regIndex++);
genConsumeReg(putArgRegNode);
// Validate the putArgRegNode has the right type.
assert(varTypeUsesFloatReg(putArgRegNode->TypeGet()) == genIsValidFloatReg(argReg));
inst_Mov_Extend(putArgRegNode->TypeGet(), /* srcInReg */ false, argReg, putArgRegNode->GetRegNum(),
/* canSkip */ true, emitActualTypeSize(TYP_I_IMPL));
}
}
else
#endif // UNIX_AMD64_ABI
{
regNumber argReg = curArgTabEntry->GetRegNum();
genConsumeReg(argNode);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ false, argReg, argNode->GetRegNum(), /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
// In the case of a varargs call,
// the ABI dictates that if we have floating point args,
// we must pass the enregistered arguments in both the
// integer and floating point registers, so let's do that.
if (compFeatureVarArg() && call->IsVarargs() && varTypeIsFloating(argNode))
{
regNumber srcReg = argNode->GetRegNum();
regNumber targetReg = compiler->getCallArgIntRegister(argNode->GetRegNum());
inst_Mov(TYP_LONG, targetReg, srcReg, /* canSkip */ false, emitActualTypeSize(TYP_I_IMPL));
}
}
#if defined(TARGET_X86) || defined(UNIX_AMD64_ABI)
// The call will pop its arguments.
// for each putarg_stk:
target_ssize_t stackArgBytes = 0;
for (GenTreeCall::Use& use : call->Args())
{
GenTree* arg = use.GetNode();
if (arg->OperIs(GT_PUTARG_STK) && ((arg->gtFlags & GTF_LATE_ARG) == 0))
{
GenTree* source = arg->AsPutArgStk()->gtGetOp1();
unsigned size = arg->AsPutArgStk()->GetStackByteSize();
stackArgBytes += size;
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
assert(curArgTabEntry != nullptr);
assert(size == (curArgTabEntry->numSlots * TARGET_POINTER_SIZE));
#ifdef FEATURE_PUT_STRUCT_ARG_STK
if (!source->OperIs(GT_FIELD_LIST) && (source->TypeGet() == TYP_STRUCT))
{
GenTreeObj* obj = source->AsObj();
unsigned argBytes = roundUp(obj->GetLayout()->GetSize(), TARGET_POINTER_SIZE);
#ifdef TARGET_X86
// If we have an OBJ, we must have created a copy if the original arg was not a
// local and was not a multiple of TARGET_POINTER_SIZE.
// Note that on x64/ux this will be handled by unrolling in genStructPutArgUnroll.
assert((argBytes == obj->GetLayout()->GetSize()) || obj->Addr()->IsLocalAddrExpr());
#endif // TARGET_X86
assert((curArgTabEntry->numSlots * TARGET_POINTER_SIZE) == argBytes);
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
#endif // DEBUG
}
}
#endif // defined(TARGET_X86) || defined(UNIX_AMD64_ABI)
// Insert a null check on "this" pointer if asked.
if (call->NeedsNullCheck())
{
const regNumber regThis = genGetThisArgReg(call);
GetEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
}
// If fast tail call, then we are done here, we just have to load the call
// target into the right registers. We ensure in RA that the registers used
// for the target (e.g. contained indir) are loaded into volatile registers
// that won't be restored by epilog sequence.
if (call->IsFastTailCall())
{
GenTree* target = getCallTarget(call, nullptr);
if (target != nullptr)
{
if (target->isContainedIndir())
{
genConsumeAddress(target->AsIndir()->Addr());
}
else
{
assert(!target->isContained());
genConsumeReg(target);
}
}
return;
}
// For a pinvoke to unmanaged code we emit a label to clear
// the GC pointer state before the callsite.
// We can't utilize the typical lazy killing of GC pointers
// at (or inside) the callsite.
if (compiler->killGCRefs(call))
{
genDefineTempLabel(genCreateTempLabel());
}
#if defined(DEBUG) && defined(TARGET_X86)
// Store the stack pointer so we can check it after the call.
if (compiler->opts.compStackCheckOnCall && call->gtCallType == CT_USER_FUNC)
{
noway_assert(compiler->lvaCallSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvOnFrame);
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
}
#endif // defined(DEBUG) && defined(TARGET_X86)
// When it's a PInvoke call and the call type is USER function, we issue VZEROUPPER here
// if the function contains 256-bit AVX instructions; this avoids the AVX-256 to legacy SSE
// transition penalty, assuming the user function contains legacy SSE instructions.
// To limit the code size impact we only issue VZEROUPPER before the PInvoke call, not after
// it, because the transition penalty from legacy SSE to AVX only happens when there is a
// preceding 256-bit AVX to legacy SSE transition.
if (call->IsPInvoke() && (call->gtCallType == CT_USER_FUNC) && GetEmitter()->Contains256bitAVX())
{
assert(compiler->canUseVexEncoding());
instGen(INS_vzeroupper);
}
genCallInstruction(call X86_ARG(stackArgBytes));
// for pinvoke/intrinsic/tailcalls we may have needed to get the address of
// a label. In case it is indirect with CFG enabled make sure we do not get
// the address after the validation but only after the actual call that
// comes after.
if (genPendingCallLabel && !call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
genDefineInlineTempLabel(genPendingCallLabel);
genPendingCallLabel = nullptr;
}
#ifdef DEBUG
// We should not have GC pointers in killed registers live around the call.
// GC info for arg registers were cleared when consuming arg nodes above
// and LSRA should ensure it for other trashed registers.
regMaskTP killMask = RBM_CALLEE_TRASH;
if (call->IsHelperCall())
{
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
killMask = compiler->compHelperCallKillSet(helpFunc);
}
assert((gcInfo.gcRegGCrefSetCur & killMask) == 0);
assert((gcInfo.gcRegByrefSetCur & killMask) == 0);
#endif
var_types returnType = call->TypeGet();
if (returnType != TYP_VOID)
{
#ifdef TARGET_X86
if (varTypeIsFloating(returnType))
{
// Spill the value from the fp stack.
// Then, load it into the target register.
call->gtFlags |= GTF_SPILL;
regSet.rsSpillFPStack(call);
call->gtFlags |= GTF_SPILLED;
call->gtFlags &= ~GTF_SPILL;
}
else
#endif // TARGET_X86
{
regNumber returnReg;
if (call->HasMultiRegRetVal())
{
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
assert(retTypeDesc != nullptr);
const unsigned regCount = retTypeDesc->GetReturnRegCount();
// If regs allocated to call node are different from ABI return
// regs in which the call has returned its result, move the result
// to regs allocated to call node.
for (unsigned i = 0; i < regCount; ++i)
{
var_types regType = retTypeDesc->GetReturnRegType(i);
returnReg = retTypeDesc->GetABIReturnReg(i);
regNumber allocatedReg = call->GetRegNumByIdx(i);
inst_Mov(regType, allocatedReg, returnReg, /* canSkip */ true);
}
#ifdef FEATURE_SIMD
// A Vector3 return value is stored in xmm0 and xmm1.
// RyuJIT assumes that the upper unused bits of xmm1 are cleared but
// the native compiler doesn't guarantee it.
if (call->IsUnmanaged() && (returnType == TYP_SIMD12))
{
returnReg = retTypeDesc->GetABIReturnReg(1);
// Clear the upper 32 bits by two shift instructions.
// retReg = retReg << 96
// retReg = retReg >> 96
GetEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
}
#endif // FEATURE_SIMD
}
else
{
#ifdef TARGET_X86
if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
// The x86 CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
// TCB in REG_PINVOKE_TCB. AMD64/ARM64 use the standard calling convention. fgMorphCall() sets the
// correct argument registers.
returnReg = REG_PINVOKE_TCB;
}
else
#endif // TARGET_X86
if (varTypeIsFloating(returnType))
{
returnReg = REG_FLOATRET;
}
else
{
returnReg = REG_INTRET;
}
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ true);
}
genProduceReg(call);
}
}
// If there is nothing next, that means the result is thrown away, so this value is not live.
// However, for minopts or debuggable code, we keep it live to support managed return value debugging.
if ((call->gtNext == nullptr) && compiler->opts.OptimizationEnabled())
{
gcInfo.gcMarkRegSetNpt(RBM_INTRET);
}
#if defined(DEBUG) && defined(TARGET_X86)
if (compiler->opts.compStackCheckOnCall && call->gtCallType == CT_USER_FUNC)
{
noway_assert(compiler->lvaCallSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvOnFrame);
if (!call->CallerPop() && (stackArgBytes != 0))
{
            // ECX is trashed, so it can be used to compute the expected SP. We saved the value of SP
            // after pushing all the stack arguments, but the callee popped the arguments, so we need
            // to do some math to figure out a good comparison.
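            // Roughly: mov ecx, esp / sub ecx, stackArgBytes / cmp [SP-check slot], ecx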
GetEmitter()->emitIns_Mov(INS_mov, EA_4BYTE, REG_ARG_0, REG_SPBASE, /* canSkip */ false);
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_ARG_0, stackArgBytes);
GetEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_ARG_0, compiler->lvaCallSpCheck, 0);
}
else
{
GetEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
}
BasicBlock* sp_check = genCreateTempLabel();
GetEmitter()->emitIns_J(INS_je, sp_check);
instGen(INS_BREAKPOINT);
genDefineTempLabel(sp_check);
}
#endif // defined(DEBUG) && defined(TARGET_X86)
#if !defined(FEATURE_EH_FUNCLETS)
//-------------------------------------------------------------------------
// Create a label for tracking of region protected by the monitor in synchronized methods.
// This needs to be here, rather than above where fPossibleSyncHelperCall is set,
// so the GC state vars have been updated before creating the label.
if ((call->gtCallType == CT_HELPER) && (compiler->info.compFlags & CORINFO_FLG_SYNCH))
{
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(call->gtCallMethHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
switch (helperNum)
{
case CORINFO_HELP_MON_ENTER:
case CORINFO_HELP_MON_ENTER_STATIC:
noway_assert(compiler->syncStartEmitCookie == NULL);
compiler->syncStartEmitCookie =
GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
noway_assert(compiler->syncStartEmitCookie != NULL);
break;
case CORINFO_HELP_MON_EXIT:
case CORINFO_HELP_MON_EXIT_STATIC:
noway_assert(compiler->syncEndEmitCookie == NULL);
compiler->syncEndEmitCookie =
GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
noway_assert(compiler->syncEndEmitCookie != NULL);
break;
default:
break;
}
}
#endif // !FEATURE_EH_FUNCLETS
unsigned stackAdjustBias = 0;
#if defined(TARGET_X86)
// Is the caller supposed to pop the arguments?
if (call->CallerPop() && (stackArgBytes != 0))
{
stackAdjustBias = stackArgBytes;
}
SubtractStackLevel(stackArgBytes);
#endif // TARGET_X86
genRemoveAlignmentAfterCall(call, stackAdjustBias);
}
//------------------------------------------------------------------------
// genCallInstruction - Generate instructions necessary to transfer control to the call.
//
// Arguments:
// call - the GT_CALL node
//
// Remarks:
// For tailcalls this function will generate a jump.
//
void CodeGen::genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes))
{
#if defined(TARGET_X86)
// If the callee pops the arguments, we pass a positive value as the argSize, and the emitter will
// adjust its stack level accordingly.
// If the caller needs to explicitly pop its arguments, we must pass a negative value, and then do the
// pop when we're done.
target_ssize_t argSizeForEmitter = stackArgBytes;
if (call->CallerPop())
{
argSizeForEmitter = -stackArgBytes;
}
#endif // defined(TARGET_X86)
// Determine return value size(s).
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
emitAttr retSize = EA_PTRSIZE;
emitAttr secondRetSize = EA_UNKNOWN;
if (call->HasMultiRegRetVal())
{
retSize = emitTypeSize(retTypeDesc->GetReturnRegType(0));
secondRetSize = emitTypeSize(retTypeDesc->GetReturnRegType(1));
}
else
{
assert(!varTypeIsStruct(call));
if (call->gtType == TYP_REF)
{
retSize = EA_GCREF;
}
else if (call->gtType == TYP_BYREF)
{
retSize = EA_BYREF;
}
}
// We need to propagate the IL offset information to the call instruction, so we can emit
// an IL to native mapping record for the call, to support managed return value debugging.
// We don't want tail call helper calls that were converted from normal calls to get a record,
// so we skip this hash table lookup logic in that case.
DebugInfo di;
if (compiler->opts.compDbgInfo && compiler->genCallSite2DebugInfoMap != nullptr && !call->IsTailCall())
{
(void)compiler->genCallSite2DebugInfoMap->Lookup(call, &di);
}
CORINFO_SIG_INFO* sigInfo = nullptr;
#ifdef DEBUG
// Pass the call signature information down into the emitter so the emitter can associate
// native call sites with the signatures they were generated from.
if (call->gtCallType != CT_HELPER)
{
sigInfo = call->callSig;
}
#endif // DEBUG
CORINFO_METHOD_HANDLE methHnd;
GenTree* target = getCallTarget(call, &methHnd);
if (target != nullptr)
{
#ifdef TARGET_X86
if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT))
{
// On x86, we need to generate a very specific pattern for indirect VSD calls:
//
// 3-byte nop
// call dword ptr [eax]
//
// Where EAX is also used as an argument to the stub dispatch helper. Make
// sure that the call target address is computed into EAX in this case.
assert(compiler->virtualStubParamInfo->GetReg() == REG_VIRTUAL_STUB_TARGET);
assert(target->isContainedIndir());
assert(target->OperGet() == GT_IND);
GenTree* addr = target->AsIndir()->Addr();
assert(addr->isUsedFromReg());
genConsumeReg(addr);
genCopyRegIfNeeded(addr, REG_VIRTUAL_STUB_TARGET);
GetEmitter()->emitIns_Nop(3);
// clang-format off
GetEmitter()->emitIns_Call(emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
argSizeForEmitter,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di, REG_VIRTUAL_STUB_TARGET, REG_NA, 1, 0);
// clang-format on
}
else
#endif
if (target->isContainedIndir())
{
// When CFG is enabled we should not be emitting any non-register indirect calls.
assert(!compiler->opts.IsCFGEnabled() ||
call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL) ||
call->IsHelperCall(compiler, CORINFO_HELP_DISPATCH_INDIRECT_CALL));
if (target->AsIndir()->HasBase() && target->AsIndir()->Base()->isContainedIntOrIImmed())
{
// Note that if gtControlExpr is an indir of an absolute address, we mark it as
// contained only if it can be encoded as PC-relative offset.
assert(target->AsIndir()->Base()->AsIntConCommon()->FitsInAddrBase(compiler));
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN_INDIR,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
(void*) target->AsIndir()->Base()->AsIntConCommon()->IconValue()
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
else
{
// For fast tailcalls this is happening in epilog, so we should
// have already consumed target in genCall.
if (!call->IsFastTailCall())
{
genConsumeAddress(target->AsIndir()->Addr());
}
// clang-format off
genEmitCallIndir(emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
target->AsIndir()
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
call->IsFastTailCall());
// clang-format on
}
}
else
{
// We have already generated code for gtControlExpr evaluating it into a register.
// We just need to emit "call reg" in this case.
assert(genIsValidIntReg(target->GetRegNum()));
// For fast tailcalls this is happening in epilog, so we should
// have already consumed target in genCall.
if (!call->IsFastTailCall())
{
genConsumeReg(target);
}
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr // addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
target->GetRegNum(),
call->IsFastTailCall());
// clang-format on
}
}
else
{
// If we have no target and this is a call with indirection cell
// then emit call through that indir cell. This means we generate e.g.
// lea r11, [addr of cell]
// call [r11]
        // which is more efficient than
// lea r11, [addr of cell]
// call [addr of cell]
regNumber indirCellReg = getCallIndirectionCellReg(call);
if (indirCellReg != REG_NA)
{
// clang-format off
GetEmitter()->emitIns_Call(
emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
0,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di, indirCellReg, REG_NA, 0, 0,
call->IsFastTailCall());
// clang-format on
}
#ifdef FEATURE_READYTORUN
else if (call->gtEntryPoint.addr != nullptr)
{
emitter::EmitCallType type =
(call->gtEntryPoint.accessType == IAT_VALUE) ? emitter::EC_FUNC_TOKEN : emitter::EC_FUNC_TOKEN_INDIR;
// clang-format off
genEmitCall(type,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
(void*)call->gtEntryPoint.addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
#endif
else
{
// Generate a direct call to a non-virtual user defined or helper method
assert(call->gtCallType == CT_HELPER || call->gtCallType == CT_USER_FUNC);
void* addr = nullptr;
if (call->gtCallType == CT_HELPER)
{
// Direct call to a helper method.
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
void* pAddr = nullptr;
addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
assert(pAddr == nullptr);
}
else
{
// Direct call to a non-virtual user function.
addr = call->gtDirectCallAddress;
}
assert(addr != nullptr);
// Non-virtual direct calls to known addresses
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
}
}
// Produce code for a GT_JMP node.
// The arguments of the caller need to be transferred to the callee before exiting the caller.
// The actual jump to the callee is generated as part of the caller's epilog sequence.
// Therefore the codegen of GT_JMP only needs to ensure that the callee's arguments are set up correctly.
void CodeGen::genJmpMethod(GenTree* jmp)
{
assert(jmp->OperGet() == GT_JMP);
assert(compiler->compJmpOpUsed);
// If no arguments, nothing to do
if (compiler->info.compArgsCount == 0)
{
return;
}
// Make sure register arguments are in their initial registers
// and stack arguments are put back as well.
unsigned varNum;
LclVarDsc* varDsc;
    // First move any enregistered stack arguments back to the stack.
    // At the same time any reg arg not in the correct reg is moved back to its stack location.
//
    // We are not strictly required to spill reg args that are not in the desired reg for a jmp call,
    // but that would require us to deal with circularity while moving values around. Spilling
    // to the stack keeps the implementation simple, which is not a bad trade-off given that jmp calls
    // are not frequent.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
if (varDsc->lvIsRegArg && (varDsc->GetRegNum() != REG_STK))
{
            // Skip reg args that are already in the right register for the jmp call.
// If not, we will spill such args to their stack locations.
//
// If we need to generate a tail call profiler hook, then spill all
// arg regs to free them up for the callback.
if (!compiler->compIsProfilerHookNeeded() && (varDsc->GetRegNum() == varDsc->GetArgReg()))
{
continue;
}
}
else if (varDsc->GetRegNum() == REG_STK)
{
            // Skip args that are currently living on the stack.
continue;
}
// If we came here it means either a reg argument not in the right register or
// a stack argument currently living in a register. In either case the following
// assert should hold.
assert(varDsc->GetRegNum() != REG_STK);
assert(!varDsc->lvIsStructField || (compiler->lvaGetDesc(varDsc->lvParentLcl)->lvFieldCnt == 1));
var_types storeType = varDsc->GetActualRegisterType(); // We own the memory and can use the full move.
GetEmitter()->emitIns_S_R(ins_Store(storeType), emitTypeSize(storeType), varDsc->GetRegNum(), varNum, 0);
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be expecting it.
// Therefore manually update life of varDsc->GetRegNum().
regMaskTP tempMask = varDsc->lvRegMask();
regSet.RemoveMaskVars(tempMask);
gcInfo.gcMarkRegSetNpt(tempMask);
if (compiler->lvaIsGCTracked(varDsc))
{
#ifdef DEBUG
if (!VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming live\n", varNum);
}
else
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing live\n", varNum);
}
#endif // DEBUG
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
#ifdef PROFILING_SUPPORTED
// At this point all arg regs are free.
// Emit tail call profiler callback.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif
    // Next move any register arguments that are not currently in their argument registers back into them.
regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
// Skip if arg not passed in a register.
if (!varDsc->lvIsRegArg)
{
continue;
}
#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(varDsc))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
assert(typeHnd != nullptr);
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc);
assert(structDesc.passedInRegisters);
unsigned __int8 offset0 = 0;
unsigned __int8 offset1 = 0;
var_types type0 = TYP_UNKNOWN;
var_types type1 = TYP_UNKNOWN;
// Get the eightbyte data
compiler->GetStructTypeOffset(structDesc, &type0, &type1, &offset0, &offset1);
// Move the values into the right registers.
//
// Update varDsc->GetArgReg() and lvOtherArgReg life and GC Info to indicate varDsc stack slot is dead and
// argReg is going live. Note that we cannot modify varDsc->GetRegNum() and lvOtherArgReg here
// because another basic block may not be expecting it.
// Therefore manually update life of argReg. Note that GT_JMP marks
// the end of the basic block and after which reg life and gc info will be recomputed for the new block in
// genCodeForBBList().
if (type0 != TYP_UNKNOWN)
{
GetEmitter()->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), varDsc->GetArgReg(), varNum, offset0);
regSet.SetMaskVars(regSet.GetMaskVars() | genRegMask(varDsc->GetArgReg()));
gcInfo.gcMarkRegPtrVal(varDsc->GetArgReg(), type0);
}
if (type1 != TYP_UNKNOWN)
{
GetEmitter()->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), varDsc->GetOtherArgReg(), varNum,
offset1);
regSet.SetMaskVars(regSet.GetMaskVars() | genRegMask(varDsc->GetOtherArgReg()));
gcInfo.gcMarkRegPtrVal(varDsc->GetOtherArgReg(), type1);
}
if (varDsc->lvTracked)
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
else
#endif // !defined(UNIX_AMD64_ABI)
{
// Register argument
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
noway_assert(
isRegParamType(genActualType(varDsc->TypeGet())) ||
(varTypeIsStruct(varDsc->TypeGet()) && compiler->isTrivialPointerSizedStruct(varDsc->GetStructHnd())));
#else
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
#endif // TARGET_X86
// Is register argument already in the right register?
// If not load it from its stack location.
var_types loadType = varDsc->GetRegisterType();
#ifdef TARGET_X86
if (varTypeIsStruct(varDsc->TypeGet()))
{
// Treat trivial pointer-sized structs as a pointer sized primitive
// for the purposes of registers.
loadType = TYP_I_IMPL;
}
#endif
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
if (varDsc->GetRegNum() != argReg)
{
assert(genIsValidReg(argReg));
GetEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
// Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be
// expecting it. Therefore manually update life of argReg. Note that GT_JMP marks the end of the
// basic block and after which reg life and gc info will be recomputed for the new block in
// genCodeForBBList().
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (compiler->lvaIsGCTracked(varDsc))
{
#ifdef DEBUG
if (VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming dead\n", varNum);
}
else
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing dead\n", varNum);
}
#endif // DEBUG
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
}
#if defined(TARGET_AMD64)
// In case of a jmp call to a vararg method also pass the float/double arg in the corresponding int arg
// register. This is due to the AMD64 ABI which requires floating point values passed to varargs functions to
// be passed in both integer and floating point registers. It doesn't apply to x86, which passes floating point
// values on the stack.
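        // (For example, a double passed in XMM1 for the second argument slot is also copied to RDX,
        // its corresponding integer argument register.)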
if (compFeatureVarArg() && compiler->info.compIsVarArgs)
{
regNumber intArgReg;
var_types loadType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
if (varTypeIsFloating(loadType))
{
intArgReg = compiler->getCallArgIntRegister(argReg);
inst_Mov(TYP_LONG, intArgReg, argReg, /* canSkip */ false, emitActualTypeSize(loadType));
}
else
{
intArgReg = argReg;
}
fixedIntArgMask |= genRegMask(intArgReg);
if (intArgReg == REG_ARG_0)
{
assert(firstArgVarNum == BAD_VAR_NUM);
firstArgVarNum = varNum;
}
}
#endif // TARGET_AMD64
}
#if defined(TARGET_AMD64)
// Jmp call to a vararg method - if the method has fewer than 4 fixed arguments,
// load the remaining arg registers (both int and float) from the corresponding
    // shadow stack slots. This is because we don't know the number and types of the
    // non-fixed params passed by the caller; therefore we have to assume the worst case
    // of the caller passing float/double args in both int and float arg regs.
//
// This doesn't apply to x86, which doesn't pass floating point values in floating
// point registers.
//
// The caller could have passed gc-ref/byref type var args. Since these are var args
    // the callee has no way of knowing their gc-ness. Therefore, mark the region that loads
// remaining arg registers from shadow stack slots as non-gc interruptible.
if (compFeatureVarArg() && fixedIntArgMask != RBM_NONE)
{
assert(compiler->info.compIsVarArgs);
assert(firstArgVarNum != BAD_VAR_NUM);
regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
if (remainingIntArgMask != RBM_NONE)
{
GetEmitter()->emitDisableGC();
for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
regNumber argReg = intArgRegs[argNum];
regMaskTP argRegMask = genRegMask(argReg);
if ((remainingIntArgMask & argRegMask) != 0)
{
remainingIntArgMask &= ~argRegMask;
GetEmitter()->emitIns_R_S(INS_mov, EA_8BYTE, argReg, firstArgVarNum, argOffset);
// also load it in corresponding float arg reg
regNumber floatReg = compiler->getCallArgFloatRegister(argReg);
inst_Mov(TYP_DOUBLE, floatReg, argReg, /* canSkip */ false, emitActualTypeSize(TYP_I_IMPL));
}
argOffset += REGSIZE_BYTES;
}
GetEmitter()->emitEnableGC();
}
}
#endif // TARGET_AMD64
}
// Produce code for a GT_LEA subnode.
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
emitAttr size = emitTypeSize(lea);
genConsumeOperands(lea);
if (lea->Base() && lea->Index())
{
regNumber baseReg = lea->Base()->GetRegNum();
regNumber indexReg = lea->Index()->GetRegNum();
GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->GetRegNum(), baseReg, indexReg, lea->gtScale, lea->Offset());
}
else if (lea->Base())
{
GetEmitter()->emitIns_R_AR(INS_lea, size, lea->GetRegNum(), lea->Base()->GetRegNum(), lea->Offset());
}
else if (lea->Index())
{
GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->GetRegNum(), REG_NA, lea->Index()->GetRegNum(), lea->gtScale,
lea->Offset());
}
genProduceReg(lea);
}
//------------------------------------------------------------------------
// genCompareFloat: Generate code for comparing two floating point values
//
// Arguments:
// treeNode - the compare tree
//
void CodeGen::genCompareFloat(GenTree* treeNode)
{
assert(treeNode->OperIsCompare());
GenTreeOp* tree = treeNode->AsOp();
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = op1->TypeGet();
var_types op2Type = op2->TypeGet();
genConsumeOperands(tree);
assert(varTypeIsFloating(op1Type));
assert(op1Type == op2Type);
regNumber targetReg = treeNode->GetRegNum();
instruction ins;
emitAttr cmpAttr;
GenCondition condition = GenCondition::FromFloatRelop(treeNode);
if (condition.PreferSwap())
{
condition = GenCondition::Swap(condition);
std::swap(op1, op2);
}
ins = (op1Type == TYP_FLOAT) ? INS_ucomiss : INS_ucomisd;
cmpAttr = emitTypeSize(op1Type);
GetEmitter()->emitInsBinary(ins, cmpAttr, op1, op2);
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
if ((condition.GetCode() == GenCondition::FNEU) && (op1->GetRegNum() == op2->GetRegNum()))
{
// For floating point, `x != x` is a common way of
// checking for NaN. So, in the case where both
// operands are the same, we can optimize codegen
// to only do a single check.
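            // (ucomiss/ucomisd set PF only for an unordered result, i.e. when an operand is NaN,
            // so checking the parity flag alone is sufficient here.)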
condition = GenCondition(GenCondition::P);
}
inst_SETCC(condition, treeNode->TypeGet(), targetReg);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCompareInt: Generate code for comparing ints or, on amd64, longs.
//
// Arguments:
// treeNode - the compare tree
//
// Return Value:
// None.
void CodeGen::genCompareInt(GenTree* treeNode)
{
assert(treeNode->OperIsCompare() || treeNode->OperIs(GT_CMP));
GenTreeOp* tree = treeNode->AsOp();
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = op1->TypeGet();
var_types op2Type = op2->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
bool canReuseFlags = false;
genConsumeOperands(tree);
assert(!op1->isContainedIntOrIImmed());
assert(!varTypeIsFloating(op2Type));
instruction ins;
var_types type = TYP_UNKNOWN;
if (tree->OperIs(GT_TEST_EQ, GT_TEST_NE))
{
ins = INS_test;
// Unlike many xarch instructions TEST doesn't have a form with a 16/32/64 bit first operand and
// an 8 bit immediate second operand. But if the immediate value fits in 8 bits then we can simply
// emit a 8 bit TEST instruction, unless we're targeting x86 and the first operand is a non-byteable
// register.
// Note that lowering does something similar but its main purpose is to allow memory operands to be
        // contained so it doesn't handle other kinds of operands. It could do more but on x86 that results
        // in additional register constraints and that may be worse than wasting 3 bytes on an immediate.
if (
#ifdef TARGET_X86
(!op1->isUsedFromReg() || isByteReg(op1->GetRegNum())) &&
#endif
(op2->IsCnsIntOrI() && FitsIn<uint8_t>(op2->AsIntCon()->IconValue())))
{
type = TYP_UBYTE;
}
}
else if (op1->isUsedFromReg() && op2->IsIntegralConst(0))
{
if (compiler->opts.OptimizationEnabled())
{
emitAttr op1Size = emitActualTypeSize(op1->TypeGet());
assert((int)op1Size >= 4);
// Optimize "x<0" and "x>=0" to "x>>31" if "x" is not a jump condition and in a reg.
            // Morph/Lowering are responsible for rotating "0<x" to "x>0" so we won't handle it here.
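            // (For "x>=0" we compute "(~x) >> 31" with an unsigned shift: the top bit of ~x is set
            // exactly when x is non-negative, so the result is 1 for x >= 0 and 0 otherwise.)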
if ((targetReg != REG_NA) && tree->OperIs(GT_LT, GT_GE) && !tree->IsUnsigned())
{
inst_Mov(op1->TypeGet(), targetReg, op1->GetRegNum(), /* canSkip */ true);
if (tree->OperIs(GT_GE))
{
// emit "not" for "x>=0" case
inst_RV(INS_not, targetReg, op1->TypeGet());
}
inst_RV_IV(INS_shr_N, targetReg, (int)op1Size * 8 - 1, op1Size);
genProduceReg(tree);
return;
}
canReuseFlags = true;
}
// We're comparing a register to 0 so we can generate "test reg1, reg1"
// instead of the longer "cmp reg1, 0"
ins = INS_test;
op2 = op1;
}
else
{
ins = INS_cmp;
}
if (type == TYP_UNKNOWN)
{
if (op1Type == op2Type)
{
type = op1Type;
}
else if (genTypeSize(op1Type) == genTypeSize(op2Type))
{
// If the types are different but have the same size then we'll use TYP_INT or TYP_LONG.
// This primarily deals with small type mixes (e.g. byte/ubyte) that need to be widened
// and compared as int. We should not get long type mixes here but handle that as well
// just in case.
type = genTypeSize(op1Type) == 8 ? TYP_LONG : TYP_INT;
}
else
{
        // If the types are different, simply use TYP_INT. This deals with small type/int type
// mixes (e.g. byte/short ubyte/int) that need to be widened and compared as int.
// Lowering is expected to handle any mixes that involve long types (e.g. int/long).
type = TYP_INT;
}
// The common type cannot be smaller than any of the operand types, we're probably mixing int/long
assert(genTypeSize(type) >= max(genTypeSize(op1Type), genTypeSize(op2Type)));
// Small unsigned int types (TYP_BOOL can use anything) should use unsigned comparisons
assert(!(varTypeIsSmallInt(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0));
// If op1 is smaller then it cannot be in memory, we're probably missing a cast
assert((genTypeSize(op1Type) >= genTypeSize(type)) || !op1->isUsedFromMemory());
// If op2 is smaller then it cannot be in memory, we're probably missing a cast
assert((genTypeSize(op2Type) >= genTypeSize(type)) || !op2->isUsedFromMemory());
// If we ended up with a small type and op2 is a constant then make sure we don't lose constant bits
assert(!op2->IsCnsIntOrI() || !varTypeIsSmall(type) || FitsIn(type, op2->AsIntCon()->IconValue()));
}
// The type cannot be larger than the machine word size
assert(genTypeSize(type) <= genTypeSize(TYP_I_IMPL));
// TYP_UINT and TYP_ULONG should not appear here, only small types can be unsigned
assert(!varTypeIsUnsigned(type) || varTypeIsSmall(type));
    // The sign jump optimization should only be set by the following check.
assert((tree->gtFlags & GTF_RELOP_SJUMP_OPT) == 0);
if (canReuseFlags && emit->AreFlagsSetToZeroCmp(op1->GetRegNum(), emitTypeSize(type), tree->OperGet()))
{
JITDUMP("Not emitting compare due to flags being already set\n");
}
else if (canReuseFlags && emit->AreFlagsSetForSignJumpOpt(op1->GetRegNum(), emitTypeSize(type), tree))
{
JITDUMP("Not emitting compare due to sign being already set, follow up instr will transform jump\n");
tree->gtFlags |= GTF_RELOP_SJUMP_OPT;
}
else
{
emit->emitInsBinary(ins, emitTypeSize(type), op1, op2);
}
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
inst_SETCC(GenCondition::FromIntegralRelop(tree), tree->TypeGet(), targetReg);
genProduceReg(tree);
}
}
#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------
// genLongToIntCast: Generate code for long to int casts on x86.
//
// Arguments:
// cast - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// The cast node and its sources (via GT_LONG) must have been assigned registers.
// The destination cannot be a floating point type or a small integer type.
//
void CodeGen::genLongToIntCast(GenTree* cast)
{
assert(cast->OperGet() == GT_CAST);
GenTree* src = cast->gtGetOp1();
noway_assert(src->OperGet() == GT_LONG);
genConsumeRegs(src);
var_types srcType = ((cast->gtFlags & GTF_UNSIGNED) != 0) ? TYP_ULONG : TYP_LONG;
var_types dstType = cast->CastToType();
regNumber loSrcReg = src->gtGetOp1()->GetRegNum();
regNumber hiSrcReg = src->gtGetOp2()->GetRegNum();
regNumber dstReg = cast->GetRegNum();
assert((dstType == TYP_INT) || (dstType == TYP_UINT));
assert(genIsValidIntReg(loSrcReg));
assert(genIsValidIntReg(hiSrcReg));
assert(genIsValidIntReg(dstReg));
if (cast->gtOverflow())
{
//
// Generate an overflow check for [u]long to [u]int casts:
//
// long -> int - check if the upper 33 bits are all 0 or all 1
//
// ulong -> int - check if the upper 33 bits are all 0
//
// long -> uint - check if the upper 32 bits are all 0
// ulong -> uint - check if the upper 32 bits are all 0
//
if ((srcType == TYP_LONG) && (dstType == TYP_INT))
{
BasicBlock* allOne = genCreateTempLabel();
BasicBlock* success = genCreateTempLabel();
inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
inst_JMP(EJ_js, allOne);
inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
inst_JMP(EJ_jmp, success);
genDefineTempLabel(allOne);
inst_RV_IV(INS_cmp, hiSrcReg, -1, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
genDefineTempLabel(success);
}
else
{
if ((srcType == TYP_ULONG) && (dstType == TYP_INT))
{
inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_js, SCK_OVERFLOW);
}
inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
}
}
inst_Mov(TYP_INT, dstReg, loSrcReg, /* canSkip */ true);
genProduceReg(cast);
}
#endif
//------------------------------------------------------------------------
// genIntCastOverflowCheck: Generate overflow checking code for an integer cast.
//
// Arguments:
// cast - The GT_CAST node
// desc - The cast description
// reg - The register containing the value to check
//
void CodeGen::genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg)
{
switch (desc.CheckKind())
{
case GenIntCastDesc::CHECK_POSITIVE:
GetEmitter()->emitIns_R_R(INS_test, EA_SIZE(desc.CheckSrcSize()), reg, reg);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::CHECK_UINT_RANGE:
{
// We need to check if the value is not greater than 0xFFFFFFFF but this value
// cannot be encoded in an immediate operand. Use a right shift to test if the
// upper 32 bits are zero. This requires a temporary register.
const regNumber tempReg = cast->GetSingleTempReg();
assert(tempReg != reg);
GetEmitter()->emitIns_Mov(INS_mov, EA_8BYTE, tempReg, reg, /* canSkip */ false);
GetEmitter()->emitIns_R_I(INS_shr_N, EA_8BYTE, tempReg, 32);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
}
break;
case GenIntCastDesc::CHECK_POSITIVE_INT_RANGE:
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
genJumpToThrowHlpBlk(EJ_ja, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_INT_RANGE:
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
genJumpToThrowHlpBlk(EJ_jg, SCK_OVERFLOW);
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MIN);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
break;
#endif
default:
{
assert(desc.CheckKind() == GenIntCastDesc::CHECK_SMALL_INT_RANGE);
const int castMaxValue = desc.CheckSmallIntMax();
const int castMinValue = desc.CheckSmallIntMin();
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_ja : EJ_jg, SCK_OVERFLOW);
if (castMinValue != 0)
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
}
}
break;
}
}
//------------------------------------------------------------------------
// genIntToIntCast: Generate code for an integer cast, with or without overflow check.
//
// Arguments:
// cast - The GT_CAST node
//
// Assumptions:
// The cast node is not a contained node and must have an assigned register.
// Neither the source nor target type can be a floating point type.
// On x86 casts to (U)BYTE require that the source be in a byte register.
//
// TODO-XArch-CQ: Allow castOp to be a contained node without an assigned register.
//
void CodeGen::genIntToIntCast(GenTreeCast* cast)
{
genConsumeRegs(cast->gtGetOp1());
const regNumber srcReg = cast->gtGetOp1()->GetRegNum();
const regNumber dstReg = cast->GetRegNum();
emitter* emit = GetEmitter();
assert(genIsValidIntReg(srcReg));
assert(genIsValidIntReg(dstReg));
GenIntCastDesc desc(cast);
if (desc.CheckKind() != GenIntCastDesc::CHECK_NONE)
{
genIntCastOverflowCheck(cast, desc, srcReg);
}
instruction ins;
unsigned insSize;
bool canSkip = false;
switch (desc.ExtendKind())
{
case GenIntCastDesc::ZERO_EXTEND_SMALL_INT:
ins = INS_movzx;
insSize = desc.ExtendSrcSize();
break;
case GenIntCastDesc::SIGN_EXTEND_SMALL_INT:
ins = INS_movsx;
insSize = desc.ExtendSrcSize();
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::ZERO_EXTEND_INT:
ins = INS_mov;
insSize = 4;
canSkip = compiler->opts.OptimizationEnabled() && emit->AreUpper32BitsZero(srcReg);
break;
case GenIntCastDesc::SIGN_EXTEND_INT:
ins = INS_movsxd;
insSize = 4;
break;
#endif
default:
assert(desc.ExtendKind() == GenIntCastDesc::COPY);
ins = INS_mov;
insSize = desc.ExtendSrcSize();
canSkip = true;
break;
}
emit->emitIns_Mov(ins, EA_ATTR(insSize), dstReg, srcReg, canSkip);
genProduceReg(cast);
}
//------------------------------------------------------------------------
// genFloatToFloatCast: Generate code for a cast between float and double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// The cast is between float and double or vice versa.
//
void CodeGen::genFloatToFloatCast(GenTree* treeNode)
{
// float <--> double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
// If not contained, must be a valid float reg.
if (op1->isUsedFromReg())
{
assert(genIsValidFloatReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
genConsumeOperands(treeNode->AsOp());
if (srcType == dstType && (op1->isUsedFromReg() && (targetReg == op1->GetRegNum())))
{
        // The source and destination types are the same and also reside in the same register.
        // We just need to consume and produce the reg in this case.
;
}
else
{
instruction ins = ins_FloatConv(dstType, srcType);
GetEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genIntToFloatCast: Generate code to cast an int/long to float/double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType= int32/uint32/int64/uint64 and DstType=float/double.
//
void CodeGen::genIntToFloatCast(GenTree* treeNode)
{
// int type --> float/double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
if (op1->isUsedFromReg())
{
assert(genIsValidIntReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
#if !defined(TARGET_64BIT)
// We expect morph to replace long to float/double casts with helper calls
noway_assert(!varTypeIsLong(srcType));
#endif // !defined(TARGET_64BIT)
    // Since the xarch emitter doesn't handle reporting gc-info correctly while casting away gc-ness, we
    // ensure the srcType of a cast is a non-gc type. Codegen should never see BYREF as a source type except
    // for GT_LCL_VAR_ADDR and GT_LCL_FLD_ADDR, which represent stack addresses and can be considered
    // TYP_I_IMPL. In all other cases where the src operand is a gc-type and not known to be on the stack,
    // the front-end (see fgMorphCast()) ensures this by assigning the gc-type local to a non-gc-type
    // temp and using the temp as the operand of the cast operation.
if (srcType == TYP_BYREF)
{
noway_assert(op1->OperGet() == GT_LCL_VAR_ADDR || op1->OperGet() == GT_LCL_FLD_ADDR);
srcType = TYP_I_IMPL;
}
// force the srcType to unsigned if GT_UNSIGNED flag is set
if (treeNode->gtFlags & GTF_UNSIGNED)
{
srcType = varTypeToUnsigned(srcType);
}
noway_assert(!varTypeIsGC(srcType));
// We should never be seeing srcType whose size is not sizeof(int) nor sizeof(long).
// For conversions from byte/sbyte/int16/uint16 to float/double, we would expect
// either the front-end or lowering phase to have generated two levels of cast.
// The first one is for widening smaller int type to int32 and the second one is
// to the float/double.
emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
noway_assert((srcSize == EA_ATTR(genTypeSize(TYP_INT))) || (srcSize == EA_ATTR(genTypeSize(TYP_LONG))));
// Also we don't expect to see uint32 -> float/double and uint64 -> float conversions
    // here since they should have been lowered appropriately.
noway_assert(srcType != TYP_UINT);
noway_assert((srcType != TYP_ULONG) || (dstType != TYP_FLOAT));
// To convert int to a float/double, cvtsi2ss/sd SSE2 instruction is used
// which does a partial write to lower 4/8 bytes of xmm register keeping the other
// upper bytes unmodified. If "cvtsi2ss/sd xmmReg, r32/r64" occurs inside a loop,
// the partial write could introduce a false dependency and could cause a stall
// if there are further uses of xmmReg. We have such a case occurring with a
// customer reported version of SpectralNorm benchmark, resulting in 2x perf
// regression. To avoid false dependency, we emit "xorps xmmReg, xmmReg" before
// cvtsi2ss/sd instruction.
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R(INS_xorps, EA_4BYTE, treeNode->GetRegNum(), treeNode->GetRegNum());
// Note that here we need to specify srcType that will determine
// the size of source reg/mem operand and rex.w prefix.
instruction ins = ins_FloatConv(dstType, TYP_INT);
GetEmitter()->emitInsBinary(ins, emitTypeSize(srcType), treeNode, op1);
// Handle the case of srcType = TYP_ULONG. SSE2 conversion instruction
// will interpret ULONG value as LONG. Hence we need to adjust the
// result if sign-bit of srcType is set.
if (srcType == TYP_ULONG)
{
// The instruction sequence below is less accurate than what clang
// and gcc generate. However, we keep the current sequence for backward compatibility.
        // If we change the instructions below, FloatingPointUtils::convertUInt64ToDouble
        // should also be updated for consistent conversion results.
assert(dstType == TYP_DOUBLE);
assert(op1->isUsedFromReg());
// Set the flags without modifying op1.
// test op1Reg, op1Reg
inst_RV_RV(INS_test, op1->GetRegNum(), op1->GetRegNum(), srcType);
// No need to adjust result if op1 >= 0 i.e. positive
// Jge label
BasicBlock* label = genCreateTempLabel();
inst_JMP(EJ_jge, label);
// Adjust the result
// result = result + 0x43f00000 00000000
// addsd resultReg, 0x43f00000 00000000
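        // (0x43f0000000000000 is the IEEE-754 double encoding of 2^64; adding it compensates for the
        // value having been interpreted as a negative signed long.)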
CORINFO_FIELD_HANDLE* cns = &u8ToDblBitmask;
if (*cns == nullptr)
{
double d;
static_assert_no_msg(sizeof(double) == sizeof(__int64));
*((__int64*)&d) = 0x43f0000000000000LL;
*cns = GetEmitter()->emitFltOrDblConst(d, EA_8BYTE);
}
GetEmitter()->emitIns_R_C(INS_addsd, EA_8BYTE, treeNode->GetRegNum(), *cns, 0);
genDefineTempLabel(label);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genFloatToIntCast: Generate code to cast float/double to int/long
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType=float/double and DstType= int32/uint32/int64/uint64
//
// TODO-XArch-CQ: (Low-pri) - generate in-line code when DstType = uint64
//
void CodeGen::genFloatToIntCast(GenTree* treeNode)
{
// we don't expect to see overflow detecting float/double --> int type conversions here
// as they should have been converted into helper calls by front-end.
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidIntReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
if (op1->isUsedFromReg())
{
assert(genIsValidFloatReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
// We should never be seeing dstType whose size is neither sizeof(TYP_INT) nor sizeof(TYP_LONG).
// For conversions to byte/sbyte/int16/uint16 from float/double, we would expect the
// front-end or lowering phase to have generated two levels of cast. The first one is
// for float or double to int32/uint32 and the second one for narrowing int32/uint32 to
// the required smaller int type.
emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
noway_assert((dstSize == EA_ATTR(genTypeSize(TYP_INT))) || (dstSize == EA_ATTR(genTypeSize(TYP_LONG))));
// We shouldn't be seeing uint64 here as it should have been converted
// into a helper call by either front-end or lowering phase.
noway_assert(!varTypeIsUnsigned(dstType) || (dstSize != EA_ATTR(genTypeSize(TYP_LONG))));
    // If the dstType is TYP_UINT, we have 32 bits in which to encode the
    // converted value, and any of the 33rd or higher bits can act as the sign bit.
    // To achieve this, we pretend we are converting to a long.
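    // (A 64-bit cvttss2si/cvttsd2si can represent every uint32 value exactly, so the low 32 bits
    // of the long result are the desired unsigned value.)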
if (varTypeIsUnsigned(dstType) && (dstSize == EA_ATTR(genTypeSize(TYP_INT))))
{
dstType = TYP_LONG;
}
// Note that we need to specify dstType here so that it will determine
// the size of destination integer register and also the rex.w prefix.
genConsumeOperands(treeNode->AsOp());
instruction ins = ins_FloatConv(TYP_INT, srcType);
GetEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCkfinite: Generate code for ckfinite opcode.
//
// Arguments:
// treeNode - The GT_CKFINITE node
//
// Return Value:
// None.
//
// Assumptions:
// GT_CKFINITE node has reserved an internal register.
//
// TODO-XArch-CQ - mark the operand as contained if known to be in
// memory (e.g. field or an array element).
//
void CodeGen::genCkfinite(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_CKFINITE);
GenTree* op1 = treeNode->AsOp()->gtOp1;
var_types targetType = treeNode->TypeGet();
int expMask = (targetType == TYP_FLOAT) ? 0x7F800000 : 0x7FF00000; // Bit mask to extract exponent.
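    // (0x7F800000 covers the 8 exponent bits of a float; 0x7FF00000 covers the 11 exponent bits
    // in the high 32 bits of a double.)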
regNumber targetReg = treeNode->GetRegNum();
// Extract exponent into a register.
regNumber tmpReg = treeNode->GetSingleTempReg();
genConsumeReg(op1);
#ifdef TARGET_64BIT
    // Copy the floating-point value to an integer register. If we copied a double to a long, then
// right-shift the value so the high 32 bits of the floating-point value sit in the low 32
// bits of the integer register.
regNumber srcReg = op1->GetRegNum();
var_types targetIntType = ((targetType == TYP_FLOAT) ? TYP_INT : TYP_LONG);
inst_Mov(targetIntType, tmpReg, srcReg, /* canSkip */ false, emitActualTypeSize(targetType));
if (targetType == TYP_DOUBLE)
{
// right shift by 32 bits to get to exponent.
inst_RV_SH(INS_shr, EA_8BYTE, tmpReg, 32);
}
// Mask exponent with all 1's and check if the exponent is all 1's
inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
// If exponent is all 1's, throw ArithmeticException
genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
// if it is a finite value copy it to targetReg
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
#else // !TARGET_64BIT
// If the target type is TYP_DOUBLE, we want to extract the high 32 bits into the register.
// There is no easy way to do this. To not require an extra register, we'll use shuffles
// to move the high 32 bits into the low 32 bits, then shuffle it back, since we
// need to produce the value into the target register.
//
// For TYP_DOUBLE, we'll generate (for targetReg != op1->GetRegNum()):
// movaps targetReg, op1->GetRegNum()
// shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
// mov_xmm2i tmpReg, targetReg // tmpReg <= Y
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// movaps targetReg, op1->GetRegNum() // copy the value again, instead of un-shuffling it
//
// For TYP_DOUBLE with (targetReg == op1->GetRegNum()):
// shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
// mov_xmm2i tmpReg, targetReg // tmpReg <= Y
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// shufps targetReg, targetReg, 0xB1 // ZWXY => WZYX
//
// For TYP_FLOAT, it's the same as TARGET_64BIT:
// mov_xmm2i tmpReg, targetReg // tmpReg <= low 32 bits
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// movaps targetReg, op1->GetRegNum() // only if targetReg != op1->GetRegNum()
regNumber copyToTmpSrcReg; // The register we'll copy to the integer temp.
if (targetType == TYP_DOUBLE)
{
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, (int8_t)0xb1);
copyToTmpSrcReg = targetReg;
}
else
{
copyToTmpSrcReg = op1->GetRegNum();
}
// Copy only the low 32 bits. This will be the high order 32 bits of the floating-point
// value, no matter the floating-point type.
inst_Mov(TYP_INT, tmpReg, copyToTmpSrcReg, /* canSkip */ false, emitActualTypeSize(TYP_FLOAT));
// Mask exponent with all 1's and check if the exponent is all 1's
inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
// If exponent is all 1's, throw ArithmeticException
genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
if ((targetType == TYP_DOUBLE) && (targetReg == op1->GetRegNum()))
{
// We need to re-shuffle the targetReg to get the correct result.
inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, (int8_t)0xb1);
}
else
{
// In both the TYP_FLOAT and TYP_DOUBLE case, the op1 register is untouched,
// so copy it to the targetReg. This is faster and smaller for TYP_DOUBLE
// than re-shuffling the targetReg.
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
}
#endif // !TARGET_64BIT
genProduceReg(treeNode);
}
#ifdef TARGET_AMD64
int CodeGenInterface::genSPtoFPdelta() const
{
int delta;
#ifdef UNIX_AMD64_ABI
// We require frame chaining on Unix to support native tool unwinding (such as
// unwinding by the native debugger). We have a CLR-only extension to the
// unwind codes (UWOP_SET_FPREG_LARGE) to support SP->FP offsets larger than 240.
// If Unix ever supports EnC, the RSP == RBP assumption will have to be reevaluated.
delta = genTotalFrameSize();
#else // !UNIX_AMD64_ABI
// As per Amd64 ABI, RBP offset from initial RSP can be between 0 and 240 if
// RBP needs to be reported in unwind codes. This case would arise for methods
// with localloc.
if (compiler->compLocallocUsed)
{
// We cannot base delta computation on compLclFrameSize since it changes from
// tentative to final frame layout and hence there is a possibility of
// under-estimating offset of vars from FP, which in turn results in under-
// estimating instruction size.
//
// To be predictive and so as never to under-estimate offset of vars from FP
// we will always position FP at min(240, outgoing arg area size).
delta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize);
}
else if (compiler->opts.compDbgEnC)
{
// vm assumption on EnC methods is that rsp and rbp are equal
delta = 0;
}
else
{
delta = genTotalFrameSize();
}
#endif // !UNIX_AMD64_ABI
return delta;
}
//---------------------------------------------------------------------
// genTotalFrameSize - return the total size of the stack frame, including local size,
// callee-saved register size, etc. For AMD64, this does not include the caller-pushed
// return address.
//
// Return value:
// Total frame size
//
int CodeGenInterface::genTotalFrameSize() const
{
assert(!IsUninitialized(compiler->compCalleeRegsPushed));
int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
assert(totalFrameSize >= 0);
return totalFrameSize;
}
//---------------------------------------------------------------------
// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
// This number is going to be negative, since the Caller-SP is at a higher
// address than the frame pointer.
//
// There must be a frame pointer to call this function!
//
// We can't compute this directly from the Caller-SP, since the frame pointer
// is based on a maximum delta from Initial-SP, so first we find SP, then
// compute the FP offset.
int CodeGenInterface::genCallerSPtoFPdelta() const
{
assert(isFramePointerUsed());
int callerSPtoFPdelta;
callerSPtoFPdelta = genCallerSPtoInitialSPdelta() + genSPtoFPdelta();
assert(callerSPtoFPdelta <= 0);
return callerSPtoFPdelta;
}
//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
// This number will be negative.
int CodeGenInterface::genCallerSPtoInitialSPdelta() const
{
int callerSPtoSPdelta = 0;
callerSPtoSPdelta -= genTotalFrameSize();
callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
// compCalleeRegsPushed does not account for the frame pointer
// TODO-Cleanup: shouldn't this be part of genTotalFrameSize?
if (isFramePointerUsed())
{
callerSPtoSPdelta -= REGSIZE_BYTES;
}
assert(callerSPtoSPdelta <= 0);
return callerSPtoSPdelta;
}
#endif // TARGET_AMD64
//-----------------------------------------------------------------------------------------
// genSSE2BitwiseOp - generate SSE2 code for the given oper as "Operand BitWiseOp BitMask"
//
// Arguments:
// treeNode - tree node
//
// Return value:
// None
//
// Assumptions:
// i) tree oper is one of GT_NEG or GT_INTRINSIC Abs()
// ii) tree type is floating point type.
// iii) caller of this routine needs to call genProduceReg()
void CodeGen::genSSE2BitwiseOp(GenTree* treeNode)
{
regNumber targetReg = treeNode->GetRegNum();
regNumber operandReg = genConsumeReg(treeNode->gtGetOp1());
emitAttr size = emitTypeSize(treeNode);
assert(varTypeIsFloating(treeNode->TypeGet()));
assert(treeNode->gtGetOp1()->isUsedFromReg());
CORINFO_FIELD_HANDLE* maskFld = nullptr;
UINT64 mask = 0;
instruction ins = INS_invalid;
if (treeNode->OperIs(GT_NEG))
{
// Neg(x) = flip the sign bit.
// Neg(f) = f ^ 0x80000000 x4 (packed)
// Neg(d) = d ^ 0x8000000000000000 x2 (packed)
ins = INS_xorps;
mask = treeNode->TypeIs(TYP_FLOAT) ? 0x8000000080000000UL : 0x8000000000000000UL;
maskFld = treeNode->TypeIs(TYP_FLOAT) ? &negBitmaskFlt : &negBitmaskDbl;
}
else if (treeNode->OperIs(GT_INTRINSIC))
{
assert(treeNode->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Abs);
// Abs(x) = set sign-bit to zero
// Abs(f) = f & 0x7fffffff x4 (packed)
// Abs(d) = d & 0x7fffffffffffffff x2 (packed)
ins = INS_andps;
mask = treeNode->TypeIs(TYP_FLOAT) ? 0x7fffffff7fffffffUL : 0x7fffffffffffffffUL;
maskFld = treeNode->TypeIs(TYP_FLOAT) ? &absBitmaskFlt : &absBitmaskDbl;
}
else
{
assert(!"genSSE2BitwiseOp: unsupported oper");
}
if (*maskFld == nullptr)
{
UINT64 maskPack[] = {mask, mask};
*maskFld = GetEmitter()->emitBlkConst(&maskPack, 16, 16, treeNode->TypeGet());
}
GetEmitter()->emitIns_SIMD_R_R_C(ins, size, targetReg, operandReg, *maskFld, 0);
}
//-----------------------------------------------------------------------------------------
// genSSE41RoundOp - generate SSE41 code for the given tree as a round operation
//
// Arguments:
// treeNode - tree node
//
// Return value:
// None
//
// Assumptions:
// i) SSE4.1 is supported by the underlying hardware
// ii) treeNode oper is a GT_INTRINSIC
// iii) treeNode type is a floating point type
// iv) treeNode is not used from memory
// v) tree oper is NI_System_Math{F}_Round, _Ceiling, _Floor, or _Truncate
// vi) caller of this routine needs to call genProduceReg()
void CodeGen::genSSE41RoundOp(GenTreeOp* treeNode)
{
// i) SSE4.1 is supported by the underlying hardware
assert(compiler->compIsaSupportedDebugOnly(InstructionSet_SSE41));
// ii) treeNode oper is a GT_INTRINSIC
assert(treeNode->OperGet() == GT_INTRINSIC);
GenTree* srcNode = treeNode->gtGetOp1();
// iii) treeNode type is floating point type
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
// iv) treeNode is not used from memory
assert(!treeNode->isUsedFromMemory());
genConsumeOperands(treeNode);
instruction ins = (treeNode->TypeGet() == TYP_FLOAT) ? INS_roundss : INS_roundsd;
emitAttr size = emitTypeSize(treeNode);
regNumber dstReg = treeNode->GetRegNum();
unsigned ival = 0;
// v) tree oper is NI_System_Math{F}_Round, _Ceiling, _Floor, or _Truncate
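    // The round immediate encodes the rounding control: bit 3 suppresses precision exceptions,
    // bit 2 selects MXCSR.RC, and bits 1:0 give the explicit mode (00 nearest, 01 down, 10 up, 11 truncate):
    //   4  (0b0100) - use MXCSR.RC (round-to-nearest-even by default)
    //   9  (0b1001) - round toward negative infinity (floor)
    //   10 (0b1010) - round toward positive infinity (ceiling)
    //   11 (0b1011) - round toward zero (truncate)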
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Round:
ival = 4;
break;
case NI_System_Math_Ceiling:
ival = 10;
break;
case NI_System_Math_Floor:
ival = 9;
break;
case NI_System_Math_Truncate:
ival = 11;
break;
default:
ins = INS_invalid;
assert(!"genSSE41RoundOp: unsupported intrinsic");
unreached();
}
if (srcNode->isContained() || srcNode->isUsedFromSpillTemp())
{
emitter* emit = GetEmitter();
TempDsc* tmpDsc = nullptr;
unsigned varNum = BAD_VAR_NUM;
unsigned offset = (unsigned)-1;
if (srcNode->isUsedFromSpillTemp())
{
assert(srcNode->IsRegOptional());
tmpDsc = getSpillTempDsc(srcNode);
varNum = tmpDsc->tdTempNum();
offset = 0;
regSet.tmpRlsTemp(tmpDsc);
}
else if (srcNode->isIndir())
{
GenTreeIndir* memIndir = srcNode->AsIndir();
GenTree* memBase = memIndir->gtOp1;
switch (memBase->OperGet())
{
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
{
assert(memBase->isContained());
varNum = memBase->AsLclVarCommon()->GetLclNum();
offset = memBase->AsLclVarCommon()->GetLclOffs();
// Ensure that all the GenTreeIndir values are set to their defaults.
assert(memBase->GetRegNum() == REG_NA);
assert(!memIndir->HasIndex());
assert(memIndir->Scale() == 1);
assert(memIndir->Offset() == 0);
break;
}
case GT_CLS_VAR_ADDR:
{
emit->emitIns_R_C_I(ins, size, dstReg, memBase->AsClsVar()->gtClsVarHnd, 0, ival);
return;
}
default:
{
emit->emitIns_R_A_I(ins, size, dstReg, memIndir, ival);
return;
}
}
}
else
{
switch (srcNode->OperGet())
{
case GT_CNS_DBL:
{
GenTreeDblCon* dblConst = srcNode->AsDblCon();
CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(dblConst->gtDconVal, emitTypeSize(dblConst));
emit->emitIns_R_C_I(ins, size, dstReg, hnd, 0, ival);
return;
}
case GT_LCL_FLD:
varNum = srcNode->AsLclFld()->GetLclNum();
offset = srcNode->AsLclFld()->GetLclOffs();
break;
case GT_LCL_VAR:
{
assert(srcNode->IsRegOptional() || !compiler->lvaGetDesc(srcNode->AsLclVar())->lvIsRegCandidate());
varNum = srcNode->AsLclVar()->GetLclNum();
offset = 0;
break;
}
default:
unreached();
break;
}
}
// Ensure we got a good varNum and offset.
// We also need to check for `tmpDsc != nullptr` since spill temp numbers
// are negative and start with -1, which also happens to be BAD_VAR_NUM.
assert((varNum != BAD_VAR_NUM) || (tmpDsc != nullptr));
assert(offset != (unsigned)-1);
emit->emitIns_R_S_I(ins, size, dstReg, varNum, offset, ival);
}
else
{
inst_RV_RV_IV(ins, size, dstReg, srcNode->GetRegNum(), ival);
}
}
//---------------------------------------------------------------------
// genIntrinsic - generate code for a given intrinsic
//
// Arguments
// treeNode - the GT_INTRINSIC node
//
// Return value:
// None
//
void CodeGen::genIntrinsic(GenTree* treeNode)
{
// Handle intrinsics that can be implemented by target-specific instructions
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Abs:
genSSE2BitwiseOp(treeNode);
break;
case NI_System_Math_Ceiling:
case NI_System_Math_Floor:
case NI_System_Math_Truncate:
case NI_System_Math_Round:
genSSE41RoundOp(treeNode->AsOp());
break;
case NI_System_Math_Sqrt:
{
// Both operand and its result must be of the same floating point type.
GenTree* srcNode = treeNode->AsOp()->gtOp1;
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
genConsumeOperands(treeNode->AsOp());
const instruction ins = (treeNode->TypeGet() == TYP_FLOAT) ? INS_sqrtss : INS_sqrtsd;
GetEmitter()->emitInsBinary(ins, emitTypeSize(treeNode), treeNode, srcNode);
break;
}
default:
assert(!"genIntrinsic: Unsupported intrinsic");
unreached();
}
genProduceReg(treeNode);
}
//---------------------------------------------------------------------
// getBaseVarForPutArgStk - returns the baseVarNum for passing a stack arg.
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
// Return value:
// The number of the base variable.
//
// Note:
// If this is a tail call, the outgoing args are placed in the caller's incoming arg stack space.
// Otherwise, they go in the outgoing arg area on the current frame.
//
// On Windows the caller always creates slots (homing space) in its frame for the
// first 4 arguments of a callee (register passed args). So, the baseVarNum is always 0.
// For System V systems there is no such calling convention requirement, and the code needs to find
// the first stack passed argument from the caller. This is done by iterating over
// all the lvParam variables and finding the first whose GetArgReg() equals REG_STK.
//
unsigned CodeGen::getBaseVarForPutArgStk(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_PUTARG_STK);
unsigned baseVarNum;
// Do we set up the stk arg in the incoming or the out-going arg area?
// Fast tail calls are implemented as epilog+jmp, so their stk args are set up in the incoming arg area.
// For all other calls the stk args are set up in the out-going arg area.
if (treeNode->AsPutArgStk()->putInIncomingArgArea())
{
// See the note in the function header re: finding the first stack passed argument.
baseVarNum = getFirstArgWithStackSlot();
assert(baseVarNum != BAD_VAR_NUM);
#ifdef DEBUG
// This must be a fast tail call.
assert(treeNode->AsPutArgStk()->gtCall->AsCall()->IsFastTailCall());
// Since it is a fast tail call, the existence of the first incoming arg is guaranteed
// because a fast tail call requires that the caller's in-coming arg area be >= the out-going
// arg area required for the tail call.
LclVarDsc* varDsc = compiler->lvaGetDesc(baseVarNum);
assert(varDsc != nullptr);
#ifdef UNIX_AMD64_ABI
assert(!varDsc->lvIsRegArg && varDsc->GetArgReg() == REG_STK);
#else // !UNIX_AMD64_ABI
// On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0.
assert(varDsc->lvIsRegArg && (varDsc->GetArgReg() == REG_ARG_0 || varDsc->GetArgReg() == REG_FLTARG_0));
#endif // !UNIX_AMD64_ABI
#endif // DEBUG
}
else
{
#if FEATURE_FIXED_OUT_ARGS
baseVarNum = compiler->lvaOutgoingArgSpaceVar;
#else // !FEATURE_FIXED_OUT_ARGS
assert(!"No BaseVarForPutArgStk on x86");
baseVarNum = BAD_VAR_NUM;
#endif // !FEATURE_FIXED_OUT_ARGS
}
return baseVarNum;
}
//---------------------------------------------------------------------
// genAlignStackBeforeCall: Align the stack if necessary before a call.
//
// Arguments:
// putArgStk - the putArgStk node.
//
void CodeGen::genAlignStackBeforeCall(GenTreePutArgStk* putArgStk)
{
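// Explicit stack alignment around calls is only required for the UNIX x86 ABI;
// on all other targets this is a no-op.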
#if defined(UNIX_X86_ABI)
genAlignStackBeforeCall(putArgStk->gtCall);
#endif // UNIX_X86_ABI
}
//---------------------------------------------------------------------
// genAlignStackBeforeCall: Align the stack if necessary before a call.
//
// Arguments:
// call - the call node.
//
void CodeGen::genAlignStackBeforeCall(GenTreeCall* call)
{
#if defined(UNIX_X86_ABI)
// Have we aligned the stack yet?
if (!call->fgArgInfo->IsStkAlignmentDone())
{
// We haven't done any stack alignment yet for this call. We might need to create
// an alignment adjustment, even if this function itself doesn't have any stack args.
// This can happen if this function call is part of a nested call sequence, and the outer
// call has already pushed some arguments.
unsigned stkLevel = genStackLevel + call->fgArgInfo->GetStkSizeBytes();
call->fgArgInfo->ComputeStackAlignment(stkLevel);
unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
if (padStkAlign != 0)
{
// Now generate the alignment
inst_RV_IV(INS_sub, REG_SPBASE, padStkAlign, EA_PTRSIZE);
AddStackLevel(padStkAlign);
AddNestedAlignment(padStkAlign);
}
call->fgArgInfo->SetStkAlignmentDone();
}
#endif // UNIX_X86_ABI
}
//---------------------------------------------------------------------
// genRemoveAlignmentAfterCall: After a call, remove the alignment
// added before the call, if any.
//
// Arguments:
// call - the call node.
// bias - additional stack adjustment
//
// Note:
// When bias > 0, caller should adjust stack level appropriately as
// bias is not considered when adjusting stack level.
//
void CodeGen::genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias)
{
#if defined(TARGET_X86)
#if defined(UNIX_X86_ABI)
// Put back the stack pointer if there was any padding for stack alignment
unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
unsigned padStkAdjust = padStkAlign + bias;
if (padStkAdjust != 0)
{
inst_RV_IV(INS_add, REG_SPBASE, padStkAdjust, EA_PTRSIZE);
SubtractStackLevel(padStkAlign);
SubtractNestedAlignment(padStkAlign);
}
#else // UNIX_X86_ABI
if (bias != 0)
{
if (bias == sizeof(int))
{
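// A single "pop ecx" is smaller than "add esp, 4"; ecx is caller-trashed across the
// call we are cleaning up after, so its value is dead here and safe to clobber.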
inst_RV(INS_pop, REG_ECX, TYP_INT);
}
else
{
inst_RV_IV(INS_add, REG_SPBASE, bias, EA_PTRSIZE);
}
}
#endif // !UNIX_X86_ABI
#else // TARGET_X86
assert(bias == 0);
#endif // !TARGET_X86
}
#ifdef TARGET_X86
//---------------------------------------------------------------------
// genAdjustStackForPutArgStk:
// adjust the stack pointer for a putArgStk node if necessary.
//
// Arguments:
// putArgStk - the putArgStk node.
//
// Returns: true if the stack pointer was adjusted; false otherwise.
//
// Notes:
// Sets `m_pushStkArg` to true if the stack arg needs to be pushed,
// false if the stack arg needs to be stored at the current stack
// pointer address. This is exactly the opposite of the return value
// of this function.
//
bool CodeGen::genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk)
{
const unsigned argSize = putArgStk->GetStackByteSize();
GenTree* source = putArgStk->gtGetOp1();
#ifdef FEATURE_SIMD
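// A SIMD-typed source (that is not a field list) is stored with vector stores rather than
// integer pushes, so pre-adjust the stack here and store it at the SP address.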
if (!source->OperIs(GT_FIELD_LIST) && varTypeIsSIMD(source))
{
inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
AddStackLevel(argSize);
m_pushStkArg = false;
return true;
}
#endif // FEATURE_SIMD
#ifdef DEBUG
switch (putArgStk->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
case GenTreePutArgStk::Kind::Unroll:
assert(!source->AsObj()->GetLayout()->HasGCPtr());
break;
case GenTreePutArgStk::Kind::Push:
case GenTreePutArgStk::Kind::PushAllSlots:
assert(source->OperIs(GT_FIELD_LIST) || source->AsObj()->GetLayout()->HasGCPtr() ||
(argSize < XMM_REGSIZE_BYTES));
break;
default:
unreached();
}
#endif // DEBUG
// In lowering (see "LowerPutArgStk") we have determined what sort of instructions
// are going to be used for this node. If we will not be using "push"es, the stack
// needs to be adjusted first (so that the SP points to the base of the outgoing arg).
//
if (!putArgStk->isPushKind())
{
// If argSize is large, we need to probe the stack like we do in the prolog (genAllocLclFrame)
// or for localloc (genLclHeap), to ensure we touch the stack pages sequentially, and don't miss
// the stack guard pages. The prolog probes, but we don't know at this point how much higher
// the last probed stack pointer value is. We use a fixed threshold: for any size below it,
// the stack is guaranteed to have been probed already; above it, we don't know. The threshold
// should be high enough to cover all common cases. Increasing the threshold means adding a few
// more "lowest address of stack" probes in the prolog. Since this is relatively rare, add it to
// stress modes.
if ((argSize >= ARG_STACK_PROBE_THRESHOLD_BYTES) ||
compiler->compStressCompile(Compiler::STRESS_GENERIC_VARN, 5))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)argSize, REG_NA);
}
else
{
inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
}
AddStackLevel(argSize);
m_pushStkArg = false;
return true;
}
// Otherwise, "push" will be adjusting the stack for us.
m_pushStkArg = true;
return false;
}
//---------------------------------------------------------------------
// genPutArgStkFieldList - generate code for passing a GT_FIELD_LIST arg on the stack.
//
// Arguments
// treeNode - the GT_PUTARG_STK node whose op1 is a GT_FIELD_LIST
//
// Return value:
// None
//
void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk)
{
GenTreeFieldList* const fieldList = putArgStk->gtOp1->AsFieldList();
assert(fieldList != nullptr);
// Set m_pushStkArg and pre-adjust the stack if necessary.
const bool preAdjustedStack = genAdjustStackForPutArgStk(putArgStk);
// For now, we only support the "push" case; we will push a full slot for the first field of each slot
// within the struct.
assert((putArgStk->isPushKind()) && !preAdjustedStack && m_pushStkArg);
// If we have pre-adjusted the stack and are simply storing the fields in order, set the offset to 0.
// (Note that this mode is not currently being used.)
// If we are pushing the arguments (i.e. we have not pre-adjusted the stack), then we are pushing them
// in reverse order, so we start with the current field offset at the size of the struct arg (which must be
// a multiple of the target pointer size).
unsigned currentOffset = (preAdjustedStack) ? 0 : putArgStk->GetStackByteSize();
unsigned prevFieldOffset = currentOffset;
regNumber intTmpReg = REG_NA;
regNumber simdTmpReg = REG_NA;
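// Lowering may have reserved temporaries for us: an int temp for fields that must be loaded
// into a (possibly byte-addressable) register before being stored, and a SIMD temp for
// storing TYP_SIMD12 fields.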
if (putArgStk->AvailableTempRegCount() != 0)
{
regMaskTP rsvdRegs = putArgStk->gtRsvdRegs;
if ((rsvdRegs & RBM_ALLINT) != 0)
{
intTmpReg = putArgStk->GetSingleTempReg(RBM_ALLINT);
assert(genIsValidIntReg(intTmpReg));
}
if ((rsvdRegs & RBM_ALLFLOAT) != 0)
{
simdTmpReg = putArgStk->GetSingleTempReg(RBM_ALLFLOAT);
assert(genIsValidFloatReg(simdTmpReg));
}
assert(genCountBits(rsvdRegs) == (unsigned)((intTmpReg == REG_NA) ? 0 : 1) + ((simdTmpReg == REG_NA) ? 0 : 1));
}
for (GenTreeFieldList::Use& use : fieldList->Uses())
{
GenTree* const fieldNode = use.GetNode();
const unsigned fieldOffset = use.GetOffset();
var_types fieldType = use.GetType();
// Long-typed nodes should have been handled by the decomposition pass, and lowering should have sorted the
// field list in descending order by offset.
assert(!varTypeIsLong(fieldType));
assert(fieldOffset <= prevFieldOffset);
// Consume the register, if any, for this field. Note that genConsumeRegs() will appropriately
// update the liveness info for a lclVar that has been marked RegOptional, which hasn't been
// assigned a register, and which is therefore contained.
// Unlike genConsumeReg(), it handles the case where no registers are being consumed.
genConsumeRegs(fieldNode);
regNumber argReg = fieldNode->isUsedFromSpillTemp() ? REG_NA : fieldNode->GetRegNum();
// If the field is slot-like, we can use a push instruction to store the entire register no matter the type.
//
// The GC encoder requires that the stack remain 4-byte aligned at all times. Round the adjustment up
// to the next multiple of 4. If we are going to generate a `push` instruction, the adjustment must
// not require rounding.
// NOTE: if the field is of GC type, we must use a push instruction, since the emitter is not otherwise
// able to detect stores into the outgoing argument area of the stack on x86.
const bool fieldIsSlot = ((fieldOffset % 4) == 0) && ((prevFieldOffset - fieldOffset) >= 4);
int adjustment = roundUp(currentOffset - fieldOffset, 4);
if (fieldIsSlot && !varTypeIsSIMD(fieldType))
{
fieldType = genActualType(fieldType);
unsigned pushSize = genTypeSize(fieldType);
assert((pushSize % 4) == 0);
adjustment -= pushSize;
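// Push zeros to fill any padding between the previously pushed field and this one, so
// that the push of this field lands exactly at its offset.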
while (adjustment != 0)
{
inst_IV(INS_push, 0);
currentOffset -= pushSize;
AddStackLevel(pushSize);
adjustment -= pushSize;
}
m_pushStkArg = true;
}
else
{
m_pushStkArg = false;
// We always "push" floating point fields (i.e. they are full slot values that don't
// require special handling).
assert(varTypeIsIntegralOrI(fieldNode) || varTypeIsSIMD(fieldNode));
// If we can't push this field, it needs to be in a register so that we can store
// it to the stack location.
if (adjustment != 0)
{
// This moves the stack pointer to fieldOffset.
// For this case, we must adjust the stack and generate stack-relative stores rather than pushes.
// Adjust the stack pointer to the next slot boundary.
inst_RV_IV(INS_sub, REG_SPBASE, adjustment, EA_PTRSIZE);
currentOffset -= adjustment;
AddStackLevel(adjustment);
}
// Does it need to be in a byte register?
// If so, we'll use intTmpReg, which must have been allocated as a byte register.
// If it's already in a register, but not a byteable one, then move it.
if (varTypeIsByte(fieldType) && ((argReg == REG_NA) || ((genRegMask(argReg) & RBM_BYTE_REGS) == 0)))
{
assert(intTmpReg != REG_NA);
noway_assert((genRegMask(intTmpReg) & RBM_BYTE_REGS) != 0);
if (argReg != REG_NA)
{
inst_Mov(fieldType, intTmpReg, argReg, /* canSkip */ false);
argReg = intTmpReg;
}
}
}
if (argReg == REG_NA)
{
if (m_pushStkArg)
{
if (fieldNode->isUsedFromSpillTemp())
{
assert(!varTypeIsSIMD(fieldType)); // Q: can we get here with SIMD?
assert(fieldNode->IsRegOptional());
TempDsc* tmp = getSpillTempDsc(fieldNode);
GetEmitter()->emitIns_S(INS_push, emitActualTypeSize(fieldNode->TypeGet()), tmp->tdTempNum(), 0);
regSet.tmpRlsTemp(tmp);
}
else
{
assert(varTypeIsIntegralOrI(fieldNode));
switch (fieldNode->OperGet())
{
case GT_LCL_VAR:
inst_TT(INS_push, fieldNode, 0, 0, emitActualTypeSize(fieldNode->TypeGet()));
break;
case GT_CNS_INT:
if (fieldNode->IsIconHandle())
{
inst_IV_handle(INS_push, fieldNode->AsIntCon()->gtIconVal);
}
else
{
inst_IV(INS_push, fieldNode->AsIntCon()->gtIconVal);
}
break;
default:
unreached();
}
}
currentOffset -= TARGET_POINTER_SIZE;
AddStackLevel(TARGET_POINTER_SIZE);
}
else
{
// The stack has been adjusted and we will load the field to intTmpReg and then store it on the stack.
assert(varTypeIsIntegralOrI(fieldNode));
switch (fieldNode->OperGet())
{
case GT_LCL_VAR:
inst_RV_TT(INS_mov, intTmpReg, fieldNode);
break;
case GT_CNS_INT:
genSetRegToConst(intTmpReg, fieldNode->TypeGet(), fieldNode);
break;
default:
unreached();
}
genStoreRegToStackArg(fieldType, intTmpReg, fieldOffset - currentOffset);
}
}
else
{
#if defined(FEATURE_SIMD)
if (fieldType == TYP_SIMD12)
{
assert(genIsValidFloatReg(simdTmpReg));
genStoreSIMD12ToStack(argReg, simdTmpReg);
}
else
#endif // defined(FEATURE_SIMD)
{
genStoreRegToStackArg(fieldType, argReg, fieldOffset - currentOffset);
}
if (m_pushStkArg)
{
// We always push a slot-rounded size
currentOffset -= genTypeSize(fieldType);
}
}
prevFieldOffset = fieldOffset;
}
if (currentOffset != 0)
{
// We don't expect padding at the beginning of a struct, but it could happen with explicit layout.
inst_RV_IV(INS_sub, REG_SPBASE, currentOffset, EA_PTRSIZE);
AddStackLevel(currentOffset);
}
}
#endif // TARGET_X86
//---------------------------------------------------------------------
// genPutArgStk - generate code for passing an arg on the stack.
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk)
{
GenTree* data = putArgStk->gtOp1;
var_types targetType = genActualType(data->TypeGet());
#ifdef TARGET_X86
genAlignStackBeforeCall(putArgStk);
if ((data->OperGet() != GT_FIELD_LIST) && varTypeIsStruct(targetType))
{
(void)genAdjustStackForPutArgStk(putArgStk);
genPutStructArgStk(putArgStk);
return;
}
// On a 32-bit target, all of the long arguments are handled with GT_FIELD_LISTs of TYP_INT.
assert(targetType != TYP_LONG);
const unsigned argSize = putArgStk->GetStackByteSize();
assert((argSize % TARGET_POINTER_SIZE) == 0);
if (data->isContainedIntOrIImmed())
{
if (data->IsIconHandle())
{
inst_IV_handle(INS_push, data->AsIntCon()->gtIconVal);
}
else
{
inst_IV(INS_push, data->AsIntCon()->gtIconVal);
}
AddStackLevel(argSize);
}
else if (data->OperGet() == GT_FIELD_LIST)
{
genPutArgStkFieldList(putArgStk);
}
else
{
// We should not see any contained nodes that are not immediates.
assert(data->isUsedFromReg());
genConsumeReg(data);
genPushReg(targetType, data->GetRegNum());
}
#else // !TARGET_X86
{
unsigned baseVarNum = getBaseVarForPutArgStk(putArgStk);
#ifdef UNIX_AMD64_ABI
if (data->OperIs(GT_FIELD_LIST))
{
genPutArgStkFieldList(putArgStk, baseVarNum);
return;
}
else if (varTypeIsStruct(targetType))
{
m_stkArgVarNum = baseVarNum;
m_stkArgOffset = putArgStk->getArgOffset();
genPutStructArgStk(putArgStk);
m_stkArgVarNum = BAD_VAR_NUM;
return;
}
#endif // UNIX_AMD64_ABI
noway_assert(targetType != TYP_STRUCT);
// Get argument offset on stack.
// Here we cross-check that the argument offset hasn't changed from lowering to codegen, since
// we store the arg slot number in the GT_PUTARG_STK node during the lowering phase.
unsigned argOffset = putArgStk->getArgOffset();
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(putArgStk->gtCall, putArgStk);
assert(curArgTabEntry != nullptr);
assert(argOffset == curArgTabEntry->slotNum * TARGET_POINTER_SIZE);
#endif
if (data->isContainedIntOrIImmed())
{
GetEmitter()->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), baseVarNum, argOffset,
(int)data->AsIntConCommon()->IconValue());
}
else
{
assert(data->isUsedFromReg());
genConsumeReg(data);
GetEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->GetRegNum(), baseVarNum,
argOffset);
}
}
#endif // !TARGET_X86
}
//---------------------------------------------------------------------
// genPutArgReg - generate code for a GT_PUTARG_REG node
//
// Arguments
// tree - the GT_PUTARG_REG node
//
// Return value:
// None
//
void CodeGen::genPutArgReg(GenTreeOp* tree)
{
assert(tree->OperIs(GT_PUTARG_REG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
#ifndef UNIX_AMD64_ABI
assert(targetType != TYP_STRUCT);
#endif // !UNIX_AMD64_ABI
GenTree* op1 = tree->gtOp1;
genConsumeReg(op1);
// If child node is not already in the register we need, move it
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
genProduceReg(tree);
}
#ifdef TARGET_X86
// genPushReg: Push a register value onto the stack and adjust the stack level
//
// Arguments:
// type - the type of value to be stored
// reg - the register containing the value
//
// Notes:
// For TYP_LONG, the srcReg must be a floating point register.
// Otherwise, the register type must be consistent with the given type.
//
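//    For example, pushing a TYP_LONG chunk held in xmm0 emits (roughly):
//       sub  esp, 8
//       movq qword ptr [esp], xmm0
//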
void CodeGen::genPushReg(var_types type, regNumber srcReg)
{
unsigned size = genTypeSize(type);
if (varTypeIsIntegralOrI(type) && type != TYP_LONG)
{
assert(genIsValidIntReg(srcReg));
inst_RV(INS_push, srcReg, type);
}
else
{
instruction ins;
emitAttr attr = emitTypeSize(type);
if (type == TYP_LONG)
{
// On x86, the only way we can push a TYP_LONG from a register is if it is in an xmm reg.
// This is only used when we are pushing a struct from memory to memory, and basically is
// handling an 8-byte "chunk", as opposed to strictly a long type.
ins = INS_movq;
}
else
{
ins = ins_Store(type);
}
assert(genIsValidFloatReg(srcReg));
inst_RV_IV(INS_sub, REG_SPBASE, size, EA_PTRSIZE);
GetEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, 0);
}
AddStackLevel(size);
}
#endif // TARGET_X86
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
// genStoreRegToStackArg: Store a register value into the stack argument area
//
// Arguments:
// type - the type of value to be stored
// reg - the register containing the value
// offset - the offset from the base (see Assumptions below)
//
// Notes:
// A type of TYP_STRUCT instructs this method to store a 16-byte chunk
// at the given offset (i.e. not the full struct).
//
// Assumptions:
// The caller must set the context appropriately before calling this method:
// - On x64, m_stkArgVarNum must be set according to whether this is a regular or tail call.
// - On x86, the caller must set m_pushStkArg if this method should push the argument.
// Otherwise, the argument is stored at the given offset from sp.
//
// TODO: In the below code the load and store instructions are for 16 bytes, but the
// type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
// this probably needs to be changed.
//
void CodeGen::genStoreRegToStackArg(var_types type, regNumber srcReg, int offset)
{
assert(srcReg != REG_NA);
instruction ins;
emitAttr attr;
unsigned size;
if (type == TYP_STRUCT)
{
ins = INS_movdqu;
// This should be changed!
attr = EA_8BYTE;
size = 16;
}
else
{
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(type))
{
assert(genIsValidFloatReg(srcReg));
ins = ins_Store(type); // TODO-CQ: pass 'aligned' correctly
}
else
#endif // FEATURE_SIMD
#ifdef TARGET_X86
if (type == TYP_LONG)
{
assert(genIsValidFloatReg(srcReg));
ins = INS_movq;
}
else
#endif // TARGET_X86
{
assert((varTypeUsesFloatReg(type) && genIsValidFloatReg(srcReg)) ||
(varTypeIsIntegralOrI(type) && genIsValidIntReg(srcReg)));
ins = ins_Store(type);
}
attr = emitTypeSize(type);
size = genTypeSize(type);
}
#ifdef TARGET_X86
if (m_pushStkArg)
{
genPushReg(type, srcReg);
}
else
{
GetEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, offset);
}
#else // !TARGET_X86
assert(m_stkArgVarNum != BAD_VAR_NUM);
GetEmitter()->emitIns_S_R(ins, attr, srcReg, m_stkArgVarNum, m_stkArgOffset + offset);
#endif // !TARGET_X86
}
//---------------------------------------------------------------------
// genPutStructArgStk - generate code for copying a struct arg on the stack by value.
// In case there are references to heap object in the struct,
// it generates the gcinfo as well.
//
// Arguments
// putArgStk - the GT_PUTARG_STK node
//
// Notes:
// In the case of fixed out args, the caller must have set m_stkArgVarNum to the variable number
// corresponding to the argument area (where we will put the argument on the stack).
// For tail calls this is the baseVarNum = 0.
// For non tail calls this is the outgoingArgSpace.
//
void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk)
{
GenTree* source = putArgStk->gtGetOp1();
var_types targetType = source->TypeGet();
#if defined(TARGET_X86) && defined(FEATURE_SIMD)
if (putArgStk->isSIMD12())
{
genPutArgStkSIMD12(putArgStk);
return;
}
#endif // defined(TARGET_X86) && defined(FEATURE_SIMD)
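// Any other SIMD-typed source is simply a value in an XMM register; store it directly
// into the argument area.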
if (varTypeIsSIMD(targetType))
{
regNumber srcReg = genConsumeReg(source);
assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
genStoreRegToStackArg(targetType, srcReg, 0);
return;
}
assert(targetType == TYP_STRUCT);
ClassLayout* layout = source->AsObj()->GetLayout();
switch (putArgStk->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
genStructPutArgRepMovs(putArgStk);
break;
#ifndef TARGET_X86
case GenTreePutArgStk::Kind::PartialRepInstr:
genStructPutArgPartialRepMovs(putArgStk);
break;
#endif // !TARGET_X86
case GenTreePutArgStk::Kind::Unroll:
genStructPutArgUnroll(putArgStk);
break;
#ifdef TARGET_X86
case GenTreePutArgStk::Kind::Push:
genStructPutArgPush(putArgStk);
break;
#endif // TARGET_X86
default:
unreached();
}
}
#endif // defined(FEATURE_PUT_STRUCT_ARG_STK)
/*****************************************************************************
*
* Create and record GC Info for the function.
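 *  The JIT32 encoder builds the info block itself and returns a pointer to it to the
 *  caller, while the GcInfoEncoder-based path allocates and publishes the GC info
 *  through the EE interface and returns nothing.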
*/
#ifndef JIT32_GCENCODER
void
#else // !JIT32_GCENCODER
void*
#endif // !JIT32_GCENCODER
CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
{
#ifdef JIT32_GCENCODER
return genCreateAndStoreGCInfoJIT32(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
#else // !JIT32_GCENCODER
genCreateAndStoreGCInfoX64(codeSize, prologSize DEBUGARG(codePtr));
#endif // !JIT32_GCENCODER
}
#ifdef JIT32_GCENCODER
void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr))
{
BYTE headerBuf[64];
InfoHdr header;
int s_cached;
#ifdef FEATURE_EH_FUNCLETS
// We should do this before gcInfoBlockHdrSave since varPtrTableSize must be finalized before it
if (compiler->ehAnyFunclets())
{
gcInfo.gcMarkFilterVarsPinned();
}
#endif
#ifdef DEBUG
size_t headerSize =
#endif
compiler->compInfoBlkSize =
gcInfo.gcInfoBlockHdrSave(headerBuf, 0, codeSize, prologSize, epilogSize, &header, &s_cached);
size_t argTabOffset = 0;
size_t ptrMapSize = gcInfo.gcPtrTableSize(header, codeSize, &argTabOffset);
#if DISPLAY_SIZES
if (GetInterruptible())
{
gcHeaderISize += compiler->compInfoBlkSize;
gcPtrMapISize += ptrMapSize;
}
else
{
gcHeaderNSize += compiler->compInfoBlkSize;
gcPtrMapNSize += ptrMapSize;
}
#endif // DISPLAY_SIZES
compiler->compInfoBlkSize += ptrMapSize;
/* Allocate the info block for the method */
compiler->compInfoBlkAddr = (BYTE*)compiler->info.compCompHnd->allocGCInfo(compiler->compInfoBlkSize);
#if 0 // VERBOSE_SIZES
// TODO-X86-Cleanup: 'dataSize', below, is not defined
// if (compiler->compInfoBlkSize > codeSize && compiler->compInfoBlkSize > 100)
{
printf("[%7u VM, %7u+%7u/%7u x86 %03u/%03u%%] %s.%s\n",
compiler->info.compILCodeSize,
compiler->compInfoBlkSize,
codeSize + dataSize,
codeSize + dataSize - prologSize - epilogSize,
100 * (codeSize + dataSize) / compiler->info.compILCodeSize,
100 * (codeSize + dataSize + compiler->compInfoBlkSize) / compiler->info.compILCodeSize,
compiler->info.compClassName,
compiler->info.compMethodName);
}
#endif
/* Fill in the info block and return it to the caller */
void* infoPtr = compiler->compInfoBlkAddr;
/* Create the method info block: header followed by GC tracking tables */
compiler->compInfoBlkAddr +=
gcInfo.gcInfoBlockHdrSave(compiler->compInfoBlkAddr, -1, codeSize, prologSize, epilogSize, &header, &s_cached);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize);
compiler->compInfoBlkAddr = gcInfo.gcPtrTableSave(compiler->compInfoBlkAddr, header, codeSize, &argTabOffset);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize + ptrMapSize);
#ifdef DEBUG
if (0)
{
BYTE* temp = (BYTE*)infoPtr;
size_t size = compiler->compInfoBlkAddr - temp;
BYTE* ptab = temp + headerSize;
noway_assert(size == headerSize + ptrMapSize);
printf("Method info block - header [%zu bytes]:", headerSize);
for (unsigned i = 0; i < size; i++)
{
if (temp == ptab)
{
printf("\nMethod info block - ptrtab [%u bytes]:", ptrMapSize);
printf("\n %04X: %*c", i & ~0xF, 3 * (i & 0xF), ' ');
}
else
{
if (!(i % 16))
printf("\n %04X: ", i);
}
printf("%02X ", *temp++);
}
printf("\n");
}
#endif // DEBUG
#if DUMP_GC_TABLES
if (compiler->opts.dspGCtbls)
{
const BYTE* base = (BYTE*)infoPtr;
size_t size;
unsigned methodSize;
InfoHdr dumpHeader;
printf("GC Info for method %s\n", compiler->info.compFullName);
printf("GC info size = %3u\n", compiler->compInfoBlkSize);
size = gcInfo.gcInfoBlockHdrDump(base, &dumpHeader, &methodSize);
// printf("size of header encoding is %3u\n", size);
printf("\n");
if (compiler->opts.dspGCtbls)
{
base += size;
size = gcInfo.gcDumpPtrTable(base, dumpHeader, methodSize);
// printf("size of pointer table is %3u\n", size);
printf("\n");
noway_assert(compiler->compInfoBlkAddr == (base + size));
}
}
#endif // DUMP_GC_TABLES
/* Make sure we ended up generating the expected number of bytes */
noway_assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + compiler->compInfoBlkSize);
return infoPtr;
}
#else // !JIT32_GCENCODER
void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
{
IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC());
GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
// We keep the call count for the second call to gcMakeRegPtrTable() below.
unsigned callCnt = 0;
// First we figure out the encoder ID's for the stack slots and registers.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
// Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
gcInfoEncoder->FinalizeSlotIds();
// Now we can actually use those slot ID's to declare live ranges.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
if (compiler->opts.compDbgEnC)
{
// what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
// which is:
// -return address
// -saved off RBP
// -saved 'this' pointer and bool for synchronized methods
// 4 slots for RBP + return address + RSI + RDI
int preservedAreaSize = 4 * REGSIZE_BYTES;
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
{
preservedAreaSize += REGSIZE_BYTES;
}
// bool in synchronized methods that tracks whether the lock has been taken (takes 4 bytes on stack)
preservedAreaSize += 4;
}
// This is used both to signal that the method is compiled for EnC and to record the size of the block at the top
// of the frame.
gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
}
if (compiler->opts.IsReversePInvoke())
{
unsigned reversePInvokeFrameVarNumber = compiler->lvaReversePInvokeFrameVar;
assert(reversePInvokeFrameVarNumber != BAD_VAR_NUM);
const LclVarDsc* reversePInvokeFrameVar = compiler->lvaGetDesc(reversePInvokeFrameVarNumber);
gcInfoEncoder->SetReversePInvokeFrameSlot(reversePInvokeFrameVar->GetStackOffset());
}
gcInfoEncoder->Build();
// GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
// let's save the values anyway for debugging purposes
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
#endif // !JIT32_GCENCODER
/*****************************************************************************
* Emit a call to a helper function.
*
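 *  Arguments:
 *     helper        - the CORINFO_HELP_* helper to call
 *     argSize       - stack argument bytes consumed by the call; a negative value (x86 only)
 *                     indicates a caller-pop (cdecl-style) helper
 *     retSize       - emitAttr describing the helper's return value
 *     callTargetReg - optional register used to materialize the helper address when it cannot
 *                     be encoded as a 32-bit displacement
 *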
*/
void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg)
{
void* addr = nullptr;
void* pAddr = nullptr;
emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
regNumber callTarget = REG_NA;
regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
if (!addr)
{
assert(pAddr != nullptr);
// Absolute indirect call addr
// Note: The order of checks is important. Always check for pc-relative first and
// zero-relative next, because the former encoding is 1 byte smaller than the latter.
if (genCodeIndirAddrCanBeEncodedAsPCRelOffset((size_t)pAddr) ||
genCodeIndirAddrCanBeEncodedAsZeroRelOffset((size_t)pAddr))
{
// generate call whose target is specified by 32-bit offset relative to PC or zero.
callType = emitter::EC_FUNC_TOKEN_INDIR;
addr = pAddr;
}
else
{
#ifdef TARGET_AMD64
// If this indirect address cannot be encoded as 32-bit offset relative to PC or Zero,
// load it into REG_HELPER_CALL_TARGET and use register indirect addressing mode to
// make the call.
// mov reg, addr
// call [reg]
if (callTargetReg == REG_NA)
{
// If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
// this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
regMaskTP callTargetMask = genRegMask(callTargetReg);
noway_assert((callTargetMask & killMask) == callTargetMask);
}
else
{
// The call target must not overwrite any live variable, though it may not be in the
// kill set for the call.
regMaskTP callTargetMask = genRegMask(callTargetReg);
noway_assert((callTargetMask & regSet.GetMaskVars()) == RBM_NONE);
}
#endif
callTarget = callTargetReg;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, callTarget, (ssize_t)pAddr);
callType = emitter::EC_INDIR_ARD;
}
}
// clang-format off
GetEmitter()->emitIns_Call(callType,
compiler->eeFindHelper(helper),
INDEBUG_LDISASM_COMMA(nullptr) addr,
argSize,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(EA_UNKNOWN),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
callTarget, // ireg
REG_NA, 0, 0, // xreg, xmul, disp
false // isJump
);
// clang-format on
regSet.verifyRegistersUsed(killMask);
}
/*****************************************************************************
* Unit testing of the XArch emitter: generate a bunch of instructions into the prolog
* (it's as good a place as any), then use COMPlus_JitLateDisasm=* to see if the late
 * disassembler thinks the instructions are the same as we do.
*/
// Uncomment "#define ALL_XARCH_EMITTER_UNIT_TESTS" to run all the unit tests here.
// After adding a unit test, and verifying it works, put it under this #ifdef, so we don't see it run every time.
//#define ALL_XARCH_EMITTER_UNIT_TESTS
#if defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
void CodeGen::genAmd64EmitterUnitTests()
{
if (!verbose)
{
return;
}
if (!compiler->opts.altJit)
{
// No point doing this in a "real" JIT.
return;
}
// Mark the "fake" instructions in the output.
printf("*************** In genAmd64EmitterUnitTests()\n");
// We use this:
// genDefineTempLabel(genCreateTempLabel());
// to create artificial labels to help separate groups of tests.
//
// Loads
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef ALL_XARCH_EMITTER_UNIT_TESTS
genDefineTempLabel(genCreateTempLabel());
// vhaddpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_haddpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_addps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_addpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_subps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_subpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_mulps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_mulpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_andps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_andpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_andps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_andpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_orps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_orpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_orps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_orpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vcvtss2sd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_cvtss2sd, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vcvtsd2ss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_cvtsd2ss, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
#endif // ALL_XARCH_EMITTER_UNIT_TESTS
printf("*************** End of genAmd64EmitterUnitTests()\n");
}
#endif // defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
#ifdef PROFILING_SUPPORTED
#ifdef TARGET_X86
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. This variable remains unchanged.
//
// Return Value:
// None
//
// Notes:
// The x86 profile enter helper has the following requirements (see ProfileEnterNaked in
// VM\i386\asmhelpers.asm for details):
// 1. The calling sequence for calling the helper is:
// push FunctionIDOrClientID
// call ProfileEnterHelper
// 2. The calling function has an EBP frame.
//    3. EBP points to the saved EBP, which is the first thing saved in the function. Thus,
//       the following prolog is assumed:
//           push EBP
//           mov  EBP, ESP
// 4. All registers are preserved.
// 5. The helper pops the FunctionIDOrClientID argument from the stack.
//
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
// Give profiler a chance to back out of hooking this method
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
unsigned saveStackLvl2 = genStackLevel;
// Important note: when you change enter probe layout, you must also update SKIP_ENTER_PROF_CALLBACK()
// for x86 stack unwinding
#if defined(UNIX_X86_ABI)
// Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall()
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
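// The 0xC adjustment plus the 4-byte push of the profiler handle below totals 16 bytes,
// which keeps ESP 16-byte aligned at the helper call; it is undone by the 0x10 add after
// the call.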
#endif // UNIX_X86_ABI
// Push the profilerHandle
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
inst_IV(INS_push, (size_t)compiler->compProfilerMethHnd);
}
// This will emit either
// "call ip-relative 32-bit offset" or
// "mov rax, helper addr; call rax"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER,
0, // argSize. Again, we have to lie about it
EA_UNKNOWN); // retSize
// Check that we have room for the push.
assert(compiler->fgGetPtrArgCntMax() >= 1);
#if defined(UNIX_X86_ABI)
// Restoring alignment manually. This is similar to CodeGen::genRemoveAlignmentAfterCall
GetEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
#endif // UNIX_X86_ABI
/* Restore the stack level */
SetStackLevel(saveStackLvl2);
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
// Notes:
// The x86 profile leave/tailcall helper has the following requirements (see ProfileLeaveNaked and
// ProfileTailcallNaked in VM\i386\asmhelpers.asm for details):
// 1. The calling sequence for calling the helper is:
// push FunctionIDOrClientID
// call ProfileLeaveHelper or ProfileTailcallHelper
// 2. The calling function has an EBP frame.
//    3. EBP points to the saved EBP, which is the first thing saved in the function. Thus,
//       the following prolog is assumed:
//           push EBP
//           mov  EBP, ESP
// 4. helper == CORINFO_HELP_PROF_FCN_LEAVE: All registers are preserved.
// helper == CORINFO_HELP_PROF_FCN_TAILCALL: Only argument registers are preserved.
// 5. The helper pops the FunctionIDOrClientID argument from the stack.
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
// Only hook if profiler says it's okay.
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
// We need to save the current stack level, since the helper call will pop the argument
unsigned saveStackLvl2 = genStackLevel;
#if defined(UNIX_X86_ABI)
// Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall()
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
AddStackLevel(0xC);
AddNestedAlignment(0xC);
#endif // UNIX_X86_ABI
//
// Push the profilerHandle
//
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
inst_IV(INS_push, (size_t)compiler->compProfilerMethHnd);
}
genSinglePush();
#if defined(UNIX_X86_ABI)
int argSize = -REGSIZE_BYTES; // negative means caller-pop (cdecl)
#else
int argSize = REGSIZE_BYTES;
#endif
genEmitHelperCall(helper, argSize, EA_UNKNOWN /* retSize */);
// Check that we have room for the push.
assert(compiler->fgGetPtrArgCntMax() >= 1);
#if defined(UNIX_X86_ABI)
// Restoring alignment manually. This is similar to CodeGen::genRemoveAlignmentAfterCall
GetEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
SubtractStackLevel(0x10);
SubtractNestedAlignment(0xC);
#endif // UNIX_X86_ABI
/* Restore the stack level */
SetStackLevel(saveStackLvl2);
}
#endif // TARGET_X86
#ifdef TARGET_AMD64
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
// Give profiler a chance to back out of hooking this method
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
#if !defined(UNIX_AMD64_ABI)
unsigned varNum;
LclVarDsc* varDsc;
// Since the method needs to make a profiler callback, it should have out-going arg space allocated.
noway_assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
noway_assert(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES));
// Home all arguments passed in arg registers (RCX, RDX, R8 and R9).
// In case of vararg methods, arg regs are already homed.
//
// Note: Here we don't need to worry about updating GC info, since the enter
// callback is generated as part of the prolog, which is non-GC-interruptible.
// Moreover, GC cannot kick in while executing inside the profiler callback, which is a
// profiler requirement so that it can examine arguments that could be obj refs.
if (!compiler->info.compIsVarArgs)
{
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->info.compArgsCount; varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
if (!varDsc->lvIsRegArg)
{
continue;
}
var_types storeType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg();
instruction store_ins = ins_Store(storeType);
#ifdef FEATURE_SIMD
if ((storeType == TYP_SIMD8) && genIsValidIntReg(argReg))
{
store_ins = INS_mov;
}
#endif // FEATURE_SIMD
GetEmitter()->emitIns_S_R(store_ins, emitTypeSize(storeType), argReg, varNum, 0);
}
}
// Emit profiler EnterCallback(ProfilerMethHnd, caller's SP)
// RCX = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of a pointer.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_8BYTE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RDX = caller's SP
// Notes
// 1) Here we can query caller's SP offset since prolog will be generated after final frame layout.
// 2) caller's SP relative offset to FramePointer will be negative. We need to add absolute value
// of that offset to FramePointer to obtain caller's SP value.
assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
// This will emit either
// "call ip-relative 32-bit offset" or
// "mov rax, helper addr; call rax"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN);
// TODO-AMD64-CQ: Rather than reloading, see if this could be optimized by combining with prolog
// generation logic that moves args around as required by first BB entry point conditions
// computed by LSRA. Code pointers for investigating this further: genFnPrologCalleeRegArgs()
// and genEnregisterIncomingStackArgs().
//
// Now reload arg registers from home locations.
// Vararg methods:
// - we need to reload only known (i.e. fixed) reg args.
// - if floating point type, also reload it into corresponding integer reg
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->info.compArgsCount; varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
if (!varDsc->lvIsRegArg)
{
continue;
}
var_types loadType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg();
instruction load_ins = ins_Load(loadType);
#ifdef FEATURE_SIMD
if ((loadType == TYP_SIMD8) && genIsValidIntReg(argReg))
{
load_ins = INS_mov;
}
#endif // FEATURE_SIMD
GetEmitter()->emitIns_R_S(load_ins, emitTypeSize(loadType), argReg, varNum, 0);
if (compFeatureVarArg() && compiler->info.compIsVarArgs && varTypeIsFloating(loadType))
{
regNumber intArgReg = compiler->getCallArgIntRegister(argReg);
inst_Mov(TYP_LONG, intArgReg, argReg, /* canSkip */ false, emitActualTypeSize(loadType));
}
}
// If initReg is one of RBM_CALLEE_TRASH, then it needs to be zero'ed before using.
if ((RBM_CALLEE_TRASH & genRegMask(initReg)) != 0)
{
*pInitRegZeroed = false;
}
#else // !defined(UNIX_AMD64_ABI)
// Emit profiler EnterCallback(ProfilerMethHnd, caller's SP)
// R14 = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of a pointer.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_PROFILER_ENTER_ARG_0,
(ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_PROFILER_ENTER_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// R15 = caller's SP
// Notes
// 1) Here we can query caller's SP offset since prolog will be generated after final frame layout.
// 2) caller's SP relative offset to FramePointer will be negative. We need to add absolute value
// of that offset to FramePointer to obtain caller's SP value.
assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_1, genFramePointerReg(), -callerSPOffset);
// We can use any callee trash register (other than RAX, RDI, RSI) for call target.
// We use R11 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r11, helper addr; call r11"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN, REG_DEFAULT_PROFILER_CALL_TARGET);
// If initReg is one of RBM_CALLEE_TRASH, then it needs to be zero'ed before using.
if ((RBM_CALLEE_TRASH & genRegMask(initReg)) != 0)
{
*pInitRegZeroed = false;
}
#endif // !defined(UNIX_AMD64_ABI)
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
// Only hook if profiler says it's okay.
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
#if !defined(UNIX_AMD64_ABI)
// Since the method needs to make a profiler callback, it should have out-going arg space allocated.
noway_assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
noway_assert(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES));
// If thisPtr needs to be kept alive and reported, it cannot be one of the callee trash
// registers that profiler callback kills.
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaGetDesc(compiler->info.compThisArg)->lvIsInReg())
{
regMaskTP thisPtrMask = genRegMask(compiler->lvaGetDesc(compiler->info.compThisArg)->GetRegNum());
noway_assert((RBM_PROFILER_LEAVE_TRASH & thisPtrMask) == 0);
}
// At this point return value is computed and stored in RAX or XMM0.
// On Amd64, Leave callback preserves the return register. We keep
// RAX alive by not reporting as trashed by helper call. Also note
// that GC cannot kick-in while executing inside profiler callback,
// which is a requirement of profiler as well since it needs to examine
// return value which could be an obj ref.
// RCX = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of an address.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RDX = caller's SP
// TODO-AMD64-Cleanup: Once we start doing codegen after final frame layout, retain the "if" portion
// of the statements to execute unconditionally and clean up the rest.
if (compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
{
// Caller's SP relative offset to FramePointer will be negative. We need to add absolute
// value of that offset to FramePointer to obtain caller's SP value.
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
}
else
{
// If we are here, it means this is a tentative frame layout, during which we
// cannot use the caller's SP offset since it is an estimate. For now we require the
// method to have at least a single arg so that we can use it to obtain caller's
// SP.
LclVarDsc* varDsc = compiler->lvaGetDesc(0U);
NYI_IF((varDsc == nullptr) || !varDsc->lvIsParam, "Profiler ELT callback for a method without any params");
// lea rdx, [FramePointer + Arg0's offset]
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
}
// We can use any callee trash register (other than RAX, RCX, RDX) for call target.
// We use R8 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r8, helper addr; call r8"
genEmitHelperCall(helper, 0, EA_UNKNOWN, REG_ARG_2);
#else // !defined(UNIX_AMD64_ABI)
// RDI = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RSI = caller's SP
if (compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
{
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
}
else
{
LclVarDsc* varDsc = compiler->lvaGetDesc(0U);
NYI_IF((varDsc == nullptr) || !varDsc->lvIsParam, "Profiler ELT callback for a method without any params");
// lea rsi, [FramePointer + Arg0's offset]
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
}
// We can use any callee trash register (other than RAX, RDI, RSI) for call target.
// We use R11 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r11, helper addr; call r11"
genEmitHelperCall(helper, 0, EA_UNKNOWN, REG_DEFAULT_PROFILER_CALL_TARGET);
#endif // !defined(UNIX_AMD64_ABI)
}
#endif // TARGET_AMD64
#endif // PROFILING_SUPPORTED
#ifdef TARGET_AMD64
//------------------------------------------------------------------------
// genOSRRecordTier0CalleeSavedRegistersAndFrame: for OSR methods, record the
// subset of callee saves already saved by the Tier0 method, and the frame
// created by Tier0.
//
void CodeGen::genOSRRecordTier0CalleeSavedRegistersAndFrame()
{
assert(compiler->compGeneratingProlog);
assert(compiler->opts.IsOSR());
assert(compiler->funCurrentFunc()->funKind == FuncKind::FUNC_ROOT);
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// Figure out which set of int callee saves was already saved by Tier0.
// Emit appropriate unwind.
//
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
int const tier0IntCalleeSaveUsedSize = genCountBits(tier0IntCalleeSaves) * REGSIZE_BYTES;
JITDUMP("--OSR--- tier0 has already saved ");
JITDUMPEXEC(dspRegMask(tier0IntCalleeSaves));
JITDUMP("\n");
// We must account for the Tier0 callee saves.
//
// These have already happened at method entry; all these
// unwind records should be at offset 0.
//
// RBP is always saved by Tier0 and always pushed first.
//
assert((tier0IntCalleeSaves & RBM_FPBASE) == RBM_FPBASE);
compiler->unwindPush(REG_RBP);
tier0IntCalleeSaves &= ~RBM_FPBASE;
// Now the rest of the Tier0 callee saves.
//
for (regNumber reg = REG_INT_LAST; tier0IntCalleeSaves != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & tier0IntCalleeSaves) != 0)
{
compiler->unwindPush(reg);
}
tier0IntCalleeSaves &= ~regBit;
}
// We must account for the post-callee-saves push SP movement
// done by the Tier0 frame and by the OSR transition.
//
// tier0FrameSize is the Tier0 FP-SP delta plus the fake call slot added by
// JIT_Patchpoint. We add one slot to account for the saved FP.
//
// We then need to subtract off the size of the Tier0 callee saves, as the SP
// adjustments for those have already been modelled by the unwind pushes above.
//
int const tier0FrameSize = patchpointInfo->TotalFrameSize() + REGSIZE_BYTES;
int const tier0NetSize = tier0FrameSize - tier0IntCalleeSaveUsedSize;
compiler->unwindAllocStack(tier0NetSize);
}
//------------------------------------------------------------------------
// genOSRSaveRemainingCalleeSavedRegisters: save any callee save registers
// that Tier0 didn't save.
//
// Notes:
// This must be invoked after SP has been adjusted to allocate the local
// frame, because of how the UnwindSave records are interpreted.
//
// We rely on the fact that other "local frame" allocation actions (like
// stack probing) will not trash callee saves registers.
//
void CodeGen::genOSRSaveRemainingCalleeSavedRegisters()
{
// We should be generating the prolog of an OSR root frame.
//
assert(compiler->compGeneratingProlog);
assert(compiler->opts.IsOSR());
assert(compiler->funCurrentFunc()->funKind == FuncKind::FUNC_ROOT);
// x86/x64 doesn't support push of xmm/ymm regs, therefore consider only integer registers for pushing onto stack
// here. Space for float registers to be preserved is stack allocated and saved as part of prolog sequence and not
// here.
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// Figure out which set of int callee saves still needs saving.
//
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
unsigned const tier0IntCalleeSaveUsedSize = genCountBits(tier0IntCalleeSaves) * REGSIZE_BYTES;
regMaskTP const osrIntCalleeSaves = rsPushRegs & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP osrAdditionalIntCalleeSaves = osrIntCalleeSaves & ~tier0IntCalleeSaves;
JITDUMP("---OSR--- int callee saves are ");
JITDUMPEXEC(dspRegMask(osrIntCalleeSaves));
JITDUMP("; tier0 already saved ");
JITDUMPEXEC(dspRegMask(tier0IntCalleeSaves));
JITDUMP("; so only saving ");
JITDUMPEXEC(dspRegMask(osrAdditionalIntCalleeSaves));
JITDUMP("\n");
// These remaining callee saves will be stored in the Tier0 callee save area
// below any saves already done by Tier0. Compute the offset.
//
// The OSR method doesn't actually use its callee save area.
//
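// Illustrative (hypothetical) numbers: osrFrameSize 0x40, two OSR callee saves (0x10), a pushed
// frame pointer (8), tier0FrameSize 0x68, and Tier0 having saved RBP and RBX (0x10): the first
// additional callee save is stored at SP offset 0x40 + 0x10 + 8 + 0x68 - 0x10 = 0xB0, with each
// subsequent one 8 bytes lower.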
int const osrFrameSize = compiler->compLclFrameSize;
int const tier0FrameSize = patchpointInfo->TotalFrameSize();
int const osrCalleeSaveSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
int const osrFramePointerSize = isFramePointerUsed() ? REGSIZE_BYTES : 0;
int offset = osrFrameSize + osrCalleeSaveSize + osrFramePointerSize + tier0FrameSize - tier0IntCalleeSaveUsedSize;
// The tier0 frame is always an RBP frame, so the OSR method should never need to save RBP.
//
assert((tier0CalleeSaves & RBM_FPBASE) == RBM_FPBASE);
assert((osrAdditionalIntCalleeSaves & RBM_FPBASE) == RBM_NONE);
// The OSR method must use MOVs to save additional callee saves.
//
for (regNumber reg = REG_INT_LAST; osrAdditionalIntCalleeSaves != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & osrAdditionalIntCalleeSaves) != 0)
{
GetEmitter()->emitIns_AR_R(INS_mov, EA_8BYTE, reg, REG_SPBASE, offset);
compiler->unwindSaveReg(reg, offset);
offset -= REGSIZE_BYTES;
}
osrAdditionalIntCalleeSaves &= ~regBit;
}
}
#endif // TARGET_AMD64
//------------------------------------------------------------------------
// genPushCalleeSavedRegisters: Push any callee-saved registers we have used.
//
void CodeGen::genPushCalleeSavedRegisters()
{
assert(compiler->compGeneratingProlog);
#if DEBUG
// OSR root frames must handle this differently. See
// genOSRRecordTier0CalleeSavedRegisters()
// genOSRSaveRemainingCalleeSavedRegisters()
//
if (compiler->opts.IsOSR())
{
assert(compiler->funCurrentFunc()->funKind != FuncKind::FUNC_ROOT);
}
#endif
// x86/x64 doesn't support push of xmm/ymm regs, therefore consider only integer registers for pushing onto stack
// here. Space for float registers to be preserved is stack allocated and saved as part of prolog sequence and not
// here.
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_INT_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// On X86/X64 we have already pushed the FP (frame-pointer) prior to calling this method
if (isFramePointerUsed())
{
rsPushRegs &= ~RBM_FPBASE;
}
#ifdef DEBUG
if (compiler->compCalleeRegsPushed != genCountBits(rsPushRegs))
{
printf("Error: unexpected number of callee-saved registers to push. Expected: %d. Got: %d ",
compiler->compCalleeRegsPushed, genCountBits(rsPushRegs));
dspRegMask(rsPushRegs);
printf("\n");
assert(compiler->compCalleeRegsPushed == genCountBits(rsPushRegs));
}
#endif // DEBUG
// Push backwards so we match the order we will pop them in the epilog
// and all the other code that expects it to be in this order.
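// For example (hypothetical mask, x64 registers): rsPushRegs == {RBX, RSI, R12} emits
// push r12; push rsi; push rbx, so the epilog can pop rbx; pop rsi; pop r12.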
for (regNumber reg = REG_INT_LAST; rsPushRegs != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & rsPushRegs) != 0)
{
inst_RV(INS_push, reg, TYP_REF);
compiler->unwindPush(reg);
#ifdef USING_SCOPE_INFO
if (!doubleAlignOrFramePointerUsed())
{
psiAdjustStackLevel(REGSIZE_BYTES);
}
#endif // USING_SCOPE_INFO
rsPushRegs &= ~regBit;
}
}
}
void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
#ifdef TARGET_AMD64
const bool isFunclet = compiler->funCurrentFunc()->funKind != FuncKind::FUNC_ROOT;
const bool doesSupersetOfNormalPops = compiler->opts.IsOSR() && !isFunclet;
// OSR methods must restore all registers saved by either the OSR or
// the Tier0 method. First restore any callee save not saved by
// Tier0, then the callee saves done by Tier0.
//
// OSR funclets do normal restores.
//
if (doesSupersetOfNormalPops)
{
regMaskTP rsPopRegs = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP tier0CalleeSaves =
((regMaskTP)compiler->info.compPatchpointInfo->CalleeSaveRegisters()) & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP additionalCalleeSaves = rsPopRegs & ~tier0CalleeSaves;
// Registers saved by the OSR prolog.
//
genPopCalleeSavedRegistersFromMask(additionalCalleeSaves);
// Registers saved by the Tier0 prolog.
// Tier0 frame pointer will be restored separately.
//
genPopCalleeSavedRegistersFromMask(tier0CalleeSaves & ~RBM_FPBASE);
return;
}
#endif // TARGET_AMD64
// Registers saved by a normal prolog
//
regMaskTP rsPopRegs = regSet.rsGetModifiedRegsMask() & RBM_INT_CALLEE_SAVED;
const unsigned popCount = genPopCalleeSavedRegistersFromMask(rsPopRegs);
noway_assert(compiler->compCalleeRegsPushed == popCount);
}
//------------------------------------------------------------------------
// genPopCalleeSavedRegistersFromMask: pop specified set of callee saves
// in the "standard" order
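// (lowest register number first: RBX, then RSI/RDI, then R12..R15 on x64, which is the
// reverse of the push order used by genPushCalleeSavedRegisters)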
//
unsigned CodeGen::genPopCalleeSavedRegistersFromMask(regMaskTP rsPopRegs)
{
unsigned popCount = 0;
if ((rsPopRegs & RBM_EBX) != 0)
{
popCount++;
inst_RV(INS_pop, REG_EBX, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_FPBASE) != 0)
{
// EBP cannot be directly modified for EBP frame and double-aligned frames
assert(!doubleAlignOrFramePointerUsed());
popCount++;
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
#ifndef UNIX_AMD64_ABI
// For System V AMD64 calling convention ESI and EDI are volatile registers.
if ((rsPopRegs & RBM_ESI) != 0)
{
popCount++;
inst_RV(INS_pop, REG_ESI, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_EDI) != 0)
{
popCount++;
inst_RV(INS_pop, REG_EDI, TYP_I_IMPL);
}
#endif // !defined(UNIX_AMD64_ABI)
#ifdef TARGET_AMD64
if ((rsPopRegs & RBM_R12) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R12, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R13) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R13, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R14) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R14, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R15) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R15, TYP_I_IMPL);
}
#endif // TARGET_AMD64
// Amd64/x86 doesn't support push/pop of xmm registers.
// These will get saved to stack separately after allocating
// space on stack in prolog sequence. PopCount is essentially
// tracking the count of integer registers pushed.
return popCount;
}
/*****************************************************************************
*
* Generates code for a function epilog.
*
* Please consult the "debugger team notification" comment in genFnProlog().
*/
void CodeGen::genFnEpilog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFnEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, GetEmitter()->emitInitGCrefVars);
gcInfo.gcRegGCrefSetCur = GetEmitter()->emitInitGCrefRegs;
gcInfo.gcRegByrefSetCur = GetEmitter()->emitInitByrefRegs;
noway_assert(!compiler->opts.MinOpts() || isFramePointerUsed()); // FPO not allowed with minOpts
#ifdef DEBUG
genInterruptibleUsed = true;
#endif
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
#ifdef DEBUG
if (compiler->opts.dspCode)
{
printf("\n__epilog:\n");
}
if (verbose)
{
printf("gcVarPtrSetCur=%s ", VarSetOps::ToString(compiler, gcInfo.gcVarPtrSetCur));
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
printf(", gcRegGCrefSetCur=");
printRegMaskInt(gcInfo.gcRegGCrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
printf(", gcRegByrefSetCur=");
printRegMaskInt(gcInfo.gcRegByrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
printf("\n");
}
#endif
// Restore float registers that were saved to stack before SP is modified.
genRestoreCalleeSavedFltRegs(compiler->compLclFrameSize);
#ifdef JIT32_GCENCODER
// When using the JIT32 GC encoder, we do not start the OS-reported portion of the epilog until after
// the above call to `genRestoreCalleeSavedFltRegs` because that function
// a) does not actually restore any registers: there are none when targeting the Windows x86 ABI,
// which is the only target that uses the JIT32 GC encoder
// b) may issue a `vzeroupper` instruction to eliminate AVX -> SSE transition penalties.
// Because the `vzeroupper` instruction is not recognized by the VM's unwinder and there are no
// callee-save FP restores that the unwinder would need to see, we can avoid the need to change the
// unwinder (and break binary compat with older versions of the runtime) by starting the epilog
// after any `vzeroupper` instruction has been emitted. If either of the above conditions changes,
// we will need to rethink this.
GetEmitter()->emitStartEpilog();
#endif
/* Compute the size in bytes we've pushed/popped */
bool removeEbpFrame = doubleAlignOrFramePointerUsed();
#ifdef TARGET_AMD64
// We only remove the EBP frame using the frame pointer (using `lea rsp, [rbp + const]`)
// if we reported the frame pointer in the prolog. The Windows x64 unwinding ABI specifically
// disallows this `lea` form:
//
// See https://docs.microsoft.com/en-us/cpp/build/prolog-and-epilog?view=msvc-160#epilog-code
//
// "When a frame pointer is not used, the epilog must use add RSP,constant to deallocate the fixed part of the
// stack. It may not use lea RSP,constant[RSP] instead. This restriction exists so the unwind code has fewer
// patterns to recognize when searching for epilogs."
//
// Otherwise, we must use `add RSP, constant`, as stated. So, we need to use the same condition
// as genFnProlog() used in determining whether to report the frame pointer in the unwind data.
// This is a subset of the `doubleAlignOrFramePointerUsed()` cases.
//
if (removeEbpFrame)
{
const bool reportUnwindData = compiler->compLocallocUsed || compiler->opts.compDbgEnC;
removeEbpFrame = removeEbpFrame && reportUnwindData;
}
#endif // TARGET_AMD64
if (!removeEbpFrame)
{
// We have an ESP frame
noway_assert(compiler->compLocallocUsed == false); // Only used with frame-pointer
/* Get rid of our local variables */
unsigned int frameSize = compiler->compLclFrameSize;
#ifdef TARGET_AMD64
// OSR must remove the entire OSR frame and the Tier0 frame down to the bottom
// of the used part of the Tier0 callee save area.
//
if (compiler->opts.IsOSR())
{
// The patchpoint TotalFrameSize is the SP-FP delta (plus the "call" slot added by JIT_Patchpoint),
// so it does not account for the Tier0 push of FP; we add in an extra stack slot to get the
// offset to the top of the Tier0 callee saves area.
//
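// Illustrative (hypothetical) numbers: TotalFrameSize() of 0x68 gives tier0FrameSize 0x70; if the
// union of Tier0 and OSR int callee saves is {RBP, RBX, RSI} (0x18), and the OSR frame pushed two
// callee saves (0x10) plus a frame pointer (8), then osrAdjust = 0x70 - 0x18 + 0x10 + 8 = 0x70,
// which is folded into the single 'add rsp, imm' below.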
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP const tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP const osrIntCalleeSaves = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP const allIntCalleeSaves = osrIntCalleeSaves | tier0IntCalleeSaves;
unsigned const tier0FrameSize = patchpointInfo->TotalFrameSize() + REGSIZE_BYTES;
unsigned const tier0IntCalleeSaveUsedSize = genCountBits(allIntCalleeSaves) * REGSIZE_BYTES;
unsigned const osrCalleeSaveSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
unsigned const osrFramePointerSize = isFramePointerUsed() ? REGSIZE_BYTES : 0;
unsigned const osrAdjust =
tier0FrameSize - tier0IntCalleeSaveUsedSize + osrCalleeSaveSize + osrFramePointerSize;
JITDUMP("OSR epilog adjust factors: tier0 frame %u, tier0 callee saves -%u, osr callee saves %u, osr "
"framePointer %u\n",
tier0FrameSize, tier0IntCalleeSaveUsedSize, osrCalleeSaveSize, osrFramePointerSize);
JITDUMP(" OSR frame size %u; net osr adjust %u, result %u\n", frameSize, osrAdjust,
frameSize + osrAdjust);
frameSize += osrAdjust;
}
#endif // TARGET_AMD64
if (frameSize > 0)
{
#ifdef TARGET_X86
/* Add 'compiler->compLclFrameSize' to ESP */
/* Use pop ECX to increment ESP by 4, unless compiler->compJmpOpUsed is true */
if ((frameSize == TARGET_POINTER_SIZE) && !compiler->compJmpOpUsed)
{
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regSet.verifyRegUsed(REG_ECX);
}
else
#endif // TARGET_X86
{
/* Add 'compiler->compLclFrameSize' to ESP */
/* Generate "add esp, <stack-size>" */
inst_RV_IV(INS_add, REG_SPBASE, frameSize, EA_PTRSIZE);
}
}
genPopCalleeSavedRegisters();
#ifdef TARGET_AMD64
// In the case where we have an RSP frame, and no frame pointer reported in the OS unwind info,
// but we do have a pushed frame pointer and established frame chain, we do need to pop RBP.
//
// OSR methods must always pop RBP (pushed by Tier0 frame)
if (doubleAlignOrFramePointerUsed() || compiler->opts.IsOSR())
{
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
#endif // TARGET_AMD64
}
else
{
noway_assert(doubleAlignOrFramePointerUsed());
// We don't support OSR for methods that must report an FP in unwind.
//
assert(!compiler->opts.IsOSR());
/* Tear down the stack frame */
bool needMovEspEbp = false;
#if DOUBLE_ALIGN
if (compiler->genDoubleAlign())
{
//
// add esp, compLclFrameSize
//
// We need not do anything (except the "mov esp, ebp") if
// compiler->compCalleeRegsPushed==0. However, this is unlikely, and it
// also complicates the code manager. Hence, we ignore that case.
noway_assert(compiler->compLclFrameSize != 0);
inst_RV_IV(INS_add, REG_SPBASE, compiler->compLclFrameSize, EA_PTRSIZE);
needMovEspEbp = true;
}
else
#endif // DOUBLE_ALIGN
{
bool needLea = false;
if (compiler->compLocallocUsed)
{
// OSR not yet ready for localloc
assert(!compiler->opts.IsOSR());
// ESP may be variable if a localloc was actually executed. Reset it.
// lea esp, [ebp - compiler->compCalleeRegsPushed * REGSIZE_BYTES]
needLea = true;
}
else if (!regSet.rsRegsModified(RBM_CALLEE_SAVED))
{
if (compiler->compLclFrameSize != 0)
{
#ifdef TARGET_AMD64
// AMD64 can't use "mov esp, ebp", according to the ABI specification describing epilogs. So,
// do an LEA to "pop off" the frame allocation.
needLea = true;
#else // !TARGET_AMD64
// We will just generate "mov esp, ebp" and be done with it.
needMovEspEbp = true;
#endif // !TARGET_AMD64
}
}
else if (compiler->compLclFrameSize == 0)
{
// do nothing before popping the callee-saved registers
}
#ifdef TARGET_X86
else if (compiler->compLclFrameSize == REGSIZE_BYTES)
{
// "pop ecx" will make ESP point to the callee-saved registers
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regSet.verifyRegUsed(REG_ECX);
}
#endif // TARGET_X86
else
{
// We need to make ESP point to the callee-saved registers
needLea = true;
}
if (needLea)
{
int offset;
#ifdef TARGET_AMD64
// lea esp, [ebp + compiler->compLclFrameSize - genSPtoFPdelta]
//
// Case 1: localloc not used.
// genSPToFPDelta = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize
// offset = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
// The amount to be subtracted from RBP to point at callee saved int regs.
//
// Case 2: localloc used
// genSPToFPDelta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize)
// Offset = Amount to be added to RBP to point at callee saved int regs.
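// (Illustrative, case 1 with hypothetical counts: 3 pushed callee-saved regs give offset == 24,
// so the lea leaves RSP pointing at the lowest callee-saved register slot, ready for the pops below.)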
offset = genSPtoFPdelta() - compiler->compLclFrameSize;
// Offset should fit within a byte if localloc is not used.
if (!compiler->compLocallocUsed)
{
noway_assert(offset < UCHAR_MAX);
}
#else
// lea esp, [ebp - compiler->compCalleeRegsPushed * REGSIZE_BYTES]
offset = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
noway_assert(offset < UCHAR_MAX); // the offset fits in a byte
#endif
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -offset);
}
}
//
// Pop the callee-saved registers (if any)
//
genPopCalleeSavedRegisters();
#ifdef TARGET_AMD64
// Extra OSR adjust to get to where RBP was saved by the tier0 frame.
//
// Note the other callee saves made in that frame are dead, the current method
// will save and restore what it needs.
if (compiler->opts.IsOSR())
{
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
const int tier0FrameSize = patchpointInfo->TotalFrameSize();
// Use add since we know the SP-to-FP delta of the original method.
// We also need to skip over the slot where we pushed RBP.
//
// If we ever allow the original method to have localloc this will
// need to change.
inst_RV_IV(INS_add, REG_SPBASE, tier0FrameSize + TARGET_POINTER_SIZE, EA_PTRSIZE);
}
assert(!needMovEspEbp); // "mov esp, ebp" is not allowed in AMD64 epilogs
#else // !TARGET_AMD64
if (needMovEspEbp)
{
// mov esp, ebp
inst_Mov(TYP_I_IMPL, REG_SPBASE, REG_FPBASE, /* canSkip */ false);
}
#endif // !TARGET_AMD64
// pop ebp
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
GetEmitter()->emitStartExitSeq(); // Mark the start of the "return" sequence
/* Check if this a special return block i.e.
* CEE_JMP instruction */
if (jmpEpilog)
{
noway_assert(block->bbJumpKind == BBJ_RETURN);
noway_assert(block->GetFirstLIRNode());
// figure out what jump we have
GenTree* jmpNode = block->lastNode();
#if !FEATURE_FASTTAILCALL
// x86
noway_assert(jmpNode->gtOper == GT_JMP);
#else
// amd64
// If jmpNode is GT_JMP then gtNext must be null.
// If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts.
noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr));
// Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp
noway_assert((jmpNode->gtOper == GT_JMP) ||
((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
// The next block is associated with this "if" stmt
if (jmpNode->gtOper == GT_JMP)
#endif
{
// Simply emit a jump to the methodHnd. This is similar to a call so we can use
// the same descriptor with some minor adjustments.
CORINFO_METHOD_HANDLE methHnd = (CORINFO_METHOD_HANDLE)jmpNode->AsVal()->gtVal1;
CORINFO_CONST_LOOKUP addrInfo;
compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo);
if (addrInfo.accessType != IAT_VALUE && addrInfo.accessType != IAT_PVALUE)
{
NO_WAY("Unsupported JMP indirection");
}
// If we have IAT_PVALUE we might need to jump via register indirect, as sometimes the
// indirection cell can't be reached by the jump.
emitter::EmitCallType callType;
void* addr;
regNumber indCallReg;
if (addrInfo.accessType == IAT_PVALUE)
{
if (genCodeIndirAddrCanBeEncodedAsPCRelOffset((size_t)addrInfo.addr))
{
// 32 bit displacement will work
callType = emitter::EC_FUNC_TOKEN_INDIR;
addr = addrInfo.addr;
indCallReg = REG_NA;
}
else
{
// 32 bit displacement won't work
callType = emitter::EC_INDIR_ARD;
indCallReg = REG_RAX;
addr = nullptr;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
regSet.verifyRegUsed(indCallReg);
}
}
else
{
callType = emitter::EC_FUNC_TOKEN;
addr = addrInfo.addr;
indCallReg = REG_NA;
}
// clang-format off
GetEmitter()->emitIns_Call(callType,
methHnd,
INDEBUG_LDISASM_COMMA(nullptr)
addr,
0, // argSize
EA_UNKNOWN // retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(EA_UNKNOWN), // secondRetSize
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
indCallReg, REG_NA, 0, 0, /* ireg, xreg, xmul, disp */
true /* isJump */
);
// clang-format on
}
#if FEATURE_FASTTAILCALL
else
{
genCallInstruction(jmpNode->AsCall());
}
#endif // FEATURE_FASTTAILCALL
}
else
{
unsigned stkArgSize = 0; // Zero on all platforms except x86
#if defined(TARGET_X86)
bool fCalleePop = true;
// varargs has caller pop
if (compiler->info.compIsVarArgs)
fCalleePop = false;
if (IsCallerPop(compiler->info.compCallConv))
fCalleePop = false;
if (fCalleePop)
{
noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * REGSIZE_BYTES);
stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
noway_assert(compiler->compArgSize < 0x10000); // "ret" only has 2 byte operand
}
#ifdef UNIX_X86_ABI
// The called function must remove hidden address argument from the stack before returning
// in case of struct returning according to cdecl calling convention on linux.
// Details: http://www.sco.com/developers/devspecs/abi386-4.pdf pages 40-43
if (compiler->info.compCallConv == CorInfoCallConvExtension::C && compiler->info.compRetBuffArg != BAD_VAR_NUM)
stkArgSize += TARGET_POINTER_SIZE;
#endif // UNIX_X86_ABI
#endif // TARGET_X86
/* Return, popping our arguments (if any) */
instGen_Return(stkArgSize);
}
}
#if defined(FEATURE_EH_FUNCLETS)
#if defined(TARGET_AMD64)
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
* Funclets have the following incoming arguments:
*
* catch/filter-handler: rcx = InitialSP, rdx = the exception object that was caught (see GT_CATCH_ARG)
* filter: rcx = InitialSP, rdx = the exception object to filter (see GT_CATCH_ARG)
* finally/fault: rcx = InitialSP
*
* Funclets set the following registers on exit:
*
* catch/filter-handler: rax = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: rax = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* The AMD64 funclet prolog sequence is:
*
* push ebp
* push callee-saved regs
* ; TODO-AMD64-CQ: We probably only need to save any callee-save registers that we actually use
* ; in the funclet. Currently, we save the same set of callee-saved regs calculated for
* ; the entire function.
* sub sp, XXX ; Establish the rest of the frame.
* ; XXX is determined by lvaOutgoingArgSpaceSize plus space for the PSP slot, aligned
* ; up to preserve stack alignment. If we push an odd number of registers, we also
* ; generate this, to keep the stack aligned.
*
* ; Fill the PSP slot, for use by the VM (it gets reported with the GC info), or by code generation of nested
* ; filters.
* ; This is not part of the "OS prolog"; it has no associated unwind data, and is not reversed in the funclet
* ; epilog.
* ; Also, re-establish the frame pointer from the PSP.
*
* mov rbp, [rcx + PSP_slot_InitialSP_offset] ; Load the PSP (InitialSP of the main function stored in the
* ; PSP of the dynamically containing funclet or function)
* mov [rsp + PSP_slot_InitialSP_offset], rbp ; store the PSP in our frame
* lea ebp, [rbp + Function_InitialSP_to_FP_delta] ; re-establish the frame pointer of the parent frame. If
* ; Function_InitialSP_to_FP_delta==0, we don't need this
* ; instruction.
*
* The epilog sequence is then:
*
* add rsp, XXX
* pop callee-saved regs ; if necessary
* pop rbp
* ret
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming |
* | arguments |
* +=======================+ <---- Caller's SP
* | Return address |
* |-----------------------|
* | Saved EBP |
* |-----------------------|
* |Callee saved registers |
* |-----------------------|
* ~ possible 8 byte pad ~
* ~ for alignment ~
* |-----------------------|
* | PSP slot | // Omitted in CoreRT ABI
* |-----------------------|
* | Outgoing arg space | // this only exists if the function makes a call
* |-----------------------| <---- Initial SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* TODO-AMD64-Bug?: the frame pointer should really point to the PSP slot (the debugger seems to assume this
* in DacDbiInterfaceImpl::InitParentFrameInfo()), or someplace above Initial-SP. There is an AMD64
* UNWIND_INFO restriction that it must be within 240 bytes of Initial-SP. See jit64\amd64\inc\md.h
* "FRAMEPTR OFFSETS" for details.
*/
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletProlog()\n");
}
#endif
assert(!regSet.rsRegsModified(RBM_FPBASE));
assert(block != nullptr);
assert(block->bbFlags & BBF_FUNCLET_BEG);
assert(isFramePointerUsed());
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
// We need to push ebp, since it's callee-saved.
// We need to push the callee-saved registers. We only need to push the ones that we need, but we don't
// keep track of that on a per-funclet basis, so we push the same set as in the main function.
// The only fixed-size frame we need to allocate is whatever is big enough for the PSPSym, since nothing else
// is stored here (all temps are allocated in the parent frame).
// We do need to allocate the outgoing argument space, in case there are calls here. This must be the same
// size as the parent frame's outgoing argument space, to keep the PSPSym offset the same.
inst_RV(INS_push, REG_FPBASE, TYP_REF);
compiler->unwindPush(REG_FPBASE);
// Callee saved int registers are pushed to stack.
genPushCalleeSavedRegisters();
regMaskTP maskArgRegsLiveIn;
if ((block->bbCatchTyp == BBCT_FINALLY) || (block->bbCatchTyp == BBCT_FAULT))
{
maskArgRegsLiveIn = RBM_ARG_0;
}
else
{
maskArgRegsLiveIn = RBM_ARG_0 | RBM_ARG_2;
}
regNumber initReg = REG_EBP; // We already saved EBP, so it can be trashed
bool initRegZeroed = false;
genAllocLclFrame(genFuncletInfo.fiSpDelta, initReg, &initRegZeroed, maskArgRegsLiveIn);
// Callee saved float registers are copied to stack in their assigned stack slots
// after allocating space for them as part of funclet frame.
genPreserveCalleeSavedFltRegs(genFuncletInfo.fiSpDelta);
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// If there is no PSPSym (CoreRT ABI), we are done.
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
GetEmitter()->emitIns_R_AR(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_ARG_0, genFuncletInfo.fiPSP_slot_InitialSP_offset);
regSet.verifyRegUsed(REG_FPBASE);
GetEmitter()->emitIns_AR_R(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, genFuncletInfo.fiPSP_slot_InitialSP_offset);
if (genFuncletInfo.fiFunction_InitialSP_to_FP_delta != 0)
{
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_FPBASE,
genFuncletInfo.fiFunction_InitialSP_to_FP_delta);
}
// We've modified EBP, but not really. Say that we haven't...
regSet.rsRemoveRegsModified(RBM_FPBASE);
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*
* Note that we don't do anything with unwind codes, because AMD64 only cares about unwind codes for the prolog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
// Restore callee saved XMM regs from their stack slots before modifying SP
// to position at callee saved int regs.
genRestoreCalleeSavedFltRegs(genFuncletInfo.fiSpDelta);
inst_RV_IV(INS_add, REG_SPBASE, genFuncletInfo.fiSpDelta, EA_PTRSIZE);
genPopCalleeSavedRegisters();
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
instGen_Return(0);
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
{
return;
}
// Note that compLclFrameSize can't be used (nor can we call functions that depend on it),
// because we're not going to allocate the same size frame as the parent.
assert(isFramePointerUsed());
assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT); // The frame size and offsets must be
// finalized
assert(compiler->compCalleeFPRegsSavedMask != (regMaskTP)-1); // The float registers to be preserved are finalized
// Even though lvaToInitialSPRelativeOffset() depends on compLclFrameSize,
// that's ok, because we're figuring out an offset in the parent frame.
genFuncletInfo.fiFunction_InitialSP_to_FP_delta =
compiler->lvaToInitialSPRelativeOffset(0, true); // trick to find the Initial-SP-relative offset of the frame
// pointer.
assert(compiler->lvaOutgoingArgSpaceSize % REGSIZE_BYTES == 0);
#ifndef UNIX_AMD64_ABI
// No 4 slots for outgoing params on the stack for System V systems.
assert((compiler->lvaOutgoingArgSpaceSize == 0) ||
(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES))); // On AMD64, we always have 4 outgoing argument
// slots if there are any calls in the function.
#endif // UNIX_AMD64_ABI
unsigned offset = compiler->lvaOutgoingArgSpaceSize;
genFuncletInfo.fiPSP_slot_InitialSP_offset = offset;
// How much stack do we allocate in the funclet?
// We need to 16-byte align the stack.
unsigned totalFrameSize =
REGSIZE_BYTES // return address
+ REGSIZE_BYTES // pushed EBP
+ (compiler->compCalleeRegsPushed * REGSIZE_BYTES); // pushed callee-saved int regs, not including EBP
// The entire 128 bits of each XMM register are saved to the stack due to the ABI encoding requirement.
// Copying an entire XMM register to/from memory is performant when SP is aligned on an XMM_REGSIZE_BYTES boundary.
unsigned calleeFPRegsSavedSize = genCountBits(compiler->compCalleeFPRegsSavedMask) * XMM_REGSIZE_BYTES;
unsigned FPRegsPad = (calleeFPRegsSavedSize > 0) ? AlignmentPad(totalFrameSize, XMM_REGSIZE_BYTES) : 0;
unsigned PSPSymSize = (compiler->lvaPSPSym != BAD_VAR_NUM) ? REGSIZE_BYTES : 0;
totalFrameSize += FPRegsPad // Padding before pushing entire xmm regs
+ calleeFPRegsSavedSize // pushed callee-saved float regs
// below calculated 'pad' will go here
+ PSPSymSize // PSPSym
+ compiler->lvaOutgoingArgSpaceSize // outgoing arg space
;
unsigned pad = AlignmentPad(totalFrameSize, 16);
genFuncletInfo.fiSpDelta = FPRegsPad // Padding to align SP on XMM_REGSIZE_BYTES boundary
+ calleeFPRegsSavedSize // Callee saved xmm regs
+ pad + PSPSymSize // PSPSym
+ compiler->lvaOutgoingArgSpaceSize // outgoing arg space
;
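// Illustrative (hypothetical) example: two pushed int callee saves besides EBP, no callee-saved
// float regs, a PSPSym, and lvaOutgoingArgSpaceSize == 0x20. Then totalFrameSize is
// 8 + 8 + 16 + 8 + 0x20 = 0x48, pad is 8 (rounding up to 0x50), fiSpDelta is 8 + 8 + 0x20 = 0x30,
// and the PSP slot lives at Initial-SP offset 0x20.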
#ifdef DEBUG
if (verbose)
{
printf("\n");
printf("Funclet prolog / epilog info\n");
printf(" Function InitialSP-to-FP delta: %d\n", genFuncletInfo.fiFunction_InitialSP_to_FP_delta);
printf(" SP delta: %d\n", genFuncletInfo.fiSpDelta);
printf(" PSP slot Initial SP offset: %d\n", genFuncletInfo.fiPSP_slot_InitialSP_offset);
}
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
assert(genFuncletInfo.fiPSP_slot_InitialSP_offset ==
compiler->lvaGetInitialSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main function and
// funclet!
}
#endif // DEBUG
}
#elif defined(TARGET_X86)
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
*
* Funclets have the following incoming arguments:
*
* catch/filter-handler: eax = the exception object that was caught (see GT_CATCH_ARG)
* filter: eax = the exception object that was caught (see GT_CATCH_ARG)
* finally/fault: none
*
* Funclets set the following registers on exit:
*
* catch/filter-handler: eax = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: eax = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* Funclet prolog/epilog sequence and funclet frame layout are TBD.
*
*/
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletProlog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// TODO We may need EBP restore sequence here if we introduce PSPSym
// Add a padding for 16-byte alignment
inst_RV_IV(INS_sub, REG_SPBASE, 12, EA_PTRSIZE);
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
// Revert a padding that was added for 16-byte alignment
inst_RV_IV(INS_add, REG_SPBASE, 12, EA_PTRSIZE);
instGen_Return(0);
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
{
return;
}
}
#endif // TARGET_X86
void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
noway_assert(isFramePointerUsed()); // We need an explicit frame pointer
#if defined(TARGET_AMD64)
// The PSP sym value is Initial-SP, not Caller-SP!
// We assume that RSP is Initial-SP when this function is called. That is, the stack frame
// has been established.
//
// We generate:
// mov [rbp-20h], rsp // store the Initial-SP (our current rsp) in the PSPsym
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaPSPSym, 0);
#else // TARGET*
NYI("Set function PSP sym");
#endif // TARGET*
}
#endif // FEATURE_EH_FUNCLETS
//-----------------------------------------------------------------------------
// genZeroInitFrameUsingBlockInit: architecture-specific helper for genZeroInitFrame in the case
// `genUseBlockInit` is set.
//
// Arguments:
// untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init
// code will end initializing memory (not inclusive).
// untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will
// start zero initializing memory.
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
assert(genUseBlockInit);
assert(untrLclHi > untrLclLo);
assert(compiler->getSIMDSupportLevel() >= SIMD_SSE2_Supported);
emitter* emit = GetEmitter();
regNumber frameReg = genFramePointerReg();
regNumber zeroReg = REG_NA;
int blkSize = untrLclHi - untrLclLo;
int minSimdSize = XMM_REGSIZE_BYTES;
assert(blkSize >= 0);
noway_assert((blkSize % sizeof(int)) == 0);
// initReg is not a live incoming argument reg
assert((genRegMask(initReg) & intRegState.rsCalleeRegArgMaskLiveIn) == 0);
#if defined(TARGET_AMD64)
// We will align on x64 so we can use the aligned mov
instruction simdMov = simdAlignedMovIns();
// Aligning low we want to move up to next boundary
int alignedLclLo = (untrLclLo + (XMM_REGSIZE_BYTES - 1)) & -XMM_REGSIZE_BYTES;
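// e.g. (hypothetical) untrLclLo == -0x58 rounds up to alignedLclLo == -0x50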
if ((untrLclLo != alignedLclLo) && (blkSize < 2 * XMM_REGSIZE_BYTES))
{
// If unaligned and smaller than 2 x SIMD size we won't bother trying to align
assert((alignedLclLo - untrLclLo) < XMM_REGSIZE_BYTES);
simdMov = simdUnalignedMovIns();
}
#else // !defined(TARGET_AMD64)
// We aren't going to try and align on x86
instruction simdMov = simdUnalignedMovIns();
int alignedLclLo = untrLclLo;
#endif // !defined(TARGET_AMD64)
if (blkSize < minSimdSize)
{
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
int i = 0;
for (; i + REGSIZE_BYTES <= blkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, untrLclLo + i);
}
#if defined(TARGET_AMD64)
assert((i == blkSize) || (i + (int)sizeof(int) == blkSize));
if (i != blkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, untrLclLo + i);
i += sizeof(int);
}
#endif // defined(TARGET_AMD64)
assert(i == blkSize);
}
else
{
// Grab a non-argument, non-callee saved XMM reg
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef UNIX_AMD64_ABI
// System V x64 first temp reg is xmm8
regNumber zeroSIMDReg = genRegNumFromMask(RBM_XMM8);
#else
// Windows first temp reg is xmm4
regNumber zeroSIMDReg = genRegNumFromMask(RBM_XMM4);
#endif // UNIX_AMD64_ABI
#if defined(TARGET_AMD64)
int alignedLclHi;
int alignmentHiBlkSize;
if ((blkSize < 2 * XMM_REGSIZE_BYTES) || (untrLclLo == alignedLclLo))
{
// Either aligned or smaller than 2 x SIMD size, so we won't try to align
// However, we still want to zero anything that is not in a 16 byte chunk at the end
int alignmentBlkSize = blkSize & -XMM_REGSIZE_BYTES;
alignmentHiBlkSize = blkSize - alignmentBlkSize;
alignedLclHi = untrLclLo + alignmentBlkSize;
alignedLclLo = untrLclLo;
blkSize = alignmentBlkSize;
assert((blkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
}
else
{
// We are going to align
// Aligning high we want to move down to previous boundary
alignedLclHi = untrLclHi & -XMM_REGSIZE_BYTES;
// Zero out the unaligned portions
alignmentHiBlkSize = untrLclHi - alignedLclHi;
int alignmentLoBlkSize = alignedLclLo - untrLclLo;
blkSize = alignedLclHi - alignedLclLo;
assert((blkSize + alignmentLoBlkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
assert(alignmentLoBlkSize > 0);
assert(alignmentLoBlkSize < XMM_REGSIZE_BYTES);
assert((alignedLclLo - alignmentLoBlkSize) == untrLclLo);
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
int i = 0;
for (; i + REGSIZE_BYTES <= alignmentLoBlkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, untrLclLo + i);
}
assert((i == alignmentLoBlkSize) || (i + (int)sizeof(int) == alignmentLoBlkSize));
if (i != alignmentLoBlkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, untrLclLo + i);
i += sizeof(int);
}
assert(i == alignmentLoBlkSize);
}
#else // !defined(TARGET_AMD64)
// While we aren't aligning the start, we still want to
// zero anything that is not in a 16 byte chunk at the end
int alignmentBlkSize = blkSize & -XMM_REGSIZE_BYTES;
int alignmentHiBlkSize = blkSize - alignmentBlkSize;
int alignedLclHi = untrLclLo + alignmentBlkSize;
blkSize = alignmentBlkSize;
assert((blkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
#endif // !defined(TARGET_AMD64)
// The loop is unrolled 3 times, so we do not move to the loop block unless it
// will iterate at least once; hence the threshold is 6 SIMD register widths.
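// (With XMM_REGSIZE_BYTES == 16 that threshold is 96 bytes: smaller blocks are fully unrolled
// below, larger blocks use the 3x-unrolled loop.)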
if (blkSize < (6 * XMM_REGSIZE_BYTES))
{
// Generate the following code:
//
// xorps xmm4, xmm4
// movups xmmword ptr [ebp/esp-OFFS], xmm4
// ...
// movups xmmword ptr [ebp/esp-OFFS], xmm4
// mov qword ptr [ebp/esp-OFFS], rax
emit->emitIns_R_R(INS_xorps, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, zeroSIMDReg);
int i = 0;
for (; i < blkSize; i += XMM_REGSIZE_BYTES)
{
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, alignedLclLo + i);
}
assert(i == blkSize);
}
else
{
// Generate the following code:
//
// xorps xmm4, xmm4
// ;movaps xmmword ptr[ebp/esp-loOFFS], xmm4 ; alignment to 3x
// ;movaps xmmword ptr[ebp/esp-loOFFS + 10H], xmm4 ;
// mov rax, - <size> ; start offset from hi
// movaps xmmword ptr[rbp + rax + hiOFFS ], xmm4 ; <--+
// movaps xmmword ptr[rbp + rax + hiOFFS + 10H], xmm4 ; |
// movaps xmmword ptr[rbp + rax + hiOFFS + 20H], xmm4 ; | Loop
// add rax, 48 ; |
// jne SHORT -5 instr ; ---+
emit->emitIns_R_R(INS_xorps, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, zeroSIMDReg);
// How many extra SIMD-width stores don't fit into the 3x unroll
int extraSimd = (blkSize % (XMM_REGSIZE_BYTES * 3)) / XMM_REGSIZE_BYTES;
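// e.g. (hypothetical) blkSize == 5 * XMM_REGSIZE_BYTES gives extraSimd == 2: the two stores
// below cover the low 32 bytes and the loop then handles the remaining 48.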
if (extraSimd != 0)
{
blkSize -= XMM_REGSIZE_BYTES;
// Not a multiple of 3 so add stores at low end of block
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, alignedLclLo);
if (extraSimd == 2)
{
blkSize -= XMM_REGSIZE_BYTES;
// one more store needed
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg,
alignedLclLo + XMM_REGSIZE_BYTES);
}
}
// Exact multiple of 3 simd lengths (or loop end condition will not be met)
noway_assert((blkSize % (3 * XMM_REGSIZE_BYTES)) == 0);
// At least 3 simd lengths remain (as loop is 3x unrolled and we want it to loop at least once)
assert(blkSize >= (3 * XMM_REGSIZE_BYTES));
// In range at start of loop
assert((alignedLclHi - blkSize) >= untrLclLo);
assert(((alignedLclHi - blkSize) + (XMM_REGSIZE_BYTES * 2)) < (untrLclHi - XMM_REGSIZE_BYTES));
// In range at end of loop
assert((alignedLclHi - (3 * XMM_REGSIZE_BYTES) + (2 * XMM_REGSIZE_BYTES)) <=
(untrLclHi - XMM_REGSIZE_BYTES));
assert((alignedLclHi - (blkSize + extraSimd * XMM_REGSIZE_BYTES)) == alignedLclLo);
// Set loop counter
emit->emitIns_R_I(INS_mov, EA_PTRSIZE, initReg, -(ssize_t)blkSize);
// Loop start
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1, alignedLclHi);
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1,
alignedLclHi + XMM_REGSIZE_BYTES);
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1,
alignedLclHi + 2 * XMM_REGSIZE_BYTES);
emit->emitIns_R_I(INS_add, EA_PTRSIZE, initReg, XMM_REGSIZE_BYTES * 3);
// Loop until counter is 0
emit->emitIns_J(INS_jne, nullptr, -5);
// initReg will be zero at end of the loop
*pInitRegZeroed = true;
}
if (untrLclHi != alignedLclHi)
{
assert(alignmentHiBlkSize > 0);
assert(alignmentHiBlkSize < XMM_REGSIZE_BYTES);
assert((alignedLclHi + alignmentHiBlkSize) == untrLclHi);
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
int i = 0;
for (; i + REGSIZE_BYTES <= alignmentHiBlkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, alignedLclHi + i);
}
#if defined(TARGET_AMD64)
assert((i == alignmentHiBlkSize) || (i + (int)sizeof(int) == alignmentHiBlkSize));
if (i != alignmentHiBlkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, alignedLclHi + i);
i += sizeof(int);
}
#endif // defined(TARGET_AMD64)
assert(i == alignmentHiBlkSize);
}
}
}
// Save compCalleeFPRegsPushed with the smallest register number saved at [RSP+offset], working
// down the stack to the largest register number stored at [RSP+offset-(genCountBits(regMask)-1)*XMM_REG_SIZE]
// Here offset = 16-byte aligned offset after pushing integer registers.
//
// Params
// lclFrameSize - Fixed frame size excluding callee pushed int regs.
// non-funclet: this will be compLclFrameSize.
// funclet frames: this will be FuncletInfo.fiSpDelta.
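// Illustrative (hypothetical) numbers: with lclFrameSize == 0x58 and an 8-byte firstFPRegPadding,
// the lowest-numbered saved XMM reg is stored at [rsp+0x40], the next at [rsp+0x30], and so on
// (each offset staying 16-byte aligned).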
void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize)
{
genVzeroupperIfNeeded(false);
regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
// Only callee saved floating point registers should be in regMask
assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
// fast path return
if (regMask == RBM_NONE)
{
return;
}
#ifdef TARGET_AMD64
unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
unsigned offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
// Offset is 16-byte aligned since we use movaps for preserving xmm regs.
assert((offset % 16) == 0);
instruction copyIns = ins_Copy(TYP_FLOAT);
#else // !TARGET_AMD64
unsigned offset = lclFrameSize - XMM_REGSIZE_BYTES;
instruction copyIns = INS_movupd;
#endif // !TARGET_AMD64
for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & regMask) != 0)
{
// ABI requires us to preserve lower 128-bits of YMM register.
GetEmitter()->emitIns_AR_R(copyIns,
EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
// EA_16BYTE
reg, REG_SPBASE, offset);
compiler->unwindSaveReg(reg, offset);
regMask &= ~regBit;
offset -= XMM_REGSIZE_BYTES;
}
}
}
// Save/Restore compCalleeFPRegsPushed with the smallest register number saved at [RSP+offset], working
// down the stack to the largest register number stored at [RSP+offset-(genCountBits(regMask)-1)*XMM_REG_SIZE]
// Here offset = 16-byte aligned offset after pushing integer registers.
//
// Params
// lclFrameSize - Fixed frame size excluding callee pushed int regs.
// non-funclet: this will be compLclFrameSize.
// funclet frames: this will be FuncletInfo.fiSpDelta.
void CodeGen::genRestoreCalleeSavedFltRegs(unsigned lclFrameSize)
{
regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
// Only callee saved floating point registers should be in regMask
assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
// fast path return
if (regMask == RBM_NONE)
{
genVzeroupperIfNeeded();
return;
}
#ifdef TARGET_AMD64
unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
instruction copyIns = ins_Copy(TYP_FLOAT);
#else // !TARGET_AMD64
unsigned firstFPRegPadding = 0;
instruction copyIns = INS_movupd;
#endif // !TARGET_AMD64
unsigned offset;
regNumber regBase;
if (compiler->compLocallocUsed)
{
// localloc frame: use frame pointer relative offset
assert(isFramePointerUsed());
regBase = REG_FPBASE;
offset = lclFrameSize - genSPtoFPdelta() - firstFPRegPadding - XMM_REGSIZE_BYTES;
}
else
{
regBase = REG_SPBASE;
offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
}
#ifdef TARGET_AMD64
// Offset is 16-byte aligned since we use movaps for restoring xmm regs
assert((offset % 16) == 0);
#endif // TARGET_AMD64
for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & regMask) != 0)
{
// ABI requires us to restore lower 128-bits of YMM register.
GetEmitter()->emitIns_R_AR(copyIns,
EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
// EA_16BYTE
reg, regBase, offset);
regMask &= ~regBit;
offset -= XMM_REGSIZE_BYTES;
}
}
genVzeroupperIfNeeded();
}
// Generate a vzeroupper instruction as needed to zero out the upper 128 bits of all YMM registers so that the
// AVX/Legacy SSE transition penalties can be avoided. This function is used in genPreserveCalleeSavedFltRegs
// (prolog) and genRestoreCalleeSavedFltRegs (epilog). Issue VZEROUPPER in Prolog if the method contains
// 128-bit or 256-bit AVX code, to avoid legacy SSE to AVX transition penalty, which could happen when native
// code contains legacy SSE code calling into JIT AVX code (e.g. reverse pinvoke). Issue VZEROUPPER in Epilog
// if the method contains 256-bit AVX code, to avoid AVX to legacy SSE transition penalty.
//
// Params
// check256bitOnly - true to check if the function contains 256-bit AVX instruction and generate Vzeroupper
// instruction, false to check if the function contains AVX instruciton (either 128-bit or 256-bit).
//
void CodeGen::genVzeroupperIfNeeded(bool check256bitOnly /* = true*/)
{
bool emitVzeroUpper = false;
if (check256bitOnly)
{
emitVzeroUpper = GetEmitter()->Contains256bitAVX();
}
else
{
emitVzeroUpper = GetEmitter()->ContainsAVX();
}
if (emitVzeroUpper)
{
assert(compiler->canUseVexEncoding());
instGen(INS_vzeroupper);
}
}
//-----------------------------------------------------------------------------------
// instGen_MemoryBarrier: Emit a MemoryBarrier instruction
//
// Arguments:
// barrierKind - kind of barrier to emit (Load-only is no-op on xarch)
//
// Notes:
// All MemoryBarrier instructions can be removed by DOTNET_JitNoMemoryBarriers=1
//
void CodeGen::instGen_MemoryBarrier(BarrierKind barrierKind)
{
#ifdef DEBUG
if (JitConfig.JitNoMemoryBarriers() == 1)
{
return;
}
#endif // DEBUG
// only full barrier needs to be emitted on Xarch
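// (The full barrier below is emitted as 'lock or dword ptr [rsp], 0'; the locked read-modify-write
// on the stack top acts as a full fence without requiring mfence or a scratch register.)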
if (barrierKind == BARRIER_FULL)
{
instGen(INS_lock);
GetEmitter()->emitIns_I_AR(INS_or, EA_4BYTE, 0, REG_SPBASE, 0);
}
}
#ifdef TARGET_AMD64
// Returns relocation type hint for an addr.
// Note that there are no reloc hints on x86.
//
// Arguments
// addr - data address
//
// Returns
// relocation type hint
//
unsigned short CodeGenInterface::genAddrRelocTypeHint(size_t addr)
{
return compiler->eeGetRelocTypeHint((void*)addr);
}
#endif // TARGET_AMD64
// Return true if an absolute indirect data address can be encoded as an IP-relative
// offset. Note that this method should be used only when the caller knows that
// the address is an icon value that VM has given and there is no GenTree node
// representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - an absolute indirect data address
//
// Returns
// true if indir data addr could be encoded as IP-relative offset.
//
bool CodeGenInterface::genDataIndirAddrCanBeEncodedAsPCRelOffset(size_t addr)
{
#ifdef TARGET_AMD64
return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32;
#else
// x86: PC-relative addressing is available only for control flow instructions (jmp and call)
return false;
#endif
}
// Return true if an indirect code address can be encoded as IP-relative offset.
// Note that this method should be used only when the caller knows that the
// address is an icon value that VM has given and there is no GenTree node
// representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - an absolute indirect code address
//
// Returns
// true if indir code addr could be encoded as IP-relative offset.
//
bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsPCRelOffset(size_t addr)
{
#ifdef TARGET_AMD64
return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32;
#else
// x86: PC-relative addressing is available only for control flow instructions (jmp and call)
return true;
#endif
}
// Return true if an indirect code address can be encoded as 32-bit displacement
// relative to zero. Note that this method should be used only when the caller
// knows that the address is an icon value that VM has given and there is no
// GenTree node representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - absolute indirect code address
//
// Returns
// true if absolute indir code addr could be encoded as 32-bit displacement relative to zero.
//
bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsZeroRelOffset(size_t addr)
{
return GenTreeIntConCommon::FitsInI32((ssize_t)addr);
}
// Return true if an absolute indirect code address needs a relocation recorded with VM.
//
// Arguments
// addr - an absolute indirect code address
//
// Returns
// true if indir code addr needs a relocation recorded with VM
//
bool CodeGenInterface::genCodeIndirAddrNeedsReloc(size_t addr)
{
// If generating relocatable ngen code, then all code addr should go through relocation
if (compiler->opts.compReloc)
{
return true;
}
#ifdef TARGET_AMD64
// See if the code indir addr can be encoded as 32-bit displacement relative to zero.
// We don't need a relocation in that case.
if (genCodeIndirAddrCanBeEncodedAsZeroRelOffset(addr))
{
return false;
}
// Else we need a relocation.
return true;
#else // TARGET_X86
// On x86 there is no need to record or ask for relocations during jitting,
// because all addrs fit within 32-bits.
return false;
#endif // TARGET_X86
}
// Return true if a direct code address needs to be marked as relocatable.
//
// Arguments
// addr - absolute direct code address
//
// Returns
// true if direct code addr needs a relocation recorded with VM
//
bool CodeGenInterface::genCodeAddrNeedsReloc(size_t addr)
{
// If generating relocatable ngen code, then all code addr should go through relocation
if (compiler->opts.compReloc)
{
return true;
}
#ifdef TARGET_AMD64
// By default all direct code addresses go through relocation so that VM will setup
// a jump stub if addr cannot be encoded as pc-relative offset.
return true;
#else // TARGET_X86
// On x86 there is no need for recording relocations during jitting,
// because all addrs fit within 32-bits.
return false;
#endif // TARGET_X86
}
#endif // TARGET_XARCH
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Amd64/x86 Code Generator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#pragma warning(disable : 4310) // cast truncates constant value - happens for (int8_t)0xb1
#endif
#ifdef TARGET_XARCH
#include "emit.h"
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "gcinfoencoder.h"
#include "patchpointinfo.h"
//---------------------------------------------------------------------
// genSetGSSecurityCookie: Set the "GS" security cookie in the prolog.
//
// Arguments:
// initReg - register to use as a scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->getNeedsGSSecurityCookie())
{
return;
}
if (compiler->opts.IsOSR() && compiler->info.compPatchpointInfo->HasSecurityCookie())
{
// Security cookie is on original frame and was initialized there.
return;
}
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
noway_assert(compiler->gsGlobalSecurityCookieVal != 0);
#ifdef TARGET_AMD64
if ((size_t)(int)compiler->gsGlobalSecurityCookieVal != compiler->gsGlobalSecurityCookieVal)
{
// initReg = #GlobalSecurityCookieVal64; [frame.GSSecurityCookie] = initReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
*pInitRegZeroed = false;
}
else
#endif
{
// mov dword ptr [frame.GSSecurityCookie], #GlobalSecurityCookieVal
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
(int)compiler->gsGlobalSecurityCookieVal);
}
}
else
{
// Always use EAX on x86 and x64
// On x64, if we're not moving into RAX, and the address isn't RIP relative, we can't encode it.
// mov eax, dword ptr [compiler->gsGlobalSecurityCookieAddr]
// mov dword ptr [frame.GSSecurityCookie], eax
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_EAX, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
regSet.verifyRegUsed(REG_EAX);
GetEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, REG_EAX, compiler->lvaGSSecurityCookie, 0);
if (initReg == REG_EAX)
{
*pInitRegZeroed = false;
}
}
}
/*****************************************************************************
*
* Generate code to check that the GS cookie wasn't trashed by a buffer
* overrun. If pushReg is true, preserve all registers around the code sequence.
* Otherwise ECX could be modified.
*
* Implementation Note: pushReg = true, in case of tail calls.
*/
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
// Make sure that EAX is reported as live GC-ref so that any GC that kicks in while
// executing GS cookie check will not collect the object pointed to by EAX.
//
// For Amd64 System V, a two-register-returned struct could be returned in RAX and RDX
// In such case make sure that the correct GC-ness of RDX is reported as well, so
// a GC object pointed by RDX will not be collected.
if (!pushReg)
{
// Handle multi-reg return type values
if (compiler->compMethodReturnsMultiRegRetType())
{
ReturnTypeDesc retTypeDesc;
if (varTypeIsLong(compiler->info.compRetNativeType))
{
retTypeDesc.InitializeLongReturnType();
}
else // we must have a struct return type
{
retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass,
compiler->info.compCallConv);
}
const unsigned regCount = retTypeDesc.GetReturnRegCount();
// Only x86 and x64 Unix ABI allows multi-reg return and
// number of result regs should be equal to MAX_RET_REG_COUNT.
assert(regCount == MAX_RET_REG_COUNT);
for (unsigned i = 0; i < regCount; ++i)
{
gcInfo.gcMarkRegPtrVal(retTypeDesc.GetABIReturnReg(i), retTypeDesc.GetReturnRegType(i));
}
}
else if (compiler->compMethodReturnsRetBufAddr())
{
// This is for returning in an implicit RetBuf.
// If the address of the buffer is returned in REG_INTRET, mark the content of INTRET as ByRef.
// In case the return is in an implicit RetBuf, the native return type should be a struct
assert(varTypeIsStruct(compiler->info.compRetNativeType));
gcInfo.gcMarkRegPtrVal(REG_INTRET, TYP_BYREF);
}
// ... all other cases.
else
{
#ifdef TARGET_AMD64
// For x64, structs that are not returned in registers are always
// returned in implicit RetBuf. If we reached here, we should not have
// a RetBuf and the return type should not be a struct.
assert(compiler->info.compRetBuffArg == BAD_VAR_NUM);
assert(!varTypeIsStruct(compiler->info.compRetNativeType));
#endif // TARGET_AMD64
// For x86 Windows we can't make such assertions since we generate code for returning of
// the RetBuf in REG_INTRET only when the ProfilerHook is enabled. Otherwise
// compRetNativeType could be TYP_STRUCT.
gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
}
}
regNumber regGSCheck;
regMaskTP regMaskGSCheck = RBM_NONE;
if (!pushReg)
{
        // Non-tail call: we can use any callee-trash register that is not
        // a return register and does not contain the 'this' pointer (to keep 'this' alive),
        // since we are generating the GS cookie check after a GT_RETURN block.
        // Note: On Amd64 System V, RDX is an arg register - REG_ARG_2 - as well
        // as a return register for two-register-returned structs.
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaGetDesc(compiler->info.compThisArg)->lvIsInReg() &&
(compiler->lvaGetDesc(compiler->info.compThisArg)->GetRegNum() == REG_ARG_0))
{
regGSCheck = REG_ARG_1;
}
else
{
regGSCheck = REG_ARG_0;
}
}
else
{
#ifdef TARGET_X86
// It doesn't matter which register we pick, since we're going to save and restore it
// around the check.
// TODO-CQ: Can we optimize the choice of register to avoid doing the push/pop sometimes?
regGSCheck = REG_EAX;
regMaskGSCheck = RBM_EAX;
#else // !TARGET_X86
        // Jmp calls: the method handle is specified, and the JIT uses it to query the VM for the
        // entry point address; hence such a call can be neither a VSD call nor a PInvoke calli
        // with a cookie parameter. Therefore, for jmp calls it is safe to use R11.
regGSCheck = REG_R11;
#endif // !TARGET_X86
}
regMaskTP byrefPushedRegs = RBM_NONE;
regMaskTP norefPushedRegs = RBM_NONE;
regMaskTP pushedRegs = RBM_NONE;
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
#if defined(TARGET_AMD64)
// If GS cookie value fits within 32-bits we can use 'cmp mem64, imm32'.
// Otherwise, load the value into a reg and use 'cmp mem64, reg64'.
if ((int)compiler->gsGlobalSecurityCookieVal != (ssize_t)compiler->gsGlobalSecurityCookieVal)
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, regGSCheck, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
else
#endif // defined(TARGET_AMD64)
{
assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
(int)compiler->gsGlobalSecurityCookieVal);
}
}
else
{
// Ngen case - GS cookie value needs to be accessed through an indirection.
pushedRegs = genPushRegs(regMaskGSCheck, &byrefPushedRegs, &norefPushedRegs);
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSCheck, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
GetEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regGSCheck, regGSCheck, 0);
GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
BasicBlock* gsCheckBlk = genCreateTempLabel();
inst_JMP(EJ_je, gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
genPopRegs(pushedRegs, byrefPushedRegs, norefPushedRegs);
}
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
#if defined(FEATURE_EH_FUNCLETS)
// Generate a call to the finally, like this:
// mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym
// call finally-funclet
// jmp finally-return // Only for non-retless finally calls
// The jmp can be a NOP if we're going to the next block.
// If we're generating code for the main function (not a funclet), and there is no localloc,
// then RSP at this point is the same value as that stored in the PSPSym. So just copy RSP
// instead of loading the PSPSym in this case, or if PSPSym is not used (CoreRT ABI).
if ((compiler->lvaPSPSym == BAD_VAR_NUM) ||
(!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT)))
{
#ifndef UNIX_X86_ABI
inst_Mov(TYP_I_IMPL, REG_ARG_0, REG_SPBASE, /* canSkip */ false);
#endif // !UNIX_X86_ABI
}
else
{
GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
}
GetEmitter()->emitIns_J(INS_call, block->bbJumpDest);
if (block->bbFlags & BBF_RETLESS_CALL)
{
// We have a retless call, and the last instruction generated was a call.
// If the next block is in a different EH region (or is the end of the code
// block), then we need to generate a breakpoint here (since it will never
// get executed) to get proper unwind behavior.
if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
{
instGen(INS_BREAKPOINT); // This should never get executed
}
}
else
{
// TODO-Linux-x86: Do we need to handle the GC information for this NOP or JMP specially, as is done for other
// architectures?
#ifndef JIT32_GCENCODER
// Because of the way the flowgraph is connected, the liveness info for this one instruction
        // after the call is not (cannot be) correct in cases where a variable has a last use in the
// handler. So turn off GC reporting for this single instruction.
GetEmitter()->emitDisableGC();
#endif // JIT32_GCENCODER
// Now go to where the finally funclet needs to return to.
if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
{
// Fall-through.
// TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly
// to the next instruction? This would depend on stack walking from within the finally
// handler working without this instruction being in this special EH region.
instGen(INS_nop);
}
else
{
inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
}
#ifndef JIT32_GCENCODER
GetEmitter()->emitEnableGC();
#endif // JIT32_GCENCODER
}
#else // !FEATURE_EH_FUNCLETS
// If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot
// corresponding to the finally's nesting level. When invoked in response to an exception, the
// EE does this.
//
// We have a BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
//
// We will emit :
// mov [ebp - (n + 1)], 0
// mov [ebp - n ], 0xFC
// push &step
// jmp finallyBlock
// ...
// step:
// mov [ebp - n ], 0
// jmp leaveTarget
// ...
// leaveTarget:
noway_assert(isFramePointerUsed());
// Get the nesting level which contains the finally
unsigned finallyNesting = 0;
compiler->fgGetNestingLevel(block, &finallyNesting);
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
unsigned curNestingSlotOffs;
curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));
// Zero out the slot for the next nesting level
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar,
curNestingSlotOffs - TARGET_POINTER_SIZE, 0);
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, LCL_FINALLY_MARK);
// Now push the address where the finally funclet should return to directly.
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
GetEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
}
else
{
// EE expects a DWORD, so we provide 0
inst_IV(INS_push_hide, 0);
}
// Jump to the finally BB
inst_JMP(EJ_jmp, block->bbJumpDest);
#endif // !FEATURE_EH_FUNCLETS
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
// jump target using bbJumpDest - that is already used to point
// to the finally block. So just skip past the BBJ_ALWAYS unless the
// block is RETLESS.
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
block = block->bbNext;
}
return block;
}
#if defined(FEATURE_EH_FUNCLETS)
void CodeGen::genEHCatchRet(BasicBlock* block)
{
// Set RAX to the address the VM should return to after the catch.
// Generate a RIP-relative
// lea reg, [rip + disp32] ; the RIP is implicit
// which will be position-independent.
GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
}
#else // !FEATURE_EH_FUNCLETS
void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block)
{
// The last statement of the block must be a GT_RETFILT, which has already been generated.
assert(block->lastNode() != nullptr);
assert(block->lastNode()->OperGet() == GT_RETFILT);
if (block->bbJumpKind == BBJ_EHFINALLYRET)
{
assert(block->lastNode()->AsOp()->gtOp1 == nullptr); // op1 == nullptr means endfinally
// Return using a pop-jmp sequence. As the "try" block calls
// the finally with a jmp, this leaves the x86 call-ret stack
        // balanced along the normal flow path.
noway_assert(isFramePointerRequired());
inst_RV(INS_pop_hide, REG_EAX, TYP_I_IMPL);
inst_RV(INS_i_jmp, REG_EAX, TYP_I_IMPL);
}
else
{
assert(block->bbJumpKind == BBJ_EHFILTERRET);
// The return value has already been computed.
instGen_Return(0);
}
}
#endif // !FEATURE_EH_FUNCLETS
// Move an immediate value into an integer register
void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags))
{
// reg cannot be a FP register
assert(!genIsValidFloatReg(reg));
emitAttr origAttr = size;
if (!compiler->opts.compReloc)
{
// Strip any reloc flags from size if we aren't doing relocs
size = EA_REMOVE_FLG(size, EA_CNS_RELOC_FLG | EA_DSP_RELOC_FLG);
}
if ((imm == 0) && !EA_IS_RELOC(size))
{
instGen_Set_Reg_To_Zero(size, reg, flags);
}
else
{
// Only use lea if the original was relocatable. Otherwise we can get spurious
// instruction selection due to different memory placement at runtime.
if (EA_IS_RELOC(origAttr) && genDataIndirAddrCanBeEncodedAsPCRelOffset(imm))
{
// We will use lea so displacement and not immediate will be relocatable
size = EA_SET_FLG(EA_REMOVE_FLG(size, EA_CNS_RELOC_FLG), EA_DSP_RELOC_FLG);
GetEmitter()->emitIns_R_AI(INS_lea, size, reg, imm);
}
else
{
GetEmitter()->emitIns_R_I(INS_mov, size, reg, imm DEBUGARG(gtFlags));
}
}
regSet.verifyRegUsed(reg);
}
/***********************************************************************************
*
* Generate code to set a register 'targetReg' of type 'targetType' to the constant
* specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
* genProduceReg() on the target register.
*/
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree)
{
switch (tree->gtOper)
{
case GT_CNS_INT:
{
// relocatable values tend to come down as a CNS_INT of native int type
// so the line between these two opcodes is kind of blurry
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t cnsVal = con->IconValue();
emitAttr attr = emitActualTypeSize(targetType);
// Currently this cannot be done for all handles due to
// https://github.com/dotnet/runtime/issues/60712. However, it is
// also unclear whether we unconditionally want to use rip-relative
// lea instructions when not necessary. While a mov is larger, on
// many Intel CPUs rip-relative lea instructions have higher
// latency.
if (con->ImmedValNeedsReloc(compiler))
{
attr = EA_SET_FLG(attr, EA_CNS_RELOC_FLG);
}
if (targetType == TYP_BYREF)
{
attr = EA_SET_FLG(attr, EA_BYREF_FLG);
}
instGen_Set_Reg_To_Imm(attr, targetReg, cnsVal, INS_FLAGS_DONT_CARE DEBUGARG(0) DEBUGARG(tree->gtFlags));
regSet.verifyRegUsed(targetReg);
}
break;
case GT_CNS_DBL:
{
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(targetType);
double constValue = tree->AsDblCon()->gtDconVal;
// Make sure we use "xorps reg, reg" only for +ve zero constant (0.0) and not for -ve zero (-0.0)
if (*(__int64*)&constValue == 0)
{
// A faster/smaller way to generate 0
emit->emitIns_R_R(INS_xorps, size, targetReg, targetReg);
}
else
{
CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(constValue, size);
emit->emitIns_R_C(ins_Load(targetType), size, targetReg, hnd, 0);
}
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genCodeForNegNot: Produce code for a GT_NEG/GT_NOT node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForNegNot(GenTree* tree)
{
assert(tree->OperIs(GT_NEG, GT_NOT));
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
if (varTypeIsFloating(targetType))
{
assert(tree->gtOper == GT_NEG);
genSSE2BitwiseOp(tree);
}
else
{
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
inst_RV(ins, targetReg, targetType);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForBswap: Produce code for a GT_BSWAP / GT_BSWAP16 node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForBswap(GenTree* tree)
{
// TODO: If we're swapping immediately after a read from memory or immediately before
// a write to memory, use the MOVBE instruction instead of the BSWAP instruction if
// the platform supports it.
assert(tree->OperIs(GT_BSWAP, GT_BSWAP16));
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
if (tree->OperIs(GT_BSWAP))
{
// 32-bit and 64-bit byte swaps use "bswap reg"
inst_RV(INS_bswap, targetReg, targetType);
}
else
{
// 16-bit byte swaps use "ror reg.16, 8"
inst_RV_IV(INS_ror_N, targetReg, 8 /* val */, emitAttr::EA_2BYTE);
}
genProduceReg(tree);
}
// Produce code for a GT_INC_SATURATE node.
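// The sequence below is an unsigned saturating increment (sketch): "add reg, 1" followed by
// "sbb reg, 0" -- if the add wraps to zero, the carry is subtracted back out, leaving the
// value saturated at all ones.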
void CodeGen::genCodeForIncSaturate(GenTree* tree)
{
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
inst_Mov(targetType, targetReg, operandReg, /* canSkip */ true);
inst_RV_IV(INS_add, targetReg, 1, emitActualTypeSize(targetType));
inst_RV_IV(INS_sbb, targetReg, 0, emitActualTypeSize(targetType));
genProduceReg(tree);
}
// Generate code to get the high N bits of a N*N=2N bit multiplication result
void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
assert(!treeNode->gtOverflowEx());
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(treeNode);
GenTree* op1 = treeNode->AsOp()->gtOp1;
GenTree* op2 = treeNode->AsOp()->gtOp2;
// to get the high bits of the multiply, we are constrained to using the
// 1-op form: RDX:RAX = RAX * rm
// The 3-op form (Rx=Ry*Rz) does not support it.
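    // For example (illustrative sketch), a signed GT_MULHI with operands in regOp and rmOp emits:
    //     mov  rax, regOpReg
    //     imul rmOp            ; RDX:RAX = RAX * rmOp
    //     mov  targetReg, rdx  ; high half of the product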
genConsumeOperands(treeNode->AsOp());
GenTree* regOp = op1;
GenTree* rmOp = op2;
// Set rmOp to the memory operand (if any)
if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->GetRegNum() == REG_RAX)))
{
regOp = op2;
rmOp = op1;
}
assert(regOp->isUsedFromReg());
// Setup targetReg when neither of the source operands was a matching register
inst_Mov(targetType, REG_RAX, regOp->GetRegNum(), /* canSkip */ true);
instruction ins;
if ((treeNode->gtFlags & GTF_UNSIGNED) == 0)
{
ins = INS_imulEAX;
}
else
{
ins = INS_mulEAX;
}
emit->emitInsBinary(ins, size, treeNode, rmOp);
// Move the result to the desired register, if necessary
if (treeNode->OperGet() == GT_MULHI)
{
inst_Mov(targetType, targetReg, REG_RDX, /* canSkip */ true);
}
genProduceReg(treeNode);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genCodeForLongUMod: Generate code for a tree of the form
// `(umod (gt_long x y) (const int))`
//
// Arguments:
// node - the node for which to generate code
//
void CodeGen::genCodeForLongUMod(GenTreeOp* node)
{
assert(node != nullptr);
assert(node->OperGet() == GT_UMOD);
assert(node->TypeGet() == TYP_INT);
GenTreeOp* const dividend = node->gtOp1->AsOp();
assert(dividend->OperGet() == GT_LONG);
assert(varTypeIsLong(dividend));
genConsumeOperands(node);
GenTree* const dividendLo = dividend->gtOp1;
GenTree* const dividendHi = dividend->gtOp2;
assert(dividendLo->isUsedFromReg());
assert(dividendHi->isUsedFromReg());
GenTree* const divisor = node->gtOp2;
assert(divisor->gtSkipReloadOrCopy()->OperGet() == GT_CNS_INT);
assert(divisor->gtSkipReloadOrCopy()->isUsedFromReg());
assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal >= 2);
assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal <= 0x3fffffff);
// dividendLo must be in RAX; dividendHi must be in RDX
genCopyRegIfNeeded(dividendLo, REG_EAX);
genCopyRegIfNeeded(dividendHi, REG_EDX);
    // At this point, EDX:EAX contains the 64-bit dividend and op2->GetRegNum()
    // contains the 32-bit divisor. We want to generate the following code:
//
// cmp edx, divisor->GetRegNum()
// jb noOverflow
//
// mov temp, eax
// mov eax, edx
// xor edx, edx
// div divisor->GetRegNum()
// mov eax, temp
//
// noOverflow:
// div divisor->GetRegNum()
//
// This works because (a * 2^32 + b) % c = ((a % c) * 2^32 + b) % c.
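    // For example, with c = 10: (12 * 2^32 + 7) % 10 = ((12 % 10) * 2^32 + 7) % 10 = 9.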
BasicBlock* const noOverflow = genCreateTempLabel();
// cmp edx, divisor->GetRegNum()
// jb noOverflow
inst_RV_RV(INS_cmp, REG_EDX, divisor->GetRegNum());
inst_JMP(EJ_jb, noOverflow);
// mov temp, eax
// mov eax, edx
// xor edx, edx
// div divisor->GetRegNum()
// mov eax, temp
const regNumber tempReg = node->GetSingleTempReg();
inst_Mov(TYP_INT, tempReg, REG_EAX, /* canSkip */ false);
inst_Mov(TYP_INT, REG_EAX, REG_EDX, /* canSkip */ false);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
inst_RV(INS_div, divisor->GetRegNum(), TYP_INT);
inst_Mov(TYP_INT, REG_EAX, tempReg, /* canSkip */ false);
// noOverflow:
// div divisor->GetRegNum()
genDefineTempLabel(noOverflow);
inst_RV(INS_div, divisor->GetRegNum(), TYP_INT);
const regNumber targetReg = node->GetRegNum();
inst_Mov(TYP_INT, targetReg, REG_RDX, /* canSkip */ true);
genProduceReg(node);
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// genCodeForDivMod: Generate code for a DIV or MOD operation.
//
// Arguments:
// treeNode - the node to generate the code for
//
void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
{
assert(treeNode->OperIs(GT_DIV, GT_UDIV, GT_MOD, GT_UMOD));
GenTree* dividend = treeNode->gtOp1;
#ifdef TARGET_X86
if (varTypeIsLong(dividend->TypeGet()))
{
genCodeForLongUMod(treeNode);
return;
}
#endif // TARGET_X86
GenTree* divisor = treeNode->gtOp2;
genTreeOps oper = treeNode->OperGet();
emitAttr size = emitTypeSize(treeNode);
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
// Node's type must be int/native int, small integer types are not
// supported and floating point types are handled by genCodeForBinary.
assert(varTypeIsIntOrI(targetType));
// dividend is in a register.
assert(dividend->isUsedFromReg());
genConsumeOperands(treeNode->AsOp());
// dividend must be in RAX
genCopyRegIfNeeded(dividend, REG_RAX);
// zero or sign extend rax to rdx
if (oper == GT_UMOD || oper == GT_UDIV ||
(dividend->IsIntegralConst() && (dividend->AsIntConCommon()->IconValue() > 0)))
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
}
else
{
emit->emitIns(INS_cdq, size);
        // The cdq instruction writes RDX, so clear the gcInfo for RDX.
gcInfo.gcMarkRegSetNpt(RBM_RDX);
}
// Perform the 'targetType' (64-bit or 32-bit) divide instruction
instruction ins;
if (oper == GT_UMOD || oper == GT_UDIV)
{
ins = INS_div;
}
else
{
ins = INS_idiv;
}
emit->emitInsBinary(ins, size, treeNode, divisor);
// DIV/IDIV instructions always store the quotient in RAX and the remainder in RDX.
// Move the result to the desired register, if necessary
if (oper == GT_DIV || oper == GT_UDIV)
{
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
}
else
{
assert((oper == GT_MOD) || (oper == GT_UMOD));
inst_Mov(targetType, targetReg, REG_RDX, /* canSkip */ true);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForBinary: Generate code for many binary arithmetic operators
//
// Arguments:
// treeNode - The binary operation for which we are generating code.
//
// Return Value:
// None.
//
// Notes:
// Integer MUL and DIV variants have special constraints on x64 so are not handled here.
// See the assert below for the operators that are handled.
void CodeGen::genCodeForBinary(GenTreeOp* treeNode)
{
#ifdef DEBUG
bool isValidOper = treeNode->OperIs(GT_ADD, GT_SUB);
if (varTypeIsFloating(treeNode->TypeGet()))
{
isValidOper |= treeNode->OperIs(GT_MUL, GT_DIV);
}
else
{
isValidOper |= treeNode->OperIs(GT_AND, GT_OR, GT_XOR);
#ifndef TARGET_64BIT
isValidOper |= treeNode->OperIs(GT_ADD_LO, GT_ADD_HI, GT_SUB_LO, GT_SUB_HI);
#endif
}
assert(isValidOper);
#endif
genConsumeOperands(treeNode);
const genTreeOps oper = treeNode->OperGet();
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
// Commutative operations can mark op1 as contained or reg-optional to generate "op reg, memop/immed"
if (!op1->isUsedFromReg())
{
assert(treeNode->OperIsCommutative());
assert(op1->isMemoryOp() || op1->IsLocal() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() ||
op1->IsRegOptional());
op1 = treeNode->gtGetOp2();
op2 = treeNode->gtGetOp1();
}
instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
noway_assert(targetReg != REG_NA);
regNumber op1reg = op1->isUsedFromReg() ? op1->GetRegNum() : REG_NA;
regNumber op2reg = op2->isUsedFromReg() ? op2->GetRegNum() : REG_NA;
if (varTypeIsFloating(treeNode->TypeGet()))
{
// floating-point addition, subtraction, multiplication, and division
// all have RMW semantics if VEX support is not available
bool isRMW = !compiler->canUseVexEncoding();
inst_RV_RV_TT(ins, emitTypeSize(treeNode), targetReg, op1reg, op2, isRMW);
genProduceReg(treeNode);
return;
}
GenTree* dst;
GenTree* src;
// This is the case of reg1 = reg1 op reg2
// We're ready to emit the instruction without any moves
if (op1reg == targetReg)
{
dst = op1;
src = op2;
}
// We have reg1 = reg2 op reg1
// In order for this operation to be correct
// we need that op is a commutative operation so
// we can convert it into reg1 = reg1 op reg2 and emit
// the same code as above
else if (op2reg == targetReg)
{
noway_assert(GenTree::OperIsCommutative(oper));
dst = op2;
src = op1;
}
// now we know there are 3 different operands so attempt to use LEA
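    // (sketch: lea targetReg, [op1Reg + op2Reg]  or  lea targetReg, [op1Reg + imm])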
else if (oper == GT_ADD && !varTypeIsFloating(treeNode) && !treeNode->gtOverflowEx() // LEA does not set flags
&& (op2->isContainedIntOrIImmed() || op2->isUsedFromReg()) && !treeNode->gtSetFlags())
{
if (op2->isContainedIntOrIImmed())
{
emit->emitIns_R_AR(INS_lea, emitTypeSize(treeNode), targetReg, op1reg,
(int)op2->AsIntConCommon()->IconValue());
}
else
{
assert(op2reg != REG_NA);
emit->emitIns_R_ARX(INS_lea, emitTypeSize(treeNode), targetReg, op1reg, op2reg, 1, 0);
}
genProduceReg(treeNode);
return;
}
// dest, op1 and op2 registers are different:
// reg3 = reg1 op reg2
// We can implement this by issuing a mov:
// reg3 = reg1
// reg3 = reg3 op reg2
else
{
var_types op1Type = op1->TypeGet();
inst_Mov(op1Type, targetReg, op1reg, /* canSkip */ false);
regSet.verifyRegUsed(targetReg);
gcInfo.gcMarkRegPtrVal(targetReg, op1Type);
dst = treeNode;
src = op2;
}
// try to use an inc or dec
if (oper == GT_ADD && !varTypeIsFloating(treeNode) && src->isContainedIntOrIImmed() && !treeNode->gtOverflowEx())
{
if (src->IsIntegralConst(1))
{
emit->emitIns_R(INS_inc, emitTypeSize(treeNode), targetReg);
genProduceReg(treeNode);
return;
}
else if (src->IsIntegralConst(-1))
{
emit->emitIns_R(INS_dec, emitTypeSize(treeNode), targetReg);
genProduceReg(treeNode);
return;
}
}
regNumber r = emit->emitInsBinary(ins, emitTypeSize(treeNode), dst, src);
noway_assert(r == targetReg);
if (treeNode->gtOverflowEx())
{
#if !defined(TARGET_64BIT)
assert(oper == GT_ADD || oper == GT_SUB || oper == GT_ADD_HI || oper == GT_SUB_HI);
#else
assert(oper == GT_ADD || oper == GT_SUB);
#endif
genCheckOverflow(treeNode);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForMul: Generate code for a MUL operation.
//
// Arguments:
// treeNode - the node to generate the code for
//
void CodeGen::genCodeForMul(GenTreeOp* treeNode)
{
assert(treeNode->OperIs(GT_MUL));
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
// Node's type must be int or long (only on x64), small integer types are not
// supported and floating point types are handled by genCodeForBinary.
assert(varTypeIsIntOrI(targetType));
instruction ins;
emitAttr size = emitTypeSize(treeNode);
bool isUnsignedMultiply = ((treeNode->gtFlags & GTF_UNSIGNED) != 0);
bool requiresOverflowCheck = treeNode->gtOverflowEx();
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
// there are 3 forms of x64 multiply:
    // 1-op form with 128-bit result: RDX:RAX = RAX * rm
// 2-op form: reg *= rm
// 3-op form: reg = rm * imm
genConsumeOperands(treeNode);
// This matches the 'mul' lowering in Lowering::SetMulOpCounts()
//
// immOp :: Only one operand can be an immediate
// rmOp :: Only one operand can be a memory op.
// regOp :: A register op (especially the operand that matches 'targetReg')
// (can be nullptr when we have both a memory op and an immediate op)
GenTree* immOp = nullptr;
GenTree* rmOp = op1;
GenTree* regOp;
if (op2->isContainedIntOrIImmed())
{
immOp = op2;
}
else if (op1->isContainedIntOrIImmed())
{
immOp = op1;
rmOp = op2;
}
if (immOp != nullptr)
{
// CQ: When possible use LEA for mul by imm 3, 5 or 9
ssize_t imm = immOp->AsIntConCommon()->IconValue();
if (!requiresOverflowCheck && rmOp->isUsedFromReg() && ((imm == 3) || (imm == 5) || (imm == 9)))
{
// We will use the LEA instruction to perform this multiply
// Note that an LEA with base=x, index=x and scale=(imm-1) computes x*imm when imm=3,5 or 9.
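            // For example (sketch), imm == 5 emits: lea targetReg, [rmOpReg + 4*rmOpReg]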
unsigned int scale = (unsigned int)(imm - 1);
GetEmitter()->emitIns_R_ARX(INS_lea, size, targetReg, rmOp->GetRegNum(), rmOp->GetRegNum(), scale, 0);
}
else if (!requiresOverflowCheck && rmOp->isUsedFromReg() && (imm == genFindLowestBit(imm)) && (imm != 0))
{
// Use shift for constant multiply when legal
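            // e.g. (sketch), imm == 8 emits: mov targetReg, rmOpReg  /  shl targetReg, 3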
uint64_t zextImm = static_cast<uint64_t>(static_cast<size_t>(imm));
unsigned int shiftAmount = genLog2(zextImm);
// Copy reg src to dest register
inst_Mov(targetType, targetReg, rmOp->GetRegNum(), /* canSkip */ true);
inst_RV_SH(INS_shl, size, targetReg, shiftAmount);
}
else
{
// use the 3-op form with immediate
ins = GetEmitter()->inst3opImulForReg(targetReg);
emit->emitInsBinary(ins, size, rmOp, immOp);
}
}
else // we have no contained immediate operand
{
regOp = op1;
rmOp = op2;
regNumber mulTargetReg = targetReg;
if (isUnsignedMultiply && requiresOverflowCheck)
{
ins = INS_mulEAX;
mulTargetReg = REG_RAX;
}
else
{
ins = INS_imul;
}
// Set rmOp to the memory operand (if any)
// or set regOp to the op2 when it has the matching target register for our multiply op
//
if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->GetRegNum() == mulTargetReg)))
{
regOp = op2;
rmOp = op1;
}
assert(regOp->isUsedFromReg());
// Setup targetReg when neither of the source operands was a matching register
inst_Mov(targetType, mulTargetReg, regOp->GetRegNum(), /* canSkip */ true);
emit->emitInsBinary(ins, size, treeNode, rmOp);
// Move the result to the desired register, if necessary
if (ins == INS_mulEAX)
{
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
}
}
if (requiresOverflowCheck)
{
// Overflow checking is only used for non-floating point types
noway_assert(!varTypeIsFloating(treeNode));
genCheckOverflow(treeNode);
}
genProduceReg(treeNode);
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// genSIMDSplitReturn: Generates code for returning a fixed-size SIMD type that lives
// in a single register, but is returned in multiple registers.
//
// Arguments:
// src - The source of the return
// retTypeDesc - The return type descriptor.
//
void CodeGen::genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc)
{
assert(varTypeIsSIMD(src));
assert(src->isUsedFromReg());
    // This is the case where the operand is in a single reg and needs to be
    // returned in multiple ABI return registers.
regNumber opReg = src->GetRegNum();
regNumber reg0 = retTypeDesc->GetABIReturnReg(0);
regNumber reg1 = retTypeDesc->GetABIReturnReg(1);
assert((reg0 != REG_NA) && (reg1 != REG_NA) && (opReg != REG_NA));
const bool srcIsFloatReg = genIsValidFloatReg(opReg);
const bool dstIsFloatReg = genIsValidFloatReg(reg0);
assert(srcIsFloatReg);
#ifdef TARGET_AMD64
assert(src->TypeIs(TYP_SIMD16));
assert(srcIsFloatReg == dstIsFloatReg);
if (opReg != reg0 && opReg != reg1)
{
// Operand reg is different from return regs.
// Copy opReg to reg0 and let it to be handled by one of the
// two cases below.
inst_Mov(TYP_SIMD16, reg0, opReg, /* canSkip */ false);
opReg = reg0;
}
if (opReg == reg0)
{
assert(opReg != reg1);
// reg1 = opReg.
inst_Mov(TYP_SIMD16, reg1, opReg, /* canSkip */ false);
}
else
{
assert(opReg == reg1);
// reg0 = opReg.
inst_Mov(TYP_SIMD16, reg0, opReg, /* canSkip */ false);
}
    // reg0 already has the required 8 bytes in bit positions [63:0].
    // Swap the upper and lower 8 bytes of reg1 so that the desired 8 bytes land in bit positions [63:0].
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, reg1, reg1, 0x01);
#else // TARGET_X86
assert(src->TypeIs(TYP_SIMD8));
assert(srcIsFloatReg != dstIsFloatReg);
assert((reg0 == REG_EAX) && (reg1 == REG_EDX));
// reg0 = opReg[31:0]
inst_Mov(TYP_INT, reg0, opReg, /* canSkip */ false);
    // reg1 = opReg[63:32]
if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
inst_RV_TT_IV(INS_pextrd, EA_4BYTE, reg1, src, 1);
}
else
{
        int8_t shuffleMask = 1; // we only need [63:32]->[31:0], the rest is not read.
inst_RV_TT_IV(INS_pshufd, EA_8BYTE, opReg, src, shuffleMask);
inst_Mov(TYP_INT, reg1, opReg, /* canSkip */ false);
}
#endif // TARGET_X86
}
#endif // FEATURE_SIMD
#if defined(TARGET_X86)
//------------------------------------------------------------------------
// genFloatReturn: Generates code for float return statement for x86.
//
// Note: treeNode's and op1's registers are already consumed.
//
// Arguments:
// treeNode - The GT_RETURN or GT_RETFILT tree node with float type.
//
// Return Value:
// None
//
void CodeGen::genFloatReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
assert(varTypeIsFloating(treeNode));
GenTree* op1 = treeNode->gtGetOp1();
// Spill the return value register from an XMM register to the stack, then load it on the x87 stack.
// If it already has a home location, use that. Otherwise, we need a temp.
if (genIsRegCandidateLocal(op1) && compiler->lvaGetDesc(op1->AsLclVarCommon())->lvOnFrame)
{
if (compiler->lvaGetDesc(op1->AsLclVarCommon())->GetRegNum() != REG_STK)
{
op1->gtFlags |= GTF_SPILL;
inst_TT_RV(ins_Store(op1->gtType, compiler->isSIMDTypeLocalAligned(op1->AsLclVarCommon()->GetLclNum())),
emitTypeSize(op1->TypeGet()), op1, op1->GetRegNum());
}
// Now, load it to the fp stack.
GetEmitter()->emitIns_S(INS_fld, emitTypeSize(op1), op1->AsLclVarCommon()->GetLclNum(), 0);
}
else
{
// Spill the value, which should be in a register, then load it to the fp stack.
// TODO-X86-CQ: Deal with things that are already in memory (don't call genConsumeReg yet).
op1->gtFlags |= GTF_SPILL;
regSet.rsSpillTree(op1->GetRegNum(), op1);
op1->gtFlags |= GTF_SPILLED;
op1->gtFlags &= ~GTF_SPILL;
TempDsc* t = regSet.rsUnspillInPlace(op1, op1->GetRegNum());
inst_FS_ST(INS_fld, emitActualTypeSize(op1->gtType), t, 0);
op1->gtFlags &= ~GTF_SPILLED;
regSet.tmpRlsTemp(t);
}
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT/GT_TEST_EQ/GT_TEST_NE/GT_CMP node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForCompare(GenTreeOp* tree)
{
assert(tree->OperIs(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE, GT_CMP));
// TODO-XArch-CQ: Check if we can use the currently set flags.
// TODO-XArch-CQ: Check for the case where we can simply transfer the carry bit to a register
// (signed < or >= where targetReg != REG_NA)
GenTree* op1 = tree->gtOp1;
var_types op1Type = op1->TypeGet();
if (varTypeIsFloating(op1Type))
{
genCompareFloat(tree);
}
else
{
genCompareInt(tree);
}
}
//------------------------------------------------------------------------
// genCodeForBT: Generates code for a GT_BT node.
//
// Arguments:
// tree - The node.
//
void CodeGen::genCodeForBT(GenTreeOp* bt)
{
assert(bt->OperIs(GT_BT));
GenTree* op1 = bt->gtGetOp1();
GenTree* op2 = bt->gtGetOp2();
var_types type = genActualType(op1->TypeGet());
assert(op1->isUsedFromReg() && op2->isUsedFromReg());
assert((genTypeSize(type) >= genTypeSize(TYP_INT)) && (genTypeSize(type) <= genTypeSize(TYP_I_IMPL)));
genConsumeOperands(bt);
    // Note that the emitter doesn't fully support INS_bt; it only supports the reg,reg
    // form and encodes the registers in reverse order. To get the correct order we need
    // to reverse the operands when calling emitIns_R_R.
GetEmitter()->emitIns_R_R(INS_bt, emitTypeSize(type), op2->GetRegNum(), op1->GetRegNum());
}
// clang-format off
const CodeGen::GenConditionDesc CodeGen::GenConditionDesc::map[32]
{
{ }, // NONE
{ }, // 1
{ EJ_jl }, // SLT
{ EJ_jle }, // SLE
{ EJ_jge }, // SGE
{ EJ_jg }, // SGT
{ EJ_js }, // S
{ EJ_jns }, // NS
{ EJ_je }, // EQ
{ EJ_jne }, // NE
{ EJ_jb }, // ULT
{ EJ_jbe }, // ULE
{ EJ_jae }, // UGE
{ EJ_ja }, // UGT
{ EJ_jb }, // C
{ EJ_jae }, // NC
// Floating point compare instructions (UCOMISS, UCOMISD etc.) set the condition flags as follows:
// ZF PF CF Meaning
// ---------------------
// 1 1 1 Unordered
// 0 0 0 Greater
// 0 0 1 Less Than
// 1 0 0 Equal
//
// Since ZF and CF are also set when the result is unordered, in some cases we first need to check
// PF before checking ZF/CF. In general, ordered conditions will result in a jump only if PF is not
// set and unordered conditions will result in a jump only if PF is set.
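    // For example, FEQ jumps only when PF == 0 (ordered) and ZF == 1 (equal), while FNEU jumps
    // when either PF == 1 (unordered) or ZF == 0 (not equal).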
{ EJ_jnp, GT_AND, EJ_je }, // FEQ
{ EJ_jne }, // FNE
{ EJ_jnp, GT_AND, EJ_jb }, // FLT
{ EJ_jnp, GT_AND, EJ_jbe }, // FLE
{ EJ_jae }, // FGE
{ EJ_ja }, // FGT
{ EJ_jo }, // O
{ EJ_jno }, // NO
{ EJ_je }, // FEQU
{ EJ_jp, GT_OR, EJ_jne }, // FNEU
{ EJ_jb }, // FLTU
{ EJ_jbe }, // FLEU
{ EJ_jp, GT_OR, EJ_jae }, // FGEU
{ EJ_jp, GT_OR, EJ_ja }, // FGTU
{ EJ_jp }, // P
{ EJ_jnp }, // NP
};
// clang-format on
//------------------------------------------------------------------------
// inst_SETCC: Generate code to set a register to 0 or 1 based on a condition.
//
// Arguments:
// condition - The condition
// type - The type of the value to be produced
// dstReg - The destination register to be set to 1 or 0
//
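// Notes:
//    For a compound floating-point condition such as GenCondition::FEQ this expands to a
//    sketch like "setnp dst; jp L; sete dst; L:", followed by a movzx when the destination
//    type is not a byte type.
//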
void CodeGen::inst_SETCC(GenCondition condition, var_types type, regNumber dstReg)
{
assert(varTypeIsIntegral(type));
assert(genIsValidIntReg(dstReg) && isByteReg(dstReg));
const GenConditionDesc& desc = GenConditionDesc::Get(condition);
inst_SET(desc.jumpKind1, dstReg);
if (desc.oper != GT_NONE)
{
BasicBlock* labelNext = genCreateTempLabel();
inst_JMP((desc.oper == GT_OR) ? desc.jumpKind1 : emitter::emitReverseJumpKind(desc.jumpKind1), labelNext);
inst_SET(desc.jumpKind2, dstReg);
genDefineTempLabel(labelNext);
}
if (!varTypeIsByte(type))
{
GetEmitter()->emitIns_Mov(INS_movzx, EA_1BYTE, dstReg, dstReg, /* canSkip */ false);
}
}
//------------------------------------------------------------------------
// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node.
//
// Arguments:
// tree - the GT_RETURNTRAP node
//
void CodeGen::genCodeForReturnTrap(GenTreeOp* tree)
{
assert(tree->OperGet() == GT_RETURNTRAP);
// this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
// based on the contents of 'data'
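    // (sketch): cmp <data>, 0 ; je skip ; call CORINFO_HELP_STOP_FOR_GC ; skip: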
GenTree* data = tree->gtOp1;
genConsumeRegs(data);
GenTreeIntCon cns = intForm(TYP_INT, 0);
cns.SetContained();
GetEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
BasicBlock* skipLabel = genCreateTempLabel();
inst_JMP(EJ_je, skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
regNumber tmpReg = tree->GetSingleTempReg(RBM_ALLINT);
assert(genIsValidIntReg(tmpReg));
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN, tmpReg);
genDefineTempLabel(skipLabel);
}
/*****************************************************************************
*
* Generate code for a single node in the tree.
* Preconditions: All operands have been evaluated
*
*/
void CodeGen::genCodeForTreeNode(GenTree* treeNode)
{
regNumber targetReg;
#if !defined(TARGET_64BIT)
if (treeNode->TypeGet() == TYP_LONG)
{
// All long enregistered nodes will have been decomposed into their
// constituent lo and hi nodes.
targetReg = REG_NA;
}
else
#endif // !defined(TARGET_64BIT)
{
targetReg = treeNode->GetRegNum();
}
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
#ifdef DEBUG
// Validate that all the operands for the current node are consumed in order.
// This is important because LSRA ensures that any necessary copies will be
// handled correctly.
lastConsumedNode = nullptr;
if (compiler->verbose)
{
unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
compiler->gtDispLIRNode(treeNode, "Generating: ");
}
#endif // DEBUG
// Is this a node whose value is already in a register? LSRA denotes this by
// setting the GTF_REUSE_REG_VAL flag.
if (treeNode->IsReuseRegVal())
{
// For now, this is only used for constant nodes.
assert((treeNode->OperIsConst()));
JITDUMP(" TreeNode is marked ReuseReg\n");
return;
}
// contained nodes are part of their parents for codegen purposes
// ex : immediates, most LEAs
if (treeNode->isContained())
{
return;
}
switch (treeNode->gtOper)
{
#ifndef JIT32_GCENCODER
case GT_START_NONGC:
GetEmitter()->emitDisableGC();
break;
#endif // !defined(JIT32_GCENCODER)
case GT_START_PREEMPTGC:
// Kill callee saves GC registers, and create a label
// so that information gets propagated to the emitter.
gcInfo.gcMarkRegSetNpt(RBM_INT_CALLEE_SAVED);
genDefineTempLabel(genCreateTempLabel());
break;
case GT_PROF_HOOK:
#ifdef PROFILING_SUPPORTED
// We should be seeing this only if profiler hook is needed
noway_assert(compiler->compIsProfilerHookNeeded());
// Right now this node is used only for tail calls. In future if
// we intend to use it for Enter or Leave hooks, add a data member
// to this node indicating the kind of profiler hook. For example,
// helper number can be used.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
break;
case GT_LCLHEAP:
genLclHeap(treeNode);
break;
case GT_CNS_INT:
#ifdef TARGET_X86
assert(!treeNode->IsIconHandle(GTF_ICON_TLS_HDL));
#endif // TARGET_X86
FALLTHROUGH;
case GT_CNS_DBL:
genSetRegToConst(targetReg, targetType, treeNode);
genProduceReg(treeNode);
break;
case GT_NOT:
case GT_NEG:
genCodeForNegNot(treeNode);
break;
case GT_BSWAP:
case GT_BSWAP16:
genCodeForBswap(treeNode);
break;
case GT_DIV:
if (varTypeIsFloating(treeNode->TypeGet()))
{
genCodeForBinary(treeNode->AsOp());
break;
}
FALLTHROUGH;
case GT_MOD:
case GT_UMOD:
case GT_UDIV:
genCodeForDivMod(treeNode->AsOp());
break;
case GT_OR:
case GT_XOR:
case GT_AND:
assert(varTypeIsIntegralOrI(treeNode));
FALLTHROUGH;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif // !defined(TARGET_64BIT)
case GT_ADD:
case GT_SUB:
genCodeForBinary(treeNode->AsOp());
break;
case GT_MUL:
if (varTypeIsFloating(treeNode->TypeGet()))
{
genCodeForBinary(treeNode->AsOp());
break;
}
genCodeForMul(treeNode->AsOp());
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
genCodeForShift(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LSH_HI:
case GT_RSH_LO:
genCodeForShiftLong(treeNode);
break;
#endif // !defined(TARGET_64BIT)
case GT_CAST:
genCodeForCast(treeNode->AsOp());
break;
case GT_BITCAST:
genCodeForBitCast(treeNode->AsOp());
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
genCodeForLclAddr(treeNode);
break;
case GT_LCL_FLD:
genCodeForLclFld(treeNode->AsLclFld());
break;
case GT_LCL_VAR:
genCodeForLclVar(treeNode->AsLclVar());
break;
case GT_STORE_LCL_FLD:
genCodeForStoreLclFld(treeNode->AsLclFld());
break;
case GT_STORE_LCL_VAR:
genCodeForStoreLclVar(treeNode->AsLclVar());
break;
case GT_RETFILT:
case GT_RETURN:
genReturn(treeNode);
break;
case GT_LEA:
// If we are here, it is the case where there is an LEA that cannot be folded into a parent instruction.
genLeaInstruction(treeNode->AsAddrMode());
break;
case GT_INDEX_ADDR:
genCodeForIndexAddr(treeNode->AsIndexAddr());
break;
case GT_IND:
genCodeForIndir(treeNode->AsIndir());
break;
case GT_INC_SATURATE:
genCodeForIncSaturate(treeNode);
break;
case GT_MULHI:
#ifdef TARGET_X86
case GT_MUL_LONG:
#endif
genCodeForMulHi(treeNode->AsOp());
break;
case GT_INTRINSIC:
genIntrinsic(treeNode);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
genSIMDIntrinsic(treeNode->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
genHWIntrinsic(treeNode->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
case GT_CKFINITE:
genCkfinite(treeNode);
break;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
case GT_CMP:
genCodeForCompare(treeNode->AsOp());
break;
case GT_JTRUE:
genCodeForJumpTrue(treeNode->AsOp());
break;
case GT_JCC:
genCodeForJcc(treeNode->AsCC());
break;
case GT_SETCC:
genCodeForSetcc(treeNode->AsCC());
break;
case GT_BT:
genCodeForBT(treeNode->AsOp());
break;
case GT_RETURNTRAP:
genCodeForReturnTrap(treeNode->AsOp());
break;
case GT_STOREIND:
genCodeForStoreInd(treeNode->AsStoreInd());
break;
case GT_COPY:
// This is handled at the time we call genConsumeReg() on the GT_COPY
break;
case GT_FIELD_LIST:
// Should always be marked contained.
assert(!"LIST, FIELD_LIST nodes should always be marked contained.");
break;
case GT_SWAP:
genCodeForSwap(treeNode->AsOp());
break;
case GT_PUTARG_STK:
genPutArgStk(treeNode->AsPutArgStk());
break;
case GT_PUTARG_REG:
genPutArgReg(treeNode->AsOp());
break;
case GT_CALL:
genCall(treeNode->AsCall());
break;
case GT_JMP:
genJmpMethod(treeNode);
break;
case GT_LOCKADD:
genCodeForLockAdd(treeNode->AsOp());
break;
case GT_XCHG:
case GT_XADD:
genLockedInstructions(treeNode->AsOp());
break;
case GT_XORR:
case GT_XAND:
NYI("Interlocked.Or and Interlocked.And aren't implemented for x86 yet.");
break;
case GT_MEMORYBARRIER:
{
CodeGen::BarrierKind barrierKind =
treeNode->gtFlags & GTF_MEMORYBARRIER_LOAD ? BARRIER_LOAD_ONLY : BARRIER_FULL;
instGen_MemoryBarrier(barrierKind);
break;
}
case GT_CMPXCHG:
genCodeForCmpXchg(treeNode->AsCmpXchg());
break;
case GT_RELOAD:
// do nothing - reload is just a marker.
// The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
// into the register specified in this node.
break;
case GT_NOP:
break;
case GT_KEEPALIVE:
genConsumeRegs(treeNode->AsOp()->gtOp1);
break;
case GT_NO_OP:
GetEmitter()->emitIns_Nop(1);
break;
case GT_BOUNDS_CHECK:
genRangeCheck(treeNode);
break;
case GT_PHYSREG:
genCodeForPhysReg(treeNode->AsPhysReg());
break;
case GT_NULLCHECK:
genCodeForNullCheck(treeNode->AsIndir());
break;
case GT_CATCH_ARG:
noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
/* Catch arguments get passed in a register. genCodeForBBlist()
would have marked it as holding a GC object, but not used. */
noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
genConsumeReg(treeNode);
break;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
// Have to clear the ShadowSP of the nesting level which encloses the finally. Generates:
// mov dword ptr [ebp-0xC], 0 // for some slot of the ShadowSP local var
size_t finallyNesting;
finallyNesting = treeNode->AsVal()->gtVal1;
noway_assert(treeNode->AsVal()->gtVal1 < compiler->compHndBBtabCount);
noway_assert(finallyNesting < compiler->compHndBBtabCount);
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
TARGET_POINTER_SIZE); // below doesn't underflow.
filterEndOffsetSlotOffs =
(unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
size_t curNestingSlotOffs;
curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, (unsigned)curNestingSlotOffs,
0);
break;
#endif // !FEATURE_EH_FUNCLETS
case GT_PINVOKE_PROLOG:
noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
#ifdef PSEUDORANDOM_NOP_INSERTION
// the runtime side requires the codegen here to be consistent
emit->emitDisableRandomNops();
#endif // PSEUDORANDOM_NOP_INSERTION
break;
case GT_LABEL:
genPendingCallLabel = genCreateTempLabel();
emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, genPendingCallLabel, treeNode->GetRegNum());
break;
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
case GT_JMPTABLE:
genJumpTable(treeNode);
break;
case GT_SWITCH_TABLE:
genTableBasedSwitch(treeNode);
break;
case GT_ARR_INDEX:
genCodeForArrIndex(treeNode->AsArrIndex());
break;
case GT_ARR_OFFSET:
genCodeForArrOffset(treeNode->AsArrOffs());
break;
case GT_CLS_VAR_ADDR:
emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->AsClsVar()->gtClsVarHnd, 0);
genProduceReg(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LONG:
assert(treeNode->isUsedFromReg());
genConsumeRegs(treeNode);
break;
#endif
case GT_IL_OFFSET:
// Do nothing; these nodes are simply markers for debug info.
break;
default:
{
#ifdef DEBUG
char message[256];
_snprintf_s(message, ArrLen(message), _TRUNCATE, "NYI: Unimplemented node type %s\n",
GenTree::OpName(treeNode->OperGet()));
NYIRAW(message);
#endif
assert(!"Unknown node in codegen");
}
break;
}
}
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------
// genMultiRegStoreToSIMDLocal: store multi-reg value to a single-reg SIMD local
//
// Arguments:
// lclNode - GenTreeLclVar of GT_STORE_LCL_VAR
//
// Return Value:
// None
//
void CodeGen::genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode)
{
assert(varTypeIsSIMD(lclNode));
regNumber dst = lclNode->GetRegNum();
GenTree* op1 = lclNode->gtGetOp1();
GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
unsigned regCount = actualOp1->GetMultiRegCount(compiler);
assert(op1->IsMultiRegNode());
genConsumeRegs(op1);
// Right now the only enregistrable structs supported are SIMD types.
// They are only returned in 1 or 2 registers - the 1 register case is
// handled as a regular STORE_LCL_VAR.
// This case is always a call (AsCall() will assert if it is not).
GenTreeCall* call = actualOp1->AsCall();
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
assert(retTypeDesc->GetReturnRegCount() == MAX_RET_REG_COUNT);
assert(regCount == 2);
regNumber targetReg = lclNode->GetRegNum();
regNumber reg0 = call->GetRegNumByIdx(0);
regNumber reg1 = call->GetRegNumByIdx(1);
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
// that need to be copied or reloaded.
regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(0);
if (reloadReg != REG_NA)
{
reg0 = reloadReg;
}
reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(1);
if (reloadReg != REG_NA)
{
reg1 = reloadReg;
}
}
#ifdef UNIX_AMD64_ABI
assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(0)));
assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(1)));
// This is a case where the two 8-bytes that comprise the operand are in
// two different xmm registers and need to be assembled into a single
// xmm register.
if (targetReg != reg0 && targetReg != reg1)
{
// targetReg = reg0;
// targetReg[127:64] = reg1[127:64]
inst_Mov(TYP_DOUBLE, targetReg, reg0, /* canSkip */ false);
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
}
else if (targetReg == reg0)
{
// (elided) targetReg = reg0
// targetReg[127:64] = reg1[127:64]
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
}
else
{
assert(targetReg == reg1);
// We need two shuffles to achieve this
// First:
// targetReg[63:0] = targetReg[63:0]
// targetReg[127:64] = reg0[63:0]
//
// Second:
// targetReg[63:0] = targetReg[127:64]
// targetReg[127:64] = targetReg[63:0]
//
// Essentially copy low 8-bytes from reg0 to high 8-bytes of targetReg
// and next swap low and high 8-bytes of targetReg to have them
// rearranged in the right order.
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg0, 0x00);
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, targetReg, 0x01);
}
genProduceReg(lclNode);
#elif defined(TARGET_X86)
if (TargetOS::IsWindows)
{
assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(0)));
assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(1)));
assert(lclNode->TypeIs(TYP_SIMD8));
        // This is a case where a SIMD8 struct is returned in [EAX, EDX]
        // and needs to be assembled into a single xmm register;
        // note that we can't check reg0==EAX, reg1==EDX because they could already have been moved.
inst_Mov(TYP_FLOAT, targetReg, reg0, /* canSkip */ false);
const emitAttr size = emitTypeSize(TYP_SIMD8);
if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
GetEmitter()->emitIns_SIMD_R_R_R_I(INS_pinsrd, size, targetReg, targetReg, reg1, 1);
}
else
{
regNumber tempXmm = lclNode->GetSingleTempReg();
assert(tempXmm != targetReg);
inst_Mov(TYP_FLOAT, tempXmm, reg1, /* canSkip */ false);
GetEmitter()->emitIns_SIMD_R_R_R(INS_punpckldq, size, targetReg, targetReg, tempXmm);
}
genProduceReg(lclNode);
}
#elif defined(TARGET_AMD64)
assert(!TargetOS::IsWindows || !"Multireg store to SIMD reg not supported on Windows x64");
#else
#error Unsupported or unset target architecture
#endif
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genEstablishFramePointer: Set up the frame pointer by adding an offset to the stack pointer.
//
// Arguments:
// delta - the offset to add to the current stack pointer to establish the frame pointer
// reportUnwindData - true if establishing the frame pointer should be reported in the OS unwind data.
//
void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData)
{
assert(compiler->compGeneratingProlog);
if (delta == 0)
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, /* canSkip */ false);
#ifdef USING_SCOPE_INFO
psiMoveESPtoEBP();
#endif // USING_SCOPE_INFO
}
else
{
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
// We don't update prolog scope info (there is no function to handle lea), but that is currently dead code
// anyway.
}
if (reportUnwindData)
{
compiler->unwindSetFrameReg(REG_FPBASE, delta);
}
}
//------------------------------------------------------------------------
// genAllocLclFrame: Probe the stack and allocate the local stack frame - subtract from SP.
//
// Arguments:
// frameSize - the size of the stack frame being allocated.
// initReg - register to use as a scratch register.
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
// maskArgRegsLiveIn - incoming argument registers that are currently live.
//
// Return value:
// None
//
void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn)
{
assert(compiler->compGeneratingProlog);
if (frameSize == 0)
{
return;
}
const target_size_t pageSize = compiler->eeGetPageSize();
if (frameSize == REGSIZE_BYTES)
{
// Frame size is the same as register size.
GetEmitter()->emitIns_R(INS_push, EA_PTRSIZE, REG_EAX);
compiler->unwindAllocStack(frameSize);
}
else if (frameSize < pageSize)
{
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, frameSize);
compiler->unwindAllocStack(frameSize);
const unsigned lastProbedLocToFinalSp = frameSize;
if (lastProbedLocToFinalSp + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize)
{
            // Almost a complete page below the last probed location remains unprobed. If the next action
            // on the stack might subtract from SP first, before touching the current SP, then we need to
            // probe at the very bottom. This can happen on x86, for example, when we copy an argument to
            // the stack using a "SUB ESP; REP MOV" strategy.
GetEmitter()->emitIns_R_AR(INS_test, EA_4BYTE, REG_EAX, REG_SPBASE, 0);
}
}
else
{
#ifdef TARGET_X86
int spOffset = -(int)frameSize;
if (compiler->info.compPublishStubParam)
{
GetEmitter()->emitIns_R(INS_push, EA_PTRSIZE, REG_SECRET_STUB_PARAM);
spOffset += REGSIZE_BYTES;
}
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_STACK_PROBE_HELPER_ARG, REG_SPBASE, spOffset);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_ARG);
genEmitHelperCall(CORINFO_HELP_STACK_PROBE, 0, EA_UNKNOWN);
if (compiler->info.compPublishStubParam)
{
GetEmitter()->emitIns_R(INS_pop, EA_PTRSIZE, REG_SECRET_STUB_PARAM);
GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, frameSize);
}
else
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG, /* canSkip */ false);
}
#else // !TARGET_X86
static_assert_no_msg((RBM_STACK_PROBE_HELPER_ARG & (RBM_SECRET_STUB_PARAM | RBM_DEFAULT_HELPER_CALL_TARGET)) ==
RBM_NONE);
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_STACK_PROBE_HELPER_ARG, REG_SPBASE, -(int)frameSize);
regSet.verifyRegUsed(REG_STACK_PROBE_HELPER_ARG);
genEmitHelperCall(CORINFO_HELP_STACK_PROBE, 0, EA_UNKNOWN);
if (initReg == REG_DEFAULT_HELPER_CALL_TARGET)
{
*pInitRegZeroed = false;
}
static_assert_no_msg((RBM_STACK_PROBE_HELPER_TRASH & RBM_STACK_PROBE_HELPER_ARG) == RBM_NONE);
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG, /* canSkip */ false);
#endif // !TARGET_X86
compiler->unwindAllocStack(frameSize);
if (initReg == REG_STACK_PROBE_HELPER_ARG)
{
*pInitRegZeroed = false;
}
}
#ifdef USING_SCOPE_INFO
if (!doubleAlignOrFramePointerUsed())
{
psiAdjustStackLevel(frameSize);
}
#endif // USING_SCOPE_INFO
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustment: add a specified constant value to the stack pointer.
// No probe is done.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// None.
//
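// Notes:
//    On x86 with a temporary register this emits (sketch) "mov regTmp, esp; sub regTmp, -spDelta;
//    mov esp, regTmp" so the emitter does not see the ESP change; otherwise it emits
//    "sub esp/rsp, -spDelta".
//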
void CodeGen::genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
// We assert that the SP change is less than one page. If it's greater, you should have called a
// function that does a probe, which will in turn call this function.
assert((target_size_t)(-spDelta) <= compiler->eeGetPageSize());
#ifdef TARGET_X86
if (regTmp != REG_NA)
{
// For x86, some cases don't want to use "sub ESP" because we don't want the emitter to track the adjustment
// to ESP. So do the work in the count register.
// TODO-CQ: manipulate ESP directly, to share code, reduce #ifdefs, and improve CQ. This would require
// creating a way to temporarily turn off the emitter's tracking of ESP, maybe marking instrDescs as "don't
// track".
inst_Mov(TYP_I_IMPL, regTmp, REG_SPBASE, /* canSkip */ false);
inst_RV_IV(INS_sub, regTmp, (target_ssize_t)-spDelta, EA_PTRSIZE);
inst_Mov(TYP_I_IMPL, REG_SPBASE, regTmp, /* canSkip */ false);
}
else
#endif // TARGET_X86
{
inst_RV_IV(INS_sub, REG_SPBASE, (target_ssize_t)-spDelta, EA_PTRSIZE);
}
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentWithProbe: add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Should only be called as a helper for
// genStackPointerConstantAdjustmentLoopWithProbe.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero. If zero, the probe happens,
// but the stack pointer doesn't move.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp)
{
GetEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
genStackPointerConstantAdjustment(spDelta, regTmp);
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentLoopWithProbe: Add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Generates one probe per page, up to the total amount required.
// This will generate a sequence of probes in-line. It is required for the case where we need to expose
// (not hide) the stack level adjustment. We can't use the dynamic loop in that case, because the total
// stack adjustment would not be visible to the emitter. It would be possible to use this version for
// multiple hidden constant stack level adjustments but we don't do that currently (we use the loop
// version in genStackPointerDynamicAdjustmentWithProbe instead).
//
// Arguments:
// spDelta - the value to add to SP. Must be negative.
// regTmp - x86 only: an available temporary register. If not REG_NA, hide the SP
// adjustment from the emitter, using this register.
//
// Return Value:
// Offset in bytes from SP to last probed address.
//
target_ssize_t CodeGen::genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
const target_size_t pageSize = compiler->eeGetPageSize();
ssize_t spRemainingDelta = spDelta;
do
{
ssize_t spOneDelta = -(ssize_t)min((target_size_t)-spRemainingDelta, pageSize);
genStackPointerConstantAdjustmentWithProbe(spOneDelta, regTmp);
spRemainingDelta -= spOneDelta;
} while (spRemainingDelta < 0);
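// Illustrative example (assuming a 4 KB page): for spDelta = -10000 the loop above emits three
// probe+adjust pairs of 4096, 4096 and 1808 bytes, so lastTouchDelta computed below is
// 10000 % 4096 = 1808.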
// What offset from the final SP was the last probe? This depends on the fact that
// genStackPointerConstantAdjustmentWithProbe() probes first, then does "SUB SP".
target_size_t lastTouchDelta = (target_size_t)(-spDelta) % pageSize;
if ((lastTouchDelta == 0) || (lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize))
{
// We haven't probed almost a complete page. If lastTouchDelta==0, then spDelta was an exact
// multiple of pageSize, which means we last probed exactly one page back. Otherwise, we probed
// the page, but very far from the end. If the next action on the stack might subtract from SP
// first, before touching the current SP, then we do one more probe at the very bottom. This can
// happen on x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV"
// strategy.
GetEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, 0);
lastTouchDelta = 0;
}
return lastTouchDelta;
}
//------------------------------------------------------------------------
// genStackPointerDynamicAdjustmentWithProbe: add a register value to the stack pointer,
// and probe the stack as appropriate.
//
// Note that for x86, we hide the ESP adjustment from the emitter. To do that, currently,
// requires a temporary register and extra code.
//
// Arguments:
// regSpDelta - the register value to add to SP. The value in this register must be negative.
// This register might be trashed.
// regTmp - an available temporary register. Will be trashed.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta, regNumber regTmp)
{
assert(regSpDelta != REG_NA);
assert(regTmp != REG_NA);
// Tickle the pages to ensure that ESP is always valid and is
// in sync with the "stack guard page". Note that in the worst
// case ESP is on the last byte of the guard page. Thus you must
// touch ESP-0 first not ESP-0x1000.
//
// Another subtlety is that you don't want ESP to be exactly on the
// boundary of the guard page because PUSH is predecrement, thus
// call setup would not touch the guard page but just beyond it.
//
// Note that we go through a few hoops so that ESP never points to
// illegal pages at any time during the tickling process
//
// add regSpDelta, ESP // reg now holds ultimate ESP
// jb loop // result is smaller than original ESP (no wrap around)
// xor regSpDelta, regSpDelta // Overflow, pick lowest possible number
// loop:
// test ESP, [ESP+0] // tickle the page
// mov regTmp, ESP
// sub regTmp, eeGetPageSize()
// mov ESP, regTmp
// cmp ESP, regSpDelta
// jae loop
// mov ESP, regSpDelta
BasicBlock* loop = genCreateTempLabel();
inst_RV_RV(INS_add, regSpDelta, REG_SPBASE, TYP_I_IMPL);
inst_JMP(EJ_jb, loop);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regSpDelta);
genDefineTempLabel(loop);
// Tickle the decremented value. Note that it must be done BEFORE the update of ESP since ESP might already
// be on the guard page. It is OK to leave the final value of ESP on the guard page.
GetEmitter()->emitIns_AR_R(INS_test, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
// Subtract a page from ESP. This is a trick to avoid the emitter trying to track the
// decrement of the ESP - we do the subtraction in another reg instead of adjusting ESP directly.
inst_Mov(TYP_I_IMPL, regTmp, REG_SPBASE, /* canSkip */ false);
inst_RV_IV(INS_sub, regTmp, compiler->eeGetPageSize(), EA_PTRSIZE);
inst_Mov(TYP_I_IMPL, REG_SPBASE, regTmp, /* canSkip */ false);
inst_RV_RV(INS_cmp, REG_SPBASE, regSpDelta, TYP_I_IMPL);
inst_JMP(EJ_jae, loop);
// Move the final value to ESP
inst_Mov(TYP_I_IMPL, REG_SPBASE, regSpDelta, /* canSkip */ false);
}
//------------------------------------------------------------------------
// genLclHeap: Generate code for localloc.
//
// Arguments:
// tree - the localloc tree to generate.
//
// Notes:
// Note that for x86, we don't track ESP movements while generating the localloc code.
// The ESP tracking is used to report stack pointer-relative GC info, which is not
// interesting while doing the localloc construction. Also, for functions with localloc,
// we have EBP frames, and EBP-relative locals, and ESP-relative accesses only for function
// call arguments.
//
// For x86, we store the ESP after the localloc is complete in the LocAllocSP
// variable. This variable is implicitly reported to the VM in the GC info (its position
// is defined by convention relative to other items), and is used by the GC to find the
// "base" stack pointer in functions with localloc.
//
void CodeGen::genLclHeap(GenTree* tree)
{
assert(tree->OperGet() == GT_LCLHEAP);
assert(compiler->compLocallocUsed);
GenTree* size = tree->AsOp()->gtOp1;
noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
regNumber targetReg = tree->GetRegNum();
regNumber regCnt = REG_NA;
var_types type = genActualType(size->gtType);
emitAttr easz = emitTypeSize(type);
BasicBlock* endLabel = nullptr;
target_ssize_t lastTouchDelta = (target_ssize_t)-1;
#ifdef DEBUG
genStackPointerCheck(compiler->opts.compStackCheckOnRet, compiler->lvaReturnSpCheck);
#endif
noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
noway_assert(genStackLevel == 0); // Can't have anything on the stack
target_size_t stackAdjustment = 0;
target_size_t locAllocStackOffset = 0;
// compute the amount of memory to allocate to properly STACK_ALIGN.
size_t amount = 0;
if (size->IsCnsIntOrI())
{
// If size is a constant, then it must be contained.
assert(size->isContained());
// If amount is zero then return null in targetReg
amount = size->AsIntCon()->gtIconVal;
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
goto BAILOUT;
}
// 'amount' is the total number of bytes to localloc to properly STACK_ALIGN
amount = AlignUp(amount, STACK_ALIGN);
}
else
{
// The localloc requested memory size is non-constant.
// Put the size value in targetReg. If it is zero, bail out by returning null in targetReg.
genConsumeRegAndCopy(size, targetReg);
endLabel = genCreateTempLabel();
GetEmitter()->emitIns_R_R(INS_test, easz, targetReg, targetReg);
inst_JMP(EJ_je, endLabel);
// Compute the size of the block to allocate and perform alignment.
// If compInitMem=true, we can reuse targetReg as regcnt,
// since we don't need any internal registers.
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
// Above, we put the size in targetReg. Now, copy it to our new temp register if necessary.
inst_Mov(size->TypeGet(), regCnt, targetReg, /* canSkip */ true);
}
// Round up the number of bytes to allocate to a STACK_ALIGN boundary. This is done
// by code like:
// add reg, 15
// and reg, -16
// However, in the initialized memory case, we need the count of STACK_ALIGN-sized
// elements, not a byte count, after the alignment. So instead of the "and", which
// becomes unnecessary, generate a shift, e.g.:
// add reg, 15
// shr reg, 4
inst_RV_IV(INS_add, regCnt, STACK_ALIGN - 1, emitActualTypeSize(type));
if (compiler->info.compInitMem)
{
// Convert the count from a count of bytes to a loop count. We will loop once per
// stack alignment size, so each loop will zero 4 bytes on Windows/x86, and 16 bytes
// on x64 and Linux/x86.
//
// Note that we zero a single reg-size word per iteration on x86, and 2 reg-size
// words per iteration on x64. We will shift off all the stack alignment bits
// added above, so there is no need for an 'and' instruction.
// --- shr regCnt, 2 (or 4) ---
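// For example (illustrative), on x64 with STACK_ALIGN = 16 (STACK_ALIGN_SHIFT = 4), a request of
// 40 bytes becomes (40 + 15) >> 4 = 3 loop iterations, zeroing 48 bytes in total.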
inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_PTRSIZE, regCnt, STACK_ALIGN_SHIFT);
}
else
{
// Otherwise, mask off the low bits to align the byte count.
inst_RV_IV(INS_AND, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
}
}
bool initMemOrLargeAlloc; // Declaration must be separate from initialization to avoid clang compiler error.
initMemOrLargeAlloc = compiler->info.compInitMem || (amount >= compiler->eeGetPageSize()); // must be >= not >
#if FEATURE_FIXED_OUT_ARGS
// If we have an outgoing arg area then we must adjust the SP by popping off the
// outgoing arg area. We will restore it right before we return from this method.
//
// Localloc returns stack space that is aligned to STACK_ALIGN bytes. The following
// are the cases that need to be handled:
// i) Method has out-going arg area.
// It is guaranteed that size of out-going arg area is STACK_ALIGN'ed (see fgMorphArgs).
// Therefore, we will pop off the out-going arg area from RSP before allocating the localloc space.
// ii) Method has no out-going arg area.
// Nothing to pop off from the stack.
if (compiler->lvaOutgoingArgSpaceSize > 0)
{
assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
// aligned
// If the localloc amount is a small enough constant, and we're not initializing the allocated
// memory, then don't bother popping off the outgoing arg space first; just allocate the amount
// of space needed by the allocation, and call the bottom part the new outgoing arg space.
if ((amount > 0) && !initMemOrLargeAlloc)
{
lastTouchDelta = genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)amount, REG_NA);
stackAdjustment = 0;
locAllocStackOffset = (target_size_t)compiler->lvaOutgoingArgSpaceSize;
goto ALLOC_DONE;
}
inst_RV_IV(INS_add, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize, EA_PTRSIZE);
stackAdjustment += (target_size_t)compiler->lvaOutgoingArgSpaceSize;
locAllocStackOffset = stackAdjustment;
}
#endif
if (size->IsCnsIntOrI())
{
// We should reach here only for non-zero, constant size allocations.
assert(amount > 0);
assert((amount % STACK_ALIGN) == 0);
assert((amount % REGSIZE_BYTES) == 0);
// For small allocations we will generate up to six inline "push 0" instructions
size_t cntRegSizedWords = amount / REGSIZE_BYTES;
if (compiler->info.compInitMem && (cntRegSizedWords <= 6))
{
for (; cntRegSizedWords != 0; cntRegSizedWords--)
{
inst_IV(INS_push_hide, 0); // push_hide means don't track the stack
}
lastTouchDelta = 0;
goto ALLOC_DONE;
}
#ifdef TARGET_X86
bool needRegCntRegister = true;
#else // !TARGET_X86
bool needRegCntRegister = initMemOrLargeAlloc;
#endif // !TARGET_X86
if (needRegCntRegister)
{
// If compInitMem=true, we can reuse targetReg as regcnt.
// Since size is a constant, regCnt is not yet initialized.
assert(regCnt == REG_NA);
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
}
}
if (!initMemOrLargeAlloc)
{
// Since the size is less than a page, and we don't need to zero init memory, simply adjust ESP.
// ESP might already be in the guard page, so we must touch it BEFORE
// the alloc, not after.
assert(amount < compiler->eeGetPageSize()); // must be < not <=
lastTouchDelta = genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)amount, regCnt);
goto ALLOC_DONE;
}
// else, "mov regCnt, amount"
if (compiler->info.compInitMem)
{
// When initializing memory, we want 'amount' to be the loop count.
assert((amount % STACK_ALIGN) == 0);
amount /= STACK_ALIGN;
}
instGen_Set_Reg_To_Imm(((size_t)(int)amount == amount) ? EA_4BYTE : EA_8BYTE, regCnt, amount);
}
if (compiler->info.compInitMem)
{
// At this point 'regCnt' is set to the number of loop iterations for this loop, if each
// iteration zeros (and subtracts from the stack pointer) STACK_ALIGN bytes.
// Since we have to zero out the allocated memory AND ensure that RSP is always valid
// by tickling the pages, we will just push 0's on the stack.
assert(genIsValidIntReg(regCnt));
// Loop:
BasicBlock* loop = genCreateTempLabel();
genDefineTempLabel(loop);
static_assert_no_msg((STACK_ALIGN % REGSIZE_BYTES) == 0);
unsigned const count = (STACK_ALIGN / REGSIZE_BYTES);
for (unsigned i = 0; i < count; i++)
{
inst_IV(INS_push_hide, 0); // --- push REG_SIZE bytes of 0
}
// Note that the stack must always be aligned to STACK_ALIGN bytes
// Decrement the loop counter and loop if not done.
inst_RV(INS_dec, regCnt, TYP_I_IMPL);
inst_JMP(EJ_jne, loop);
lastTouchDelta = 0;
}
else
{
// At this point 'regCnt' is set to the total number of bytes to localloc.
// Negate this value before calling the function to adjust the stack (which
// adds to ESP).
inst_RV(INS_NEG, regCnt, TYP_I_IMPL);
regNumber regTmp = tree->GetSingleTempReg();
genStackPointerDynamicAdjustmentWithProbe(regCnt, regTmp);
// lastTouchDelta is dynamic, and can be up to a page. So if we have outgoing arg space,
// we're going to assume the worst and probe.
}
ALLOC_DONE:
// Re-adjust SP to allocate out-going arg area. Note: this also requires probes, if we have
// a very large stack adjustment! For simplicity, we use the same function used elsewhere,
// which probes the current address before subtracting. We may end up probing multiple
// times relatively "nearby".
if (stackAdjustment > 0)
{
assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
assert(lastTouchDelta >= -1);
if ((lastTouchDelta == (target_ssize_t)-1) ||
(stackAdjustment + (target_size_t)lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES >
compiler->eeGetPageSize()))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)stackAdjustment, REG_NA);
}
else
{
genStackPointerConstantAdjustment(-(ssize_t)stackAdjustment, REG_NA);
}
}
// Return the stackalloc'ed address in result register.
// TargetReg = RSP + locAllocStackOffset
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, (int)locAllocStackOffset);
if (endLabel != nullptr)
{
genDefineTempLabel(endLabel);
}
BAILOUT:
#ifdef JIT32_GCENCODER
if (compiler->lvaLocAllocSPvar != BAD_VAR_NUM)
{
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
}
#endif // JIT32_GCENCODER
#ifdef DEBUG
// Update local variable to reflect the new stack pointer.
if (compiler->opts.compStackCheckOnRet)
{
noway_assert(compiler->lvaReturnSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaReturnSpCheck)->lvOnFrame);
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnSpCheck, 0);
}
#endif
genProduceReg(tree);
}
void CodeGen::genCodeForStoreBlk(GenTreeBlk* storeBlkNode)
{
assert(storeBlkNode->OperIs(GT_STORE_OBJ, GT_STORE_DYN_BLK, GT_STORE_BLK));
if (storeBlkNode->OperIs(GT_STORE_OBJ))
{
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
assert(storeBlkNode->OperIsCopyBlkOp());
assert(storeBlkNode->AsObj()->GetLayout()->HasGCPtr());
genCodeForCpObj(storeBlkNode->AsObj());
return;
}
bool isCopyBlk = storeBlkNode->OperIsCopyBlkOp();
switch (storeBlkNode->gtBlkOpKind)
{
#ifdef TARGET_AMD64
case GenTreeBlk::BlkOpKindHelper:
assert(!storeBlkNode->gtBlkOpGcUnsafe);
if (isCopyBlk)
{
genCodeForCpBlkHelper(storeBlkNode);
}
else
{
genCodeForInitBlkHelper(storeBlkNode);
}
break;
#endif // TARGET_AMD64
case GenTreeBlk::BlkOpKindRepInstr:
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
if (isCopyBlk)
{
genCodeForCpBlkRepMovs(storeBlkNode);
}
else
{
genCodeForInitBlkRepStos(storeBlkNode);
}
break;
case GenTreeBlk::BlkOpKindUnroll:
if (isCopyBlk)
{
#ifndef JIT32_GCENCODER
if (storeBlkNode->gtBlkOpGcUnsafe)
{
GetEmitter()->emitDisableGC();
}
#endif
genCodeForCpBlkUnroll(storeBlkNode);
#ifndef JIT32_GCENCODER
if (storeBlkNode->gtBlkOpGcUnsafe)
{
GetEmitter()->emitEnableGC();
}
#endif
}
else
{
#ifndef JIT32_GCENCODER
assert(!storeBlkNode->gtBlkOpGcUnsafe);
#endif
genCodeForInitBlkUnroll(storeBlkNode);
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genCodeForInitBlkRepStos: Generate code for InitBlk using rep stos.
//
// Arguments:
// initBlkNode - The Block store for which we are generating code.
//
void CodeGen::genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode)
{
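// Destination address goes in RDI, the fill value in RAX (rep stosb only uses AL), and the byte
// count in RCX; "rep stosb" then stores AL to [RDI] RCX times, advancing RDI as it goes.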
genConsumeBlockOp(initBlkNode, REG_RDI, REG_RAX, REG_RCX);
instGen(INS_r_stosb);
}
//----------------------------------------------------------------------------------
// genCodeForInitBlkUnroll: Generate unrolled block initialization code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
regNumber dstAddrIndexReg = REG_NA;
unsigned dstAddrIndexScale = 1;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = dstAddr->AsAddrMode();
if (addrMode->HasBase())
{
dstAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
dstAddrIndexReg = genConsumeReg(addrMode->Index());
dstAddrIndexScale = addrMode->GetScale();
}
dstOffset = addrMode->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
regNumber srcIntReg = REG_NA;
GenTree* src = node->Data();
if (src->OperIs(GT_INIT_VAL))
{
assert(src->isContained());
src = src->AsUnOp()->gtGetOp1();
}
unsigned size = node->GetLayout()->GetSize();
// An SSE mov that accesses data larger than 8 bytes may be implemented using
// multiple memory accesses. Hence, the JIT must not use such stores when
// INITBLK zeroes a struct that contains GC pointers and can be observed by
// other threads (i.e. when dstAddr is not an address of a local).
// For example, this can happen when initializing a struct field of an object.
const bool canUse16BytesSimdMov = !node->IsOnHeapAndContainsReferences();
#ifdef TARGET_AMD64
// On Amd64 the JIT will not use SIMD stores for such structs and instead
// will always allocate a GP register for src node.
const bool willUseSimdMov = canUse16BytesSimdMov && (size >= XMM_REGSIZE_BYTES);
#else
// On X86 the JIT will use SIMD moves (movdqu/movq) for structs that are 16 bytes or larger,
// since that is more beneficial than using two mov-s from a GP register.
const bool willUseSimdMov = (size >= 16);
#endif
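// Illustrative example: on x64, a 24-byte init from a value register emits one 16-byte SIMD store
// followed by a single 8-byte GPR store for the remainder.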
if (!src->isContained())
{
srcIntReg = genConsumeReg(src);
}
else
{
// If src is contained then it must be 0.
assert(src->IsIntegralConst(0));
assert(willUseSimdMov);
#ifdef TARGET_AMD64
assert(size >= XMM_REGSIZE_BYTES);
#else
assert(size % 8 == 0);
#endif
}
emitter* emit = GetEmitter();
assert(size <= INT32_MAX);
assert(dstOffset < (INT32_MAX - static_cast<int>(size)));
if (willUseSimdMov)
{
regNumber srcXmmReg = node->GetSingleTempReg(RBM_ALLFLOAT);
unsigned regSize = (size >= YMM_REGSIZE_BYTES) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX)
? YMM_REGSIZE_BYTES
: XMM_REGSIZE_BYTES;
if (src->gtSkipReloadOrCopy()->IsIntegralConst(0))
{
// If the source is constant 0 then always use xorps, it's faster
// than copying the constant from a GPR to a XMM register.
emit->emitIns_R_R(INS_xorps, EA_ATTR(regSize), srcXmmReg, srcXmmReg);
}
else
{
emit->emitIns_Mov(INS_movd, EA_PTRSIZE, srcXmmReg, srcIntReg, /* canSkip */ false);
emit->emitIns_R_R(INS_punpckldq, EA_16BYTE, srcXmmReg, srcXmmReg);
#ifdef TARGET_X86
// For x86, we need one more to convert it from 8 bytes to 16 bytes.
emit->emitIns_R_R(INS_punpckldq, EA_16BYTE, srcXmmReg, srcXmmReg);
#endif
if (regSize == YMM_REGSIZE_BYTES)
{
// Extend the bytes in the lower lanes to the upper lanes
emit->emitIns_R_R_R_I(INS_vinsertf128, EA_32BYTE, srcXmmReg, srcXmmReg, srcXmmReg, 1);
}
}
instruction simdMov = simdUnalignedMovIns();
unsigned bytesWritten = 0;
while (bytesWritten < size)
{
#ifdef TARGET_X86
if (!canUse16BytesSimdMov || (bytesWritten + regSize > size))
{
simdMov = INS_movq;
regSize = 8;
}
#endif
if (bytesWritten + regSize > size)
{
assert(srcIntReg != REG_NA);
break;
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
dstOffset += regSize;
bytesWritten += regSize;
if (regSize == YMM_REGSIZE_BYTES && size - bytesWritten < YMM_REGSIZE_BYTES)
{
regSize = XMM_REGSIZE_BYTES;
}
}
size -= bytesWritten;
}
// Fill the remainder using normal stores.
#ifdef TARGET_AMD64
unsigned regSize = REGSIZE_BYTES;
while (regSize > size)
{
regSize /= 2;
}
for (; size > regSize; size -= regSize, dstOffset += regSize)
{
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
if (size > 0)
{
unsigned shiftBack = regSize - size;
assert(shiftBack <= regSize);
dstOffset -= shiftBack;
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#else // TARGET_X86
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), srcIntReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#endif
}
#ifdef TARGET_AMD64
//------------------------------------------------------------------------
// genCodeForInitBlkHelper - Generate code for an InitBlk node by means of the VM memset helper call
//
// Arguments:
// initBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForInitBlkHelper(GenTreeBlk* initBlkNode)
{
// Destination address goes in arg0, the fill value goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
}
#endif // TARGET_AMD64
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// Generate code for a load from some address + offset
// baseNode: tree node which can be either a local address or arbitrary node
// offset: distance from the baseNode from which to load
void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* baseNode, unsigned offset)
{
emitter* emit = GetEmitter();
if (baseNode->OperIsLocalAddr())
{
const GenTreeLclVarCommon* lclVar = baseNode->AsLclVarCommon();
offset += lclVar->GetLclOffs();
emit->emitIns_R_S(ins, size, dst, lclVar->GetLclNum(), offset);
}
else
{
emit->emitIns_R_AR(ins, size, dst, baseNode->GetRegNum(), offset);
}
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
//----------------------------------------------------------------------------------
// genCodeForCpBlkUnroll - Generate unrolled block copy code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
regNumber dstAddrIndexReg = REG_NA;
unsigned dstAddrIndexScale = 1;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = dstAddr->AsAddrMode();
if (addrMode->HasBase())
{
dstAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
dstAddrIndexReg = genConsumeReg(addrMode->Index());
dstAddrIndexScale = addrMode->GetScale();
}
dstOffset = addrMode->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
const GenTreeLclVarCommon* lclVar = dstAddr->AsLclVarCommon();
dstLclNum = lclVar->GetLclNum();
dstOffset = lclVar->GetLclOffs();
}
unsigned srcLclNum = BAD_VAR_NUM;
regNumber srcAddrBaseReg = REG_NA;
regNumber srcAddrIndexReg = REG_NA;
unsigned srcAddrIndexScale = 1;
int srcOffset = 0;
GenTree* src = node->Data();
assert(src->isContained());
if (src->OperIs(GT_LCL_VAR, GT_LCL_FLD))
{
srcLclNum = src->AsLclVarCommon()->GetLclNum();
srcOffset = src->AsLclVarCommon()->GetLclOffs();
}
else
{
assert(src->OperIs(GT_IND));
GenTree* srcAddr = src->AsIndir()->Addr();
if (!srcAddr->isContained())
{
srcAddrBaseReg = genConsumeReg(srcAddr);
}
else if (srcAddr->OperIsAddrMode())
{
GenTreeAddrMode* addrMode = srcAddr->AsAddrMode();
if (addrMode->HasBase())
{
srcAddrBaseReg = genConsumeReg(addrMode->Base());
}
if (addrMode->HasIndex())
{
srcAddrIndexReg = genConsumeReg(addrMode->Index());
srcAddrIndexScale = addrMode->GetScale();
}
srcOffset = addrMode->Offset();
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(srcOffset < (INT32_MAX - static_cast<int>(size)));
assert(dstOffset < (INT32_MAX - static_cast<int>(size)));
if (size >= XMM_REGSIZE_BYTES)
{
regNumber tempReg = node->GetSingleTempReg(RBM_ALLFLOAT);
instruction simdMov = simdUnalignedMovIns();
// Get the largest SIMD register available if the size is large enough
unsigned regSize = (size >= YMM_REGSIZE_BYTES) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX)
? YMM_REGSIZE_BYTES
: XMM_REGSIZE_BYTES;
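// Illustrative example: a 40-byte copy with AVX available is done as one 32-byte YMM load/store
// pair followed by one 8-byte GPR load/store pair for the remainder.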
while (size >= regSize)
{
for (; size >= regSize; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(simdMov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(simdMov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(simdMov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(simdMov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
// Size is too large for YMM moves, try stepping down to XMM size to finish SIMD copies.
if (regSize == YMM_REGSIZE_BYTES)
{
regSize = XMM_REGSIZE_BYTES;
}
}
}
// Fill the remainder with normal loads/stores
if (size > 0)
{
regNumber tempReg = node->GetSingleTempReg(RBM_ALLINT);
#ifdef TARGET_AMD64
unsigned regSize = REGSIZE_BYTES;
while (regSize > size)
{
regSize /= 2;
}
for (; size > regSize; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
if (size > 0)
{
unsigned shiftBack = regSize - size;
assert(shiftBack <= regSize);
srcOffset -= shiftBack;
dstOffset -= shiftBack;
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#else // TARGET_X86
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(INS_mov, EA_ATTR(regSize), tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_ARX(INS_mov, EA_ATTR(regSize), tempReg, srcAddrBaseReg, srcAddrIndexReg,
srcAddrIndexScale, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(INS_mov, EA_ATTR(regSize), tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(INS_mov, EA_ATTR(regSize), tempReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
}
#endif
}
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkRepMovs - Generate code for CpBlk by using rep movs
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode)
{
// Destination address goes in RDI, source address goes in RSI, and size goes in RCX.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(cpBlkNode, REG_RDI, REG_RSI, REG_RCX);
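// "rep movsb" copies RCX bytes from [RSI] to [RDI], advancing both pointers as it goes.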
instGen(INS_r_movsb);
}
#ifdef FEATURE_PUT_STRUCT_ARG_STK
//------------------------------------------------------------------------
// CodeGen::genMove8IfNeeded: Conditionally move 8 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// longTmpReg - The tmp register to be used for the long value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (8 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// On x86, longTmpReg must be an xmm reg; on x64 it must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove8IfNeeded(unsigned size, regNumber longTmpReg, GenTree* srcAddr, unsigned offset)
{
#ifdef TARGET_X86
instruction longMovIns = INS_movq;
#else // !TARGET_X86
instruction longMovIns = INS_mov;
#endif // !TARGET_X86
if ((size & 8) != 0)
{
genCodeForLoadOffset(longMovIns, EA_8BYTE, longTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_LONG, longTmpReg, offset);
return 8;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove4IfNeeded: Conditionally move 4 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// intTmpReg - The tmp register to be used for the long value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (4 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove4IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 4) != 0)
{
genCodeForLoadOffset(INS_mov, EA_4BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_INT, intTmpReg, offset);
return 4;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove2IfNeeded: Conditionally move 2 bytes of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// intTmpReg - The tmp register to be used for the long value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (2 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove2IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 2) != 0)
{
genCodeForLoadOffset(INS_mov, EA_2BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_SHORT, intTmpReg, offset);
return 2;
}
return 0;
}
//------------------------------------------------------------------------
// CodeGen::genMove1IfNeeded: Conditionally move 1 byte of a struct to the argument area
//
// Arguments:
// size - The size of bytes remaining to be moved
// intTmpReg - The tmp register to be used for the long value
// srcAddr - The address of the source struct
// offset - The current offset being copied
//
// Return Value:
// Returns the number of bytes moved (1 or 0).
//
// Notes:
// This is used in the PutArgStkKindUnroll case, to move any bytes that are
// not an even multiple of 16.
// intTmpReg must be an integer register.
// This is checked by genStoreRegToStackArg.
//
unsigned CodeGen::genMove1IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
{
if ((size & 1) != 0)
{
genCodeForLoadOffset(INS_mov, EA_1BYTE, intTmpReg, srcAddr, offset);
genStoreRegToStackArg(TYP_BYTE, intTmpReg, offset);
return 1;
}
return 0;
}
//---------------------------------------------------------------------------------------------------------------//
// genStructPutArgUnroll: Generates code for passing a struct arg on stack by value using loop unrolling.
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// m_stkArgVarNum must be set to the base var number, relative to which the by-val struct will be copied to the
// stack.
//
// TODO-Amd64-Unix: Try to share code with copyblk.
// Need refactoring of copyblk before it could be used for putarg_stk.
// The difference for now is that a putarg_stk contains its children, while cpyblk does not.
// This creates differences in code. After some significant refactoring it could be reused.
//
void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode)
{
GenTree* src = putArgNode->AsOp()->gtOp1;
// We will never call this method for SIMD types, which are stored directly
// in genPutStructArgStk().
assert(src->isContained() && src->OperIs(GT_OBJ) && src->TypeIs(TYP_STRUCT));
assert(!src->AsObj()->GetLayout()->HasGCPtr());
#ifdef TARGET_X86
assert(!m_pushStkArg);
#endif
unsigned size = putArgNode->GetStackByteSize();
#ifdef TARGET_X86
assert((XMM_REGSIZE_BYTES <= size) && (size <= CPBLK_UNROLL_LIMIT));
#else // !TARGET_X86
assert(size <= CPBLK_UNROLL_LIMIT);
#endif // !TARGET_X86
if (src->AsOp()->gtOp1->isUsedFromReg())
{
genConsumeReg(src->AsOp()->gtOp1);
}
unsigned offset = 0;
regNumber xmmTmpReg = REG_NA;
regNumber intTmpReg = REG_NA;
regNumber longTmpReg = REG_NA;
if (size >= XMM_REGSIZE_BYTES)
{
xmmTmpReg = putArgNode->GetSingleTempReg(RBM_ALLFLOAT);
}
if ((size % XMM_REGSIZE_BYTES) != 0)
{
intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT);
}
#ifdef TARGET_X86
longTmpReg = xmmTmpReg;
#else
longTmpReg = intTmpReg;
#endif
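// The copy is split into 16-byte SSE chunks followed by 8/4/2/1-byte tail moves; e.g.
// (illustrative) a 24-byte struct is copied with one 16-byte SSE move plus one 8-byte move.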
// Let's use SSE2 to be able to do 16 bytes at a time with loads and stores.
size_t slots = size / XMM_REGSIZE_BYTES;
while (slots-- > 0)
{
// TODO: In the below code the load and store instructions are for 16 bytes, but the
// type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
// this probably needs to be changed.
// Load
genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmTmpReg, src->gtGetOp1(), offset);
// Store
genStoreRegToStackArg(TYP_STRUCT, xmmTmpReg, offset);
offset += XMM_REGSIZE_BYTES;
}
// Fill the remainder (15 bytes or less) if there's one.
if ((size % XMM_REGSIZE_BYTES) != 0)
{
offset += genMove8IfNeeded(size, longTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove4IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove2IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
offset += genMove1IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset);
assert(offset == size);
}
}
//------------------------------------------------------------------------
// genStructPutArgRepMovs: Generates code for passing a struct arg by value on stack using Rep Movs.
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Preconditions:
// m_stkArgVarNum must be set to the base var number, relative to which the by-val struct bits will go.
//
void CodeGen::genStructPutArgRepMovs(GenTreePutArgStk* putArgNode)
{
GenTree* src = putArgNode->gtGetOp1();
assert(src->TypeGet() == TYP_STRUCT);
assert(!src->AsObj()->GetLayout()->HasGCPtr());
// Make sure we got the arguments of the cpblk operation in the right registers, and that
// 'src' is contained as expected.
assert(putArgNode->gtRsvdRegs == (RBM_RDI | RBM_RCX | RBM_RSI));
assert(src->isContained());
genConsumePutStructArgStk(putArgNode, REG_RDI, REG_RSI, REG_RCX);
instGen(INS_r_movsb);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genStructPutArgPush: Generates code for passing a struct arg by value on stack using "push".
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// Used only on x86, in two cases:
// - Structs 4, 8, or 12 bytes in size (less than XMM_REGSIZE_BYTES, multiple of TARGET_POINTER_SIZE).
// - Structs that contain GC pointers - they are guaranteed to be sized correctly by the VM.
//
void CodeGen::genStructPutArgPush(GenTreePutArgStk* putArgNode)
{
// On x86, any struct that contains GC references must be stored to the stack using `push` instructions so
// that the emitter properly detects the need to update the method's GC information.
//
// Strictly speaking, it is only necessary to use "push" to store the GC references themselves, so for structs
// with large numbers of consecutive non-GC-ref-typed fields, we may be able to improve the code size in the
// future.
assert(m_pushStkArg);
GenTree* src = putArgNode->Data();
GenTree* srcAddr = putArgNode->Data()->AsObj()->Addr();
regNumber srcAddrReg = srcAddr->GetRegNum();
const bool srcAddrInReg = srcAddrReg != REG_NA;
unsigned srcLclNum = 0;
unsigned srcLclOffset = 0;
if (srcAddrInReg)
{
srcAddrReg = genConsumeReg(srcAddr);
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcLclOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
ClassLayout* layout = src->AsObj()->GetLayout();
const unsigned byteSize = putArgNode->GetStackByteSize();
assert((byteSize % TARGET_POINTER_SIZE == 0) && ((byteSize < XMM_REGSIZE_BYTES) || layout->HasGCPtr()));
const unsigned numSlots = byteSize / TARGET_POINTER_SIZE;
assert(putArgNode->gtNumSlots == numSlots);
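// Push the slots from the highest offset down to offset 0: each push decrements ESP, so pushing
// in reverse order lays the struct out in ascending address order in the outgoing argument area.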
for (int i = numSlots - 1; i >= 0; --i)
{
emitAttr slotAttr = emitTypeSize(layout->GetGCPtrType(i));
const unsigned byteOffset = i * TARGET_POINTER_SIZE;
if (srcAddrInReg)
{
GetEmitter()->emitIns_AR_R(INS_push, slotAttr, REG_NA, srcAddrReg, byteOffset);
}
else
{
GetEmitter()->emitIns_S(INS_push, slotAttr, srcLclNum, srcLclOffset + byteOffset);
}
AddStackLevel(TARGET_POINTER_SIZE);
}
}
#endif // TARGET_X86
#ifndef TARGET_X86
//------------------------------------------------------------------------
// genStructPutArgPartialRepMovs: Generates code for passing a struct arg by value on stack using
// a mix of pointer-sized stores, "movsq" and "rep movsd".
//
// Arguments:
// putArgNode - the PutArgStk tree.
//
// Notes:
// Used on non-x86 targets (Unix x64) for structs with GC pointers.
//
void CodeGen::genStructPutArgPartialRepMovs(GenTreePutArgStk* putArgNode)
{
// Consume these registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumePutStructArgStk(putArgNode, REG_RDI, REG_RSI, REG_NA);
GenTreeObj* src = putArgNode->gtGetOp1()->AsObj();
ClassLayout* layout = src->GetLayout();
const bool srcIsLocal = src->Addr()->OperIsLocalAddr();
const emitAttr srcAddrAttr = srcIsLocal ? EA_PTRSIZE : EA_BYREF;
#if DEBUG
unsigned numGCSlotsCopied = 0;
#endif // DEBUG
assert(layout->HasGCPtr());
const unsigned byteSize = putArgNode->GetStackByteSize();
assert(byteSize % TARGET_POINTER_SIZE == 0);
const unsigned numSlots = byteSize / TARGET_POINTER_SIZE;
assert(putArgNode->gtNumSlots == numSlots);
// No need to disable GC the way COPYOBJ does. Here the refs are copied in atomic operations always.
for (unsigned i = 0; i < numSlots;)
{
if (!layout->IsGCPtr(i))
{
// Let's see if we can use rep movsp (alias for movsd or movsq for 32 and 64 bits respectively)
// instead of a sequence of movsp instructions to save cycles and code size.
unsigned adjacentNonGCSlotCount = 0;
do
{
adjacentNonGCSlotCount++;
i++;
} while ((i < numSlots) && !layout->IsGCPtr(i));
// If we have a very small contiguous non-ref region, it's better just to
// emit a sequence of movsp instructions
if (adjacentNonGCSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
{
for (; adjacentNonGCSlotCount > 0; adjacentNonGCSlotCount--)
{
instGen(INS_movsp);
}
}
else
{
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, adjacentNonGCSlotCount);
instGen(INS_r_movsp);
}
}
else
{
// We have a GC (byref or ref) pointer
// TODO-Amd64-Unix: Here a better solution (for code size and CQ) would be to use movsp instruction,
// but the logic for emitting a GC info record is not available (it is internal for the emitter
// only.) See emitGCVarLiveUpd function. If we could call it separately, we could do
// instGen(INS_movsp); and emission of gc info.
var_types memType = layout->GetGCPtrType(i);
GetEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0);
genStoreRegToStackArg(memType, REG_RCX, i * TARGET_POINTER_SIZE);
#ifdef DEBUG
numGCSlotsCopied++;
#endif // DEBUG
i++;
if (i < numSlots)
{
// Source for the copy operation.
// If a LocalAddr, use EA_PTRSIZE - copy from stack.
// If not a LocalAddr, use EA_BYREF - the source location is not on the stack.
GetEmitter()->emitIns_R_I(INS_add, srcAddrAttr, REG_RSI, TARGET_POINTER_SIZE);
// Always copying to the stack - outgoing arg area
// (or the outgoing arg area of the caller for a tail call) - use EA_PTRSIZE.
GetEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_RDI, TARGET_POINTER_SIZE);
}
}
}
assert(numGCSlotsCopied == layout->GetGCPtrCount());
}
#endif // !TARGET_X86
//------------------------------------------------------------------------
// If any Vector3 args are on the stack and are not passed by reference, the upper 32 bits
// must be cleared to zero. The native compiler doesn't clear the upper bits,
// and there is no way to know whether the caller is native or not. So, the upper
// 32 bits of a Vector3 argument on the stack are always cleared to zero.
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void CodeGen::genClearStackVec3ArgUpperBits()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genClearStackVec3ArgUpperBits()\n");
}
#endif
assert(compiler->compGeneratingProlog);
for (unsigned varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(varDsc->lvIsParam);
// Does the var have a SIMD12 type?
if (varDsc->lvType != TYP_SIMD12)
{
continue;
}
if (!varDsc->lvIsRegArg)
{
// Clear the upper 32 bits by mov dword ptr [V_ARG_BASE+0xC], 0
GetEmitter()->emitIns_S_I(ins_Store(TYP_INT), EA_4BYTE, varNum, genTypeSize(TYP_FLOAT) * 3, 0);
}
else
{
// Assume that for x64 linux, an argument is fully in registers
// or fully on stack.
regNumber argReg = varDsc->GetOtherArgReg();
// Clear the upper 32 bits by two shift instructions.
// argReg = argReg << 96
GetEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
// argReg = argReg >> 96
GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
}
}
}
#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#endif // FEATURE_PUT_STRUCT_ARG_STK
//------------------------------------------------------------------------
// genCodeForCpObj - Generate code for CpObj nodes to copy structs that have interleaved
// GC pointers.
//
// Arguments:
// cpObjNode - the GT_STORE_OBJ
//
// Notes:
//    This will generate a sequence of movsp instructions for runs of non-gc members
//    (movsp is an alias for movsd on x86 and movsq on x64), and calls to the
//    CORINFO_HELP_ASSIGN_BYREF helper for the GC pointer fields.
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
{
// Make sure we got the arguments of the cpobj operation in the right registers
GenTree* dstAddr = cpObjNode->Addr();
GenTree* source = cpObjNode->Data();
GenTree* srcAddr = nullptr;
var_types srcAddrType = TYP_BYREF;
bool dstOnStack = dstAddr->gtSkipReloadOrCopy()->OperIsLocalAddr();
#ifdef DEBUG
// If the GenTree node has data about GC pointers, this means we're dealing
// with CpObj, so this requires special logic.
assert(cpObjNode->GetLayout()->HasGCPtr());
// MovSp (alias for movsq on x64 and movsd on x86) instruction is used for copying non-gcref fields
// and it needs src = RSI and dst = RDI.
// Either these registers must not contain lclVars, or they must be dying or marked for spill.
// This is because these registers are incremented as we go through the struct.
if (!source->IsLocal())
{
assert(source->gtOper == GT_IND);
srcAddr = source->gtGetOp1();
GenTree* actualSrcAddr = srcAddr->gtSkipReloadOrCopy();
GenTree* actualDstAddr = dstAddr->gtSkipReloadOrCopy();
unsigned srcLclVarNum = BAD_VAR_NUM;
unsigned dstLclVarNum = BAD_VAR_NUM;
bool isSrcAddrLiveOut = false;
bool isDstAddrLiveOut = false;
if (genIsRegCandidateLocal(actualSrcAddr))
{
srcLclVarNum = actualSrcAddr->AsLclVarCommon()->GetLclNum();
isSrcAddrLiveOut = ((actualSrcAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
}
if (genIsRegCandidateLocal(actualDstAddr))
{
dstLclVarNum = actualDstAddr->AsLclVarCommon()->GetLclNum();
isDstAddrLiveOut = ((actualDstAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
}
assert((actualSrcAddr->GetRegNum() != REG_RSI) || !isSrcAddrLiveOut ||
((srcLclVarNum == dstLclVarNum) && !isDstAddrLiveOut));
assert((actualDstAddr->GetRegNum() != REG_RDI) || !isDstAddrLiveOut ||
((srcLclVarNum == dstLclVarNum) && !isSrcAddrLiveOut));
srcAddrType = srcAddr->TypeGet();
}
#endif // DEBUG
// Consume the operands and get them into the right registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumeBlockOp(cpObjNode, REG_RDI, REG_RSI, REG_NA);
gcInfo.gcMarkRegPtrVal(REG_RSI, srcAddrType);
gcInfo.gcMarkRegPtrVal(REG_RDI, dstAddr->TypeGet());
unsigned slots = cpObjNode->GetLayout()->GetSlotCount();
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
{
if (slots >= CPOBJ_NONGC_SLOTS_LIMIT)
{
// If the destination of the CpObj is on the stack, make sure we allocated
// RCX to emit the movsp (alias for movsd or movsq for 32 and 64 bits respectively).
assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, slots);
instGen(INS_r_movsp);
}
else
{
// For small structs, it's better to emit a sequence of movsp than to
// emit a rep movsp instruction.
while (slots > 0)
{
instGen(INS_movsp);
slots--;
}
}
}
else
{
ClassLayout* layout = cpObjNode->GetLayout();
unsigned gcPtrCount = layout->GetGCPtrCount();
unsigned i = 0;
while (i < slots)
{
if (!layout->IsGCPtr(i))
{
// Let's see if we can use rep movsp instead of a sequence of movsp instructions
// to save cycles and code size.
unsigned nonGcSlotCount = 0;
do
{
nonGcSlotCount++;
i++;
} while ((i < slots) && !layout->IsGCPtr(i));
// If we have a very small contiguous non-gc region, it's better just to
// emit a sequence of movsp instructions
if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
{
while (nonGcSlotCount > 0)
{
instGen(INS_movsp);
nonGcSlotCount--;
}
}
else
{
// Otherwise, we can save code-size and improve CQ by emitting
// rep movsp (alias for movsd/movsq for x86/x64)
assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
instGen(INS_r_movsp);
}
}
else
{
genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
gcPtrCount--;
i++;
}
}
assert(gcPtrCount == 0);
}
// Clear the gcInfo for RSI and RDI.
// While we normally update GC info prior to the last instruction that uses them,
// these actually live into the helper call.
gcInfo.gcMarkRegSetNpt(RBM_RSI);
gcInfo.gcMarkRegSetNpt(RBM_RDI);
}
#ifdef TARGET_AMD64
//----------------------------------------------------------------------------------
// genCodeForCpBlkHelper - Generate code for a CpBlk node by means of the VM memcpy helper call
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode)
{
// Destination address goes in arg0, source address goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
}
#endif // TARGET_AMD64
// generate code to do a switch statement based on a table of ip-relative offsets
void CodeGen::genTableBasedSwitch(GenTree* treeNode)
{
genConsumeOperands(treeNode->AsOp());
regNumber idxReg = treeNode->AsOp()->gtOp1->GetRegNum();
regNumber baseReg = treeNode->AsOp()->gtOp2->GetRegNum();
regNumber tmpReg = treeNode->GetSingleTempReg();
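// The emitted sequence is roughly:
//      mov   baseReg, dword ptr [baseReg + idxReg*4]   ; load the ip-relative offset
//      lea   tmpReg, [address of fgFirstBB]
//      add   baseReg, tmpReg
//      jmp   baseReg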
// load the ip-relative offset (which is relative to start of fgFirstBB)
GetEmitter()->emitIns_R_ARX(INS_mov, EA_4BYTE, baseReg, baseReg, idxReg, 4, 0);
// add it to the absolute address of fgFirstBB
GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, tmpReg);
GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, baseReg, tmpReg);
// jmp baseReg
GetEmitter()->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), baseReg);
}
// emits the table and an instruction to get the address of the first element
void CodeGen::genJumpTable(GenTree* treeNode)
{
noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
assert(treeNode->OperGet() == GT_JMPTABLE);
unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
unsigned jmpTabOffs;
unsigned jmpTabBase;
jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
jmpTabOffs = 0;
JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
noway_assert(target->bbFlags & BBF_HAS_LABEL);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
GetEmitter()->emitDataGenData(i, target);
}
GetEmitter()->emitDataGenEnd();
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
GetEmitter()->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), treeNode->GetRegNum(),
compiler->eeFindJitDataOffs(jmpTabBase), 0);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForLockAdd: Generate code for a GT_LOCKADD node
//
// Arguments:
// node - the GT_LOCKADD node
//
void CodeGen::genCodeForLockAdd(GenTreeOp* node)
{
assert(node->OperIs(GT_LOCKADD));
GenTree* addr = node->gtGetOp1();
GenTree* data = node->gtGetOp2();
emitAttr size = emitActualTypeSize(data->TypeGet());
assert(addr->isUsedFromReg());
assert(data->isUsedFromReg() || data->isContainedIntOrIImmed());
assert((size == EA_4BYTE) || (size == EA_PTRSIZE));
genConsumeOperands(node);
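// Emit "lock add [addr], data", where data is either a contained immediate or a register.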
instGen(INS_lock);
if (data->isContainedIntOrIImmed())
{
int imm = static_cast<int>(data->AsIntCon()->IconValue());
assert(imm == data->AsIntCon()->IconValue());
GetEmitter()->emitIns_I_AR(INS_add, size, imm, addr->GetRegNum(), 0);
}
else
{
GetEmitter()->emitIns_AR_R(INS_add, size, data->GetRegNum(), addr->GetRegNum(), 0);
}
}
//------------------------------------------------------------------------
// genLockedInstructions: Generate code for a GT_XADD or GT_XCHG node.
//
// Arguments:
// node - the GT_XADD/XCHG node
//
void CodeGen::genLockedInstructions(GenTreeOp* node)
{
assert(node->OperIs(GT_XADD, GT_XCHG));
GenTree* addr = node->gtGetOp1();
GenTree* data = node->gtGetOp2();
emitAttr size = emitTypeSize(node->TypeGet());
assert(addr->isUsedFromReg());
assert(data->isUsedFromReg());
assert((size == EA_4BYTE) || (size == EA_PTRSIZE));
genConsumeOperands(node);
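// The emitted sequence is roughly:
//      mov  targetReg, dataReg                ; skipped if they are the same register
//      (lock) xadd / xchg [addrReg], targetReg
// which leaves the original memory value in targetReg.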
// If the destination register is different from the data register then we need
// to first move the data to the target register. Make sure we don't overwrite
// the address, the register allocator should have taken care of this.
assert((node->GetRegNum() != addr->GetRegNum()) || (node->GetRegNum() == data->GetRegNum()));
GetEmitter()->emitIns_Mov(INS_mov, size, node->GetRegNum(), data->GetRegNum(), /* canSkip */ true);
instruction ins = node->OperIs(GT_XADD) ? INS_xadd : INS_xchg;
// XCHG has an implied lock prefix when the first operand is a memory operand.
if (ins != INS_xchg)
{
instGen(INS_lock);
}
GetEmitter()->emitIns_AR_R(ins, size, node->GetRegNum(), addr->GetRegNum(), 0);
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForCmpXchg: Produce code for a GT_CMPXCHG node.
//
// Arguments:
// tree - the GT_CMPXCHG node
//
void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* tree)
{
assert(tree->OperIs(GT_CMPXCHG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
GenTree* location = tree->gtOpLocation; // arg1
GenTree* value = tree->gtOpValue; // arg2
GenTree* comparand = tree->gtOpComparand; // arg3
assert(location->GetRegNum() != REG_NA && location->GetRegNum() != REG_RAX);
assert(value->GetRegNum() != REG_NA && value->GetRegNum() != REG_RAX);
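// The emitted sequence is roughly:
//      mov          rax, comparandReg         ; skipped if the comparand is already in RAX
//      lock cmpxchg [locationReg], valueReg
//      mov          targetReg, rax            ; RAX holds the original memory value either way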
genConsumeReg(location);
genConsumeReg(value);
genConsumeReg(comparand);
// comparand goes to RAX;
// Note that we must issue this move after the genConsumeRegs(), in case any of the above
// have a GT_COPY from RAX.
inst_Mov(comparand->TypeGet(), REG_RAX, comparand->GetRegNum(), /* canSkip */ true);
// location is Rm
instGen(INS_lock);
GetEmitter()->emitIns_AR_R(INS_cmpxchg, emitTypeSize(targetType), value->GetRegNum(), location->GetRegNum(), 0);
// Result is in RAX
inst_Mov(targetType, targetReg, REG_RAX, /* canSkip */ true);
genProduceReg(tree);
}
// generate code for BoundsCheck nodes
void CodeGen::genRangeCheck(GenTree* oper)
{
noway_assert(oper->OperIs(GT_BOUNDS_CHECK));
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
GenTree* arrIndex = bndsChk->GetIndex();
GenTree* arrLen = bndsChk->GetArrayLength();
GenTree * src1, *src2;
emitJumpKind jmpKind;
instruction cmpKind;
genConsumeRegs(arrIndex);
genConsumeRegs(arrLen);
if (arrIndex->IsIntegralConst(0) && arrLen->isUsedFromReg())
{
// arrIndex is 0 and arrLen is in a reg. In this case
// we can generate
// test reg, reg
// since arrLen is non-negative
src1 = arrLen;
src2 = arrLen;
jmpKind = EJ_je;
cmpKind = INS_test;
}
else if (arrIndex->isContainedIntOrIImmed())
{
// arrIndex is a contained constant. In this case
// we will generate one of the following
// cmp [mem], immed (if arrLen is a memory op)
// cmp reg, immed (if arrLen is in a reg)
//
// That is arrLen cannot be a contained immed.
assert(!arrLen->isContainedIntOrIImmed());
src1 = arrLen;
src2 = arrIndex;
jmpKind = EJ_jbe;
cmpKind = INS_cmp;
}
else
{
// arrIndex could either be a contained memory op or a reg
// In this case we will generate one of the following
// cmp [mem], immed (if arrLen is a constant)
// cmp [mem], reg (if arrLen is in a reg)
// cmp reg, immed (if arrIndex is in a reg)
// cmp reg1, reg2 (if arrIndex is in reg1)
// cmp reg, [mem] (if arrLen is a memory op)
//
// That is only one of arrIndex or arrLen can be a memory op.
assert(!arrIndex->isUsedFromMemory() || !arrLen->isUsedFromMemory());
src1 = arrIndex;
src2 = arrLen;
jmpKind = EJ_jae;
cmpKind = INS_cmp;
}
var_types bndsChkType = src2->TypeGet();
#if DEBUG
// Bounds checks can only be 32 or 64 bit sized comparisons.
assert(bndsChkType == TYP_INT || bndsChkType == TYP_LONG);
// The type of the bounds check should always be wide enough to compare against the index.
assert(emitTypeSize(bndsChkType) >= emitTypeSize(src1->TypeGet()));
#endif // DEBUG
GetEmitter()->emitInsBinary(cmpKind, emitTypeSize(bndsChkType), src1, src2);
genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
}
//---------------------------------------------------------------------
// genCodeForPhysReg - generate code for a GT_PHYSREG node
//
// Arguments
// tree - the GT_PHYSREG node
//
// Return value:
// None
//
void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree)
{
assert(tree->OperIs(GT_PHYSREG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
inst_Mov(targetType, targetReg, tree->gtSrcReg, /* canSkip */ true);
genTransferRegGCState(targetReg, tree->gtSrcReg);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genCodeForNullCheck - generate code for a GT_NULLCHECK node
//
// Arguments
// tree - the GT_NULLCHECK node
//
// Return value:
// None
//
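// Notes:
//    Emits "cmp [reg], reg", which faults if the address in reg is null;
//    the result of the compare itself is not used.
//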
void CodeGen::genCodeForNullCheck(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_NULLCHECK));
assert(tree->gtOp1->isUsedFromReg());
regNumber reg = genConsumeReg(tree->gtOp1);
GetEmitter()->emitIns_AR_R(INS_cmp, emitTypeSize(tree), reg, reg, 0);
}
//------------------------------------------------------------------------
// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
// producing the effective index by subtracting the lower bound.
//
// Arguments:
// arrIndex - the node for which we're generating code
//
// Return Value:
// None.
//
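// Notes:
//    Emits roughly:
//      mov  tgtReg, indexReg
//      sub  tgtReg, [arrReg + lowerBoundOffset(rank, dim)]
//      cmp  tgtReg, [arrReg + lengthOffset(rank, dim)]
//      jae  <range check fail>
//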
void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
GenTree* arrObj = arrIndex->ArrObj();
GenTree* indexNode = arrIndex->IndexExpr();
regNumber arrReg = genConsumeReg(arrObj);
regNumber indexReg = genConsumeReg(indexNode);
regNumber tgtReg = arrIndex->GetRegNum();
unsigned dim = arrIndex->gtCurrDim;
unsigned rank = arrIndex->gtArrRank;
var_types elemType = arrIndex->gtArrElemType;
noway_assert(tgtReg != REG_NA);
// Subtract the lower bound for this dimension.
// TODO-XArch-CQ: make this contained if it's an immediate that fits.
inst_Mov(indexNode->TypeGet(), tgtReg, indexReg, /* canSkip */ true);
GetEmitter()->emitIns_R_AR(INS_sub, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
compiler->eeGetMDArrayLowerBoundOffset(rank, dim));
GetEmitter()->emitIns_R_AR(INS_cmp, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
compiler->eeGetMDArrayLengthOffset(rank, dim));
genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL);
genProduceReg(arrIndex);
}
//------------------------------------------------------------------------
// genCodeForArrOffset: Generates code to compute the flattened array offset for
// one dimension of an array reference:
// result = (prevDimOffset * dimSize) + effectiveIndex
// where dimSize is obtained from the arrObj operand
//
// Arguments:
// arrOffset - the node for which we're generating code
//
// Return Value:
// None.
//
// Notes:
// dimSize and effectiveIndex are always non-negative, the former by design,
// and the latter because it has been normalized to be zero-based.
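//    When prevDimOffset is a constant zero the result is just the effective index;
//    otherwise a temp register holds dimSize and the offset*dimSize product before
//    the effective index is added.
//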
void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
{
GenTree* offsetNode = arrOffset->gtOffset;
GenTree* indexNode = arrOffset->gtIndex;
GenTree* arrObj = arrOffset->gtArrObj;
regNumber tgtReg = arrOffset->GetRegNum();
assert(tgtReg != REG_NA);
unsigned dim = arrOffset->gtCurrDim;
unsigned rank = arrOffset->gtArrRank;
var_types elemType = arrOffset->gtArrElemType;
// First, consume the operands in the correct order.
regNumber offsetReg = REG_NA;
regNumber tmpReg = REG_NA;
if (!offsetNode->IsIntegralConst(0))
{
offsetReg = genConsumeReg(offsetNode);
// We will use a temp register for the offset*scale+effectiveIndex computation.
tmpReg = arrOffset->GetSingleTempReg();
}
else
{
assert(offsetNode->isContained());
}
regNumber indexReg = genConsumeReg(indexNode);
// Although arrReg may not be used in the constant-index case, if we have generated
// the value into a register, we must consume it, otherwise we will fail to end the
// live range of the gc ptr.
// TODO-CQ: Currently arrObj will always have a register allocated to it.
// We could avoid allocating a register for it, which would be of value if the arrObj
// is an on-stack lclVar.
regNumber arrReg = REG_NA;
if (arrObj->gtHasReg(compiler))
{
arrReg = genConsumeReg(arrObj);
}
if (!offsetNode->IsIntegralConst(0))
{
assert(tmpReg != REG_NA);
assert(arrReg != REG_NA);
// Evaluate tgtReg = offsetReg*dim_size + indexReg.
// tmpReg is used to load dim_size and the result of the multiplication.
// Note that dim_size will never be negative.
GetEmitter()->emitIns_R_AR(INS_mov, emitActualTypeSize(TYP_INT), tmpReg, arrReg,
compiler->eeGetMDArrayLengthOffset(rank, dim));
inst_RV_RV(INS_imul, tmpReg, offsetReg);
if (tmpReg == tgtReg)
{
inst_RV_RV(INS_add, tmpReg, indexReg);
}
else
{
inst_Mov(TYP_I_IMPL, tgtReg, indexReg, /* canSkip */ true);
inst_RV_RV(INS_add, tgtReg, tmpReg);
}
}
else
{
inst_Mov(TYP_INT, tgtReg, indexReg, /* canSkip */ true);
}
genProduceReg(arrOffset);
}
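//------------------------------------------------------------------------
// genGetInsForOper: Map a GenTree operator to the x86/x64 instruction that
//    implements it for the given (non-SIMD) type; floating-point operators
//    are mapped via ins_MathOp.
//
// Arguments:
//    oper - the GenTree operator
//    type - the type of the operands
//
// Return Value:
//    The instruction to use.
//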
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins;
// Operations on SIMD vectors shouldn't come this path
assert(!varTypeIsSIMD(type));
if (varTypeIsFloating(type))
{
return ins_MathOp(oper, type);
}
switch (oper)
{
case GT_ADD:
ins = INS_add;
break;
case GT_AND:
ins = INS_and;
break;
case GT_LSH:
ins = INS_shl;
break;
case GT_MUL:
ins = INS_imul;
break;
case GT_NEG:
ins = INS_neg;
break;
case GT_NOT:
ins = INS_not;
break;
case GT_OR:
ins = INS_or;
break;
case GT_ROL:
ins = INS_rol;
break;
case GT_ROR:
ins = INS_ror;
break;
case GT_RSH:
ins = INS_sar;
break;
case GT_RSZ:
ins = INS_shr;
break;
case GT_SUB:
ins = INS_sub;
break;
case GT_XOR:
ins = INS_xor;
break;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
ins = INS_add;
break;
case GT_ADD_HI:
ins = INS_adc;
break;
case GT_SUB_LO:
ins = INS_sub;
break;
case GT_SUB_HI:
ins = INS_sbb;
break;
case GT_LSH_HI:
ins = INS_shld;
break;
case GT_RSH_LO:
ins = INS_shrd;
break;
#endif // !defined(TARGET_64BIT)
default:
unreached();
break;
}
return ins;
}
//------------------------------------------------------------------------
// genCodeForShift: Generates the code sequence for a GenTree node that
// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
// b) The shift-by-amount in tree->AsOp()->gtOp2 is either a contained constant or
// it's a register-allocated expression. If it is in a register that is
// not RCX, it will be moved to RCX (so RCX better not be in use!).
//
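// Notes:
//    A shift by a contained constant uses the immediate form (with "x << 1" folded
//    into "add reg, reg" or "lea"); a shift by a variable amount expects the count
//    to be in RCX.
//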
void CodeGen::genCodeForShift(GenTree* tree)
{
// Only the non-RMW case here.
assert(tree->OperIsShiftOrRotate());
assert(tree->AsOp()->gtOp1->isUsedFromReg());
assert(tree->GetRegNum() != REG_NA);
genConsumeOperands(tree->AsOp());
var_types targetType = tree->TypeGet();
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
GenTree* operand = tree->gtGetOp1();
regNumber operandReg = operand->GetRegNum();
GenTree* shiftBy = tree->gtGetOp2();
if (shiftBy->isContainedIntOrIImmed())
{
emitAttr size = emitTypeSize(tree);
// Optimize "X<<1" to "lea [reg+reg]" or "add reg, reg"
if (tree->OperIs(GT_LSH) && !tree->gtOverflowEx() && !tree->gtSetFlags() && shiftBy->IsIntegralConst(1))
{
if (tree->GetRegNum() == operandReg)
{
GetEmitter()->emitIns_R_R(INS_add, size, tree->GetRegNum(), operandReg);
}
else
{
GetEmitter()->emitIns_R_ARX(INS_lea, size, tree->GetRegNum(), operandReg, operandReg, 1, 0);
}
}
else
{
int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
#if defined(TARGET_64BIT)
            // Try to emit rorx instead of mov+rol/ror when BMI2 is available;
            // this only makes sense for 64-bit integers.
if ((genActualType(targetType) == TYP_LONG) && (tree->GetRegNum() != operandReg) &&
compiler->compOpportunisticallyDependsOn(InstructionSet_BMI2) && tree->OperIs(GT_ROL, GT_ROR) &&
(shiftByValue > 0) && (shiftByValue < 64))
{
const int value = tree->OperIs(GT_ROL) ? (64 - shiftByValue) : shiftByValue;
GetEmitter()->emitIns_R_R_I(INS_rorx, size, tree->GetRegNum(), operandReg, value);
genProduceReg(tree);
return;
}
#endif
// First, move the operand to the destination register and
// later on perform the shift in-place.
// (LSRA will try to avoid this situation through preferencing.)
inst_Mov(targetType, tree->GetRegNum(), operandReg, /* canSkip */ true);
inst_RV_SH(ins, size, tree->GetRegNum(), shiftByValue);
}
}
else
{
// We must have the number of bits to shift stored in ECX, since we constrained this node to
// sit in ECX. In case this didn't happen, LSRA expects the code generator to move it since it's a single
// register destination requirement.
genCopyRegIfNeeded(shiftBy, REG_RCX);
// The operand to be shifted must not be in ECX
noway_assert(operandReg != REG_RCX);
inst_Mov(targetType, tree->GetRegNum(), operandReg, /* canSkip */ true);
inst_RV(ins, tree->GetRegNum(), targetType);
}
genProduceReg(tree);
}
#ifdef TARGET_X86
//------------------------------------------------------------------------
// genCodeForShiftLong: Generates the code sequence for a GenTree node that
// represents a three operand bit shift or rotate operation (<<Hi, >>Lo).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
// b) The shift-by-amount in tree->AsOp()->gtOp2 is a contained constant
//
// TODO-X86-CQ: This only handles the case where the operand being shifted is in a register. We don't
// need sourceHi to be always in reg in case of GT_LSH_HI (because it could be moved from memory to
// targetReg if sourceHi is a memory operand). Similarly for GT_RSH_LO, sourceLo could be marked as
// contained memory-op. Even if not a memory-op, we could mark it as reg-optional.
//
void CodeGen::genCodeForShiftLong(GenTree* tree)
{
// Only the non-RMW case here.
genTreeOps oper = tree->OperGet();
assert(oper == GT_LSH_HI || oper == GT_RSH_LO);
GenTree* operand = tree->AsOp()->gtOp1;
assert(operand->OperGet() == GT_LONG);
assert(operand->AsOp()->gtOp1->isUsedFromReg());
assert(operand->AsOp()->gtOp2->isUsedFromReg());
GenTree* operandLo = operand->gtGetOp1();
GenTree* operandHi = operand->gtGetOp2();
regNumber regLo = operandLo->GetRegNum();
regNumber regHi = operandHi->GetRegNum();
genConsumeOperands(tree->AsOp());
var_types targetType = tree->TypeGet();
instruction ins = genGetInsForOper(oper, targetType);
GenTree* shiftBy = tree->gtGetOp2();
assert(shiftBy->isContainedIntOrIImmed());
unsigned int count = (unsigned int)shiftBy->AsIntConCommon()->IconValue();
regNumber regResult = (oper == GT_LSH_HI) ? regHi : regLo;
inst_Mov(targetType, tree->GetRegNum(), regResult, /* canSkip */ true);
if (oper == GT_LSH_HI)
{
inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->GetRegNum(), regLo, count);
}
else
{
assert(oper == GT_RSH_LO);
inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->GetRegNum(), regHi, count);
}
genProduceReg(tree);
}
#endif
//------------------------------------------------------------------------
// genMapShiftInsToShiftByConstantIns: Given a general shift/rotate instruction,
// map it to the specific x86/x64 shift opcode for a shift/rotate by a constant.
// X86/x64 has a special encoding for shift/rotate-by-constant-1.
//
// Arguments:
// ins: the base shift/rotate instruction
// shiftByValue: the constant value by which we are shifting/rotating
//
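// Return Value:
//    The INS_xxx_1 form of the instruction when shifting/rotating by 1,
//    otherwise the INS_xxx_N (shift-by-immediate) form.
//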
instruction CodeGen::genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue)
{
assert(ins == INS_rcl || ins == INS_rcr || ins == INS_rol || ins == INS_ror || ins == INS_shl || ins == INS_shr ||
ins == INS_sar);
// Which format should we use?
instruction shiftByConstantIns;
if (shiftByValue == 1)
{
// Use the shift-by-one format.
assert(INS_rcl + 1 == INS_rcl_1);
assert(INS_rcr + 1 == INS_rcr_1);
assert(INS_rol + 1 == INS_rol_1);
assert(INS_ror + 1 == INS_ror_1);
assert(INS_shl + 1 == INS_shl_1);
assert(INS_shr + 1 == INS_shr_1);
assert(INS_sar + 1 == INS_sar_1);
shiftByConstantIns = (instruction)(ins + 1);
}
else
{
// Use the shift-by-NNN format.
assert(INS_rcl + 2 == INS_rcl_N);
assert(INS_rcr + 2 == INS_rcr_N);
assert(INS_rol + 2 == INS_rol_N);
assert(INS_ror + 2 == INS_ror_N);
assert(INS_shl + 2 == INS_shl_N);
assert(INS_shr + 2 == INS_shr_N);
assert(INS_sar + 2 == INS_sar_N);
shiftByConstantIns = (instruction)(ins + 2);
}
return shiftByConstantIns;
}
//------------------------------------------------------------------------
// genCodeForShiftRMW: Generates the code sequence for a GT_STOREIND GenTree node that
// represents a RMW bit shift or rotate operation (<<, >>, >>>, rol, ror), for example:
// GT_STOREIND( AddressTree, GT_SHL( Ind ( AddressTree ), Operand ) )
//
// Arguments:
// storeIndNode: the GT_STOREIND node.
//
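// Notes:
//    Emits a single read-modify-write shift/rotate instruction against the memory
//    operand, e.g. "shl dword ptr [mem], imm8" (or the implicit shift-by-1 form),
//    or "shl dword ptr [mem], cl" when the shift amount is in a register.
//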
void CodeGen::genCodeForShiftRMW(GenTreeStoreInd* storeInd)
{
GenTree* data = storeInd->Data();
assert(data->OperIsShift() || data->OperIsRotate());
// This function only handles the RMW case.
assert(data->AsOp()->gtOp1->isUsedFromMemory());
assert(data->AsOp()->gtOp1->isIndir());
assert(Lowering::IndirsAreEquivalent(data->AsOp()->gtOp1, storeInd));
assert(data->GetRegNum() == REG_NA);
var_types targetType = data->TypeGet();
genTreeOps oper = data->OperGet();
instruction ins = genGetInsForOper(oper, targetType);
emitAttr attr = EA_ATTR(genTypeSize(targetType));
GenTree* shiftBy = data->AsOp()->gtOp2;
if (shiftBy->isContainedIntOrIImmed())
{
int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
ins = genMapShiftInsToShiftByConstantIns(ins, shiftByValue);
if (shiftByValue == 1)
{
// There is no source in this case, as the shift by count is embedded in the instruction opcode itself.
GetEmitter()->emitInsRMW(ins, attr, storeInd);
}
else
{
GetEmitter()->emitInsRMW(ins, attr, storeInd, shiftBy);
}
}
else
{
// We must have the number of bits to shift stored in ECX, since we constrained this node to
// sit in ECX. In case this didn't happen, LSRA expects the code generator to move it since it's a single
// register destination requirement.
genCopyRegIfNeeded(shiftBy, REG_RCX);
// The shiftBy operand is implicit, so call the unary version of emitInsRMW.
GetEmitter()->emitInsRMW(ins, attr, storeInd);
}
}
//------------------------------------------------------------------------
// genCodeForLclAddr: Generates the code for GT_LCL_FLD_ADDR/GT_LCL_VAR_ADDR.
//
// Arguments:
// tree - the node.
//
void CodeGen::genCodeForLclAddr(GenTree* tree)
{
assert(tree->OperIs(GT_LCL_FLD_ADDR, GT_LCL_VAR_ADDR));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
// Address of a local var.
noway_assert((targetType == TYP_BYREF) || (targetType == TYP_I_IMPL));
emitAttr size = emitTypeSize(targetType);
inst_RV_TT(INS_lea, targetReg, tree, 0, size);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclFld: Produce code for a GT_LCL_FLD node.
//
// Arguments:
// tree - the GT_LCL_FLD node
//
void CodeGen::genCodeForLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_LCL_FLD));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
noway_assert(targetReg != REG_NA);
#ifdef FEATURE_SIMD
// Loading of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genLoadLclTypeSIMD12(tree);
return;
}
#endif
noway_assert(targetType != TYP_STRUCT);
emitAttr size = emitTypeSize(targetType);
unsigned offs = tree->GetLclOffs();
unsigned varNum = tree->GetLclNum();
assert(varNum < compiler->lvaCount);
GetEmitter()->emitIns_R_S(ins_Load(targetType), size, targetReg, varNum, offs);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclVar: Produce code for a GT_LCL_VAR node.
//
// Arguments:
// tree - the GT_LCL_VAR node
//
void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
{
assert(tree->OperIs(GT_LCL_VAR));
// lcl_vars are not defs
assert((tree->gtFlags & GTF_VAR_DEF) == 0);
LclVarDsc* varDsc = compiler->lvaGetDesc(tree);
bool isRegCandidate = varDsc->lvIsRegCandidate();
// If this is a register candidate that has been spilled, genConsumeReg() will
// reload it at the point of use. Otherwise, if it's not in a register, we load it here.
if (!isRegCandidate && !tree->IsMultiReg() && !(tree->gtFlags & GTF_SPILLED))
{
#if defined(FEATURE_SIMD) && defined(TARGET_X86)
// Loading of TYP_SIMD12 (i.e. Vector3) variable
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadLclTypeSIMD12(tree);
return;
}
#endif // defined(FEATURE_SIMD) && defined(TARGET_X86)
var_types type = varDsc->GetRegisterType(tree);
GetEmitter()->emitIns_R_S(ins_Load(type, compiler->isSIMDTypeLocalAligned(tree->GetLclNum())),
emitTypeSize(type), tree->GetRegNum(), tree->GetLclNum(), 0);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForStoreLclFld: Produce code for a GT_STORE_LCL_FLD node.
//
// Arguments:
// tree - the GT_STORE_LCL_FLD node
//
void CodeGen::genCodeForStoreLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_STORE_LCL_FLD));
var_types targetType = tree->TypeGet();
GenTree* op1 = tree->gtGetOp1();
noway_assert(targetType != TYP_STRUCT);
#ifdef FEATURE_SIMD
// storing of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genStoreLclTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
assert(varTypeUsesFloatReg(targetType) == varTypeUsesFloatReg(op1));
assert(genTypeSize(genActualType(targetType)) == genTypeSize(genActualType(op1->TypeGet())));
genConsumeRegs(op1);
if (op1->OperIs(GT_BITCAST) && op1->isContained())
{
regNumber targetReg = tree->GetRegNum();
GenTree* bitCastSrc = op1->gtGetOp1();
var_types srcType = bitCastSrc->TypeGet();
noway_assert(!bitCastSrc->isContained());
if (targetReg == REG_NA)
{
unsigned lclNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
GetEmitter()->emitIns_S_R(ins_Store(srcType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), bitCastSrc->GetRegNum(), lclNum, tree->GetLclOffs());
varDsc->SetRegNum(REG_STK);
}
else
{
genBitCast(targetType, targetReg, srcType, bitCastSrc->GetRegNum());
}
}
else
{
GetEmitter()->emitInsBinary(ins_Store(targetType), emitTypeSize(tree), tree, op1);
}
// Updating variable liveness after instruction was emitted
genUpdateLife(tree);
}
//------------------------------------------------------------------------
// genCodeForStoreLclVar: Produce code for a GT_STORE_LCL_VAR node.
//
// Arguments:
// lclNode - the GT_STORE_LCL_VAR node
//
void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* lclNode)
{
assert(lclNode->OperIs(GT_STORE_LCL_VAR));
regNumber targetReg = lclNode->GetRegNum();
emitter* emit = GetEmitter();
GenTree* op1 = lclNode->gtGetOp1();
// Stores from a multi-reg source are handled separately.
if (op1->gtSkipReloadOrCopy()->IsMultiRegNode())
{
genMultiRegStoreToLocal(lclNode);
}
else
{
unsigned lclNum = lclNode->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
var_types targetType = varDsc->GetRegisterType(lclNode);
#ifdef DEBUG
var_types op1Type = op1->TypeGet();
if (op1Type == TYP_STRUCT)
{
assert(op1->IsLocal());
GenTreeLclVar* op1LclVar = op1->AsLclVar();
unsigned op1lclNum = op1LclVar->GetLclNum();
LclVarDsc* op1VarDsc = compiler->lvaGetDesc(op1lclNum);
op1Type = op1VarDsc->GetRegisterType(op1LclVar);
}
assert(varTypeUsesFloatReg(targetType) == varTypeUsesFloatReg(op1Type));
assert(!varTypeUsesFloatReg(targetType) || (emitTypeSize(targetType) == emitTypeSize(op1Type)));
#endif
#if !defined(TARGET_64BIT)
if (targetType == TYP_LONG)
{
genStoreLongLclVar(lclNode);
return;
}
#endif // !defined(TARGET_64BIT)
#ifdef FEATURE_SIMD
// storing of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genStoreLclTypeSIMD12(lclNode);
return;
}
#endif // FEATURE_SIMD
genConsumeRegs(op1);
if (op1->OperIs(GT_BITCAST) && op1->isContained())
{
GenTree* bitCastSrc = op1->gtGetOp1();
var_types srcType = bitCastSrc->TypeGet();
noway_assert(!bitCastSrc->isContained());
if (targetReg == REG_NA)
{
emit->emitIns_S_R(ins_Store(srcType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), bitCastSrc->GetRegNum(), lclNum, 0);
genUpdateLife(lclNode);
varDsc->SetRegNum(REG_STK);
}
else
{
genBitCast(targetType, targetReg, srcType, bitCastSrc->GetRegNum());
}
}
else if (targetReg == REG_NA)
{
// stack store
emit->emitInsStoreLcl(ins_Store(targetType, compiler->isSIMDTypeLocalAligned(lclNum)),
emitTypeSize(targetType), lclNode);
varDsc->SetRegNum(REG_STK);
}
else
{
// Look for the case where we have a constant zero which we've marked for reuse,
// but which isn't actually in the register we want. In that case, it's better to create
// zero in the target register, because an xor is smaller than a copy. Note that we could
// potentially handle this in the register allocator, but we can't always catch it there
// because the target may not have a register allocated for it yet.
if (op1->isUsedFromReg() && (op1->GetRegNum() != targetReg) && (op1->IsIntegralConst(0) || op1->IsFPZero()))
{
op1->SetRegNum(REG_NA);
op1->ResetReuseRegVal();
op1->SetContained();
}
if (!op1->isUsedFromReg())
{
                // Currently, we assume that the non-reg source of a GT_STORE_LCL_VAR writing to a register
                // must be a constant. However, in the future we might want to support an operand used from
                // memory. This is a bit tricky because we have to decide whether it can be used from memory
                // before register allocation, and that would be a case where, once that's done, we need to
                // mark the node as always requiring a register - which we always assume now anyway, but once
                // we "optimize" that we'll have to take cases like this into account.
assert((op1->GetRegNum() == REG_NA) && op1->OperIsConst());
genSetRegToConst(targetReg, targetType, op1);
}
else
{
assert(targetReg == lclNode->GetRegNum());
assert(op1->GetRegNum() != REG_NA);
inst_Mov_Extend(targetType, /* srcInReg */ true, targetReg, op1->GetRegNum(), /* canSkip */ true,
emitTypeSize(targetType));
}
}
if (targetReg != REG_NA)
{
genProduceReg(lclNode);
}
}
}
//------------------------------------------------------------------------
// genCodeForIndexAddr: Produce code for a GT_INDEX_ADDR node.
//
// Arguments:
// tree - the GT_INDEX_ADDR node
//
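// Notes:
//    Emits an optional bounds check against the array length followed by
//    "lea dstReg, [baseReg + indexReg*scale + elemOffset]". Element sizes that
//    are not 1, 2, 4 or 8 are folded into the index with an imul first.
//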
void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node)
{
GenTree* const base = node->Arr();
GenTree* const index = node->Index();
const regNumber baseReg = genConsumeReg(base);
regNumber indexReg = genConsumeReg(index);
const regNumber dstReg = node->GetRegNum();
// NOTE: `genConsumeReg` marks the consumed register as not a GC pointer, as it assumes that the input registers
// die at the first instruction generated by the node. This is not the case for `INDEX_ADDR`, however, as the
// base register is multiply-used. As such, we need to mark the base register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(baseReg, base->TypeGet());
assert(varTypeIsIntegral(index->TypeGet()));
regNumber tmpReg = REG_NA;
#ifdef TARGET_64BIT
tmpReg = node->GetSingleTempReg();
#endif
// Generate the bounds check if necessary.
if ((node->gtFlags & GTF_INX_RNGCHK) != 0)
{
#ifdef TARGET_64BIT
// The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case that the index
// is a native int on a 64-bit platform, we will need to widen the array length and then compare.
if (index->TypeGet() == TYP_I_IMPL)
{
GetEmitter()->emitIns_R_AR(INS_mov, EA_4BYTE, tmpReg, baseReg, static_cast<int>(node->gtLenOffset));
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, indexReg, tmpReg);
}
else
#endif // TARGET_64BIT
{
GetEmitter()->emitIns_R_AR(INS_cmp, EA_4BYTE, indexReg, baseReg, static_cast<int>(node->gtLenOffset));
}
genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL, node->gtIndRngFailBB);
}
#ifdef TARGET_64BIT
if (index->TypeGet() != TYP_I_IMPL)
{
// LEA needs 64-bit operands so we need to widen the index if it's TYP_INT.
GetEmitter()->emitIns_Mov(INS_mov, EA_4BYTE, tmpReg, indexReg, /* canSkip */ false);
indexReg = tmpReg;
}
#endif // TARGET_64BIT
// Compute the address of the array element.
unsigned scale = node->gtElemSize;
switch (scale)
{
case 1:
case 2:
case 4:
case 8:
tmpReg = indexReg;
break;
default:
#ifdef TARGET_64BIT
// IMUL treats its immediate operand as signed so scale can't be larger than INT32_MAX.
// The VM doesn't allow such large array elements but let's be sure.
noway_assert(scale <= INT32_MAX);
#else // !TARGET_64BIT
tmpReg = node->GetSingleTempReg();
#endif // !TARGET_64BIT
GetEmitter()->emitIns_R_I(emitter::inst3opImulForReg(tmpReg), EA_PTRSIZE, indexReg,
static_cast<ssize_t>(scale));
scale = 1;
break;
}
GetEmitter()->emitIns_R_ARX(INS_lea, emitTypeSize(node->TypeGet()), dstReg, baseReg, tmpReg, scale,
static_cast<int>(node->gtElemOffset));
gcInfo.gcMarkRegSetNpt(base->gtGetRegMask());
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForIndir: Produce code for a GT_IND node.
//
// Arguments:
// tree - the GT_IND node
//
void CodeGen::genCodeForIndir(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_IND));
#ifdef FEATURE_SIMD
// Handling of Vector3 type values loaded through indirection.
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
var_types targetType = tree->TypeGet();
emitter* emit = GetEmitter();
GenTree* addr = tree->Addr();
if (addr->IsCnsIntOrI() && addr->IsIconHandle(GTF_ICON_TLS_HDL))
{
noway_assert(EA_ATTR(genTypeSize(targetType)) == EA_PTRSIZE);
emit->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, tree->GetRegNum(), FLD_GLOBAL_FS,
(int)addr->AsIntCon()->gtIconVal);
}
else
{
genConsumeAddress(addr);
emit->emitInsLoadInd(ins_Load(targetType), emitTypeSize(tree), tree->GetRegNum(), tree);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForStoreInd: Produce code for a GT_STOREIND node.
//
// Arguments:
// tree - the GT_STOREIND node
//
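// Notes:
//    Handles the GC write barrier case, RMW memory ops of the form
//    [addr] = [addr] <op> <src> (emitted as a single RMW instruction),
//    and the plain store-indirect case.
//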
void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
{
assert(tree->OperIs(GT_STOREIND));
#ifdef FEATURE_SIMD
// Storing Vector3 of size 12 bytes through indirection
if (tree->TypeGet() == TYP_SIMD12)
{
genStoreIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
GenTree* data = tree->Data();
GenTree* addr = tree->Addr();
var_types targetType = tree->TypeGet();
assert(!varTypeIsFloating(targetType) || (genTypeSize(targetType) == genTypeSize(data->TypeGet())));
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(tree, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
{
// data and addr must be in registers.
// Consume both registers so that any copies of interfering registers are taken care of.
genConsumeOperands(tree);
if (genEmitOptimizedGCWriteBarrier(writeBarrierForm, addr, data))
{
return;
}
// At this point, we should not have any interference.
// That is, 'data' must not be in REG_ARG_0, as that is where 'addr' must go.
noway_assert(data->GetRegNum() != REG_ARG_0);
// addr goes in REG_ARG_0
genCopyRegIfNeeded(addr, REG_ARG_0);
// data goes in REG_ARG_1
genCopyRegIfNeeded(data, REG_ARG_1);
genGCWriteBarrier(tree, writeBarrierForm);
}
else
{
bool dataIsUnary = false;
bool isRMWMemoryOp = tree->IsRMWMemoryOp();
GenTree* rmwSrc = nullptr;
// We must consume the operands in the proper execution order, so that liveness is
// updated appropriately.
genConsumeAddress(addr);
        // If tree represents a RMW memory op then its data is a non-leaf node marked as contained,
        // and the non-indir operand of data is the source of the RMW memory op.
if (isRMWMemoryOp)
{
assert(data->isContained() && !data->OperIsLeaf());
GenTree* rmwDst = nullptr;
dataIsUnary = (GenTree::OperIsUnary(data->OperGet()) != 0);
if (!dataIsUnary)
{
if (tree->IsRMWDstOp1())
{
rmwDst = data->gtGetOp1();
rmwSrc = data->gtGetOp2();
}
else
{
assert(tree->IsRMWDstOp2());
rmwDst = data->gtGetOp2();
rmwSrc = data->gtGetOp1();
}
genConsumeRegs(rmwSrc);
}
else
{
                // *(p) = oper *(p): Here addr = p, rmwSrc = rmwDst = *(p), i.e. GT_IND(p)
// For unary RMW ops, src and dst of RMW memory op is the same. Lower
// clears operand counts on rmwSrc and we don't need to perform a
// genConsumeReg() on it.
assert(tree->IsRMWDstOp1());
rmwSrc = data->gtGetOp1();
rmwDst = data->gtGetOp1();
assert(rmwSrc->isUsedFromMemory());
}
assert(rmwSrc != nullptr);
assert(rmwDst != nullptr);
assert(Lowering::IndirsAreEquivalent(rmwDst, tree));
}
else
{
genConsumeRegs(data);
}
if (isRMWMemoryOp)
{
if (dataIsUnary)
{
// generate code for unary RMW memory ops like neg/not
GetEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree), tree);
}
else
{
if (data->OperIsShiftOrRotate())
{
// Generate code for shift RMW memory ops.
// The data address needs to be op1 (it must be [addr] = [addr] <shift> <amount>, not [addr] =
// <amount> <shift> [addr]).
assert(tree->IsRMWDstOp1());
assert(rmwSrc == data->gtGetOp2());
genCodeForShiftRMW(tree);
}
else if (data->OperGet() == GT_ADD && (rmwSrc->IsIntegralConst(1) || rmwSrc->IsIntegralConst(-1)))
{
// Generate "inc/dec [mem]" instead of "add/sub [mem], 1".
//
// Notes:
// 1) Global morph transforms GT_SUB(x, +/-1) into GT_ADD(x, -/+1).
// 2) TODO-AMD64: Debugger routine NativeWalker::Decode() runs into
// an assert while decoding ModR/M byte of "inc dword ptr [rax]".
// It is not clear whether Decode() can handle all possible
// addr modes with inc/dec. For this reason, inc/dec [mem]
// is not generated while generating debuggable code. Update
// the above if condition once Decode() routine is fixed.
assert(rmwSrc->isContainedIntOrIImmed());
instruction ins = rmwSrc->IsIntegralConst(1) ? INS_inc : INS_dec;
GetEmitter()->emitInsRMW(ins, emitTypeSize(tree), tree);
}
else
{
// generate code for remaining binary RMW memory ops like add/sub/and/or/xor
GetEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree),
tree, rmwSrc);
}
}
}
else
{
GetEmitter()->emitInsStoreInd(ins_Store(data->TypeGet()), emitTypeSize(tree), tree);
}
}
}
//------------------------------------------------------------------------
// genCodeForSwap: Produce code for a GT_SWAP node.
//
// Arguments:
// tree - the GT_SWAP node
//
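// Notes:
//    Implemented as a single "xchg reg1, reg2"; the two locals simply exchange
//    their assigned registers, and the GC register sets are updated accordingly.
//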
void CodeGen::genCodeForSwap(GenTreeOp* tree)
{
assert(tree->OperIs(GT_SWAP));
// Swap is only supported for lclVar operands that are enregistered
// We do not consume or produce any registers. Both operands remain enregistered.
// However, the gc-ness may change.
assert(genIsRegCandidateLocal(tree->gtOp1) && genIsRegCandidateLocal(tree->gtOp2));
GenTreeLclVarCommon* lcl1 = tree->gtOp1->AsLclVarCommon();
LclVarDsc* varDsc1 = compiler->lvaGetDesc(lcl1);
var_types type1 = varDsc1->TypeGet();
GenTreeLclVarCommon* lcl2 = tree->gtOp2->AsLclVarCommon();
LclVarDsc* varDsc2 = compiler->lvaGetDesc(lcl2);
var_types type2 = varDsc2->TypeGet();
// We must have both int or both fp regs
assert(!varTypeUsesFloatReg(type1) || varTypeUsesFloatReg(type2));
// FP swap is not yet implemented (and should have NYI'd in LSRA)
assert(!varTypeUsesFloatReg(type1));
regNumber oldOp1Reg = lcl1->GetRegNum();
regMaskTP oldOp1RegMask = genRegMask(oldOp1Reg);
regNumber oldOp2Reg = lcl2->GetRegNum();
regMaskTP oldOp2RegMask = genRegMask(oldOp2Reg);
// We don't call genUpdateVarReg because we don't have a tree node with the new register.
varDsc1->SetRegNum(oldOp2Reg);
varDsc2->SetRegNum(oldOp1Reg);
// Do the xchg
emitAttr size = EA_PTRSIZE;
if (varTypeGCtype(type1) != varTypeGCtype(type2))
{
// If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
// Otherwise it will leave them alone, which is correct if they have the same GC-ness.
size = EA_GCREF;
}
inst_RV_RV(INS_xchg, oldOp1Reg, oldOp2Reg, TYP_I_IMPL, size);
// Update the gcInfo.
// Manually remove these regs for the gc sets (mostly to avoid confusing duplicative dump output)
gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
// gcMarkRegPtrVal will do the appropriate thing for non-gc types.
// It will also dump the updates.
gcInfo.gcMarkRegPtrVal(oldOp2Reg, type1);
gcInfo.gcMarkRegPtrVal(oldOp1Reg, type2);
}
//------------------------------------------------------------------------
// genEmitOptimizedGCWriteBarrier: Generate write barrier store using the optimized
// helper functions.
//
// Arguments:
// writeBarrierForm - the write barrier form to use
// addr - the address at which to do the store
// data - the data to store
//
// Return Value:
// true if an optimized write barrier form was used, false if not. If this
// function returns false, the caller must emit a "standard" write barrier.
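//
// Notes:
//    Optimized write barriers are only generated on x86 with NOGC_WRITE_BARRIERS:
//    the target address is moved into the fixed write barrier register and a
//    per-register CORINFO_HELP_[CHECKED_]ASSIGN_REF_* helper is called, selected
//    by the register that holds the value being stored.
//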
bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data)
{
assert(writeBarrierForm != GCInfo::WBF_NoBarrier);
#if defined(TARGET_X86) && NOGC_WRITE_BARRIERS
if (!genUseOptimizedWriteBarriers(writeBarrierForm))
{
return false;
}
const static int regToHelper[2][8] = {
// If the target is known to be in managed memory
{
CORINFO_HELP_ASSIGN_REF_EAX, // EAX
CORINFO_HELP_ASSIGN_REF_ECX, // ECX
-1, // EDX (always the target address)
CORINFO_HELP_ASSIGN_REF_EBX, // EBX
-1, // ESP
CORINFO_HELP_ASSIGN_REF_EBP, // EBP
CORINFO_HELP_ASSIGN_REF_ESI, // ESI
CORINFO_HELP_ASSIGN_REF_EDI, // EDI
},
// Don't know if the target is in managed memory
{
CORINFO_HELP_CHECKED_ASSIGN_REF_EAX, // EAX
CORINFO_HELP_CHECKED_ASSIGN_REF_ECX, // ECX
-1, // EDX (always the target address)
CORINFO_HELP_CHECKED_ASSIGN_REF_EBX, // EBX
-1, // ESP
CORINFO_HELP_CHECKED_ASSIGN_REF_EBP, // EBP
CORINFO_HELP_CHECKED_ASSIGN_REF_ESI, // ESI
CORINFO_HELP_CHECKED_ASSIGN_REF_EDI, // EDI
},
};
noway_assert(regToHelper[0][REG_EAX] == CORINFO_HELP_ASSIGN_REF_EAX);
noway_assert(regToHelper[0][REG_ECX] == CORINFO_HELP_ASSIGN_REF_ECX);
noway_assert(regToHelper[0][REG_EBX] == CORINFO_HELP_ASSIGN_REF_EBX);
noway_assert(regToHelper[0][REG_ESP] == -1);
noway_assert(regToHelper[0][REG_EBP] == CORINFO_HELP_ASSIGN_REF_EBP);
noway_assert(regToHelper[0][REG_ESI] == CORINFO_HELP_ASSIGN_REF_ESI);
noway_assert(regToHelper[0][REG_EDI] == CORINFO_HELP_ASSIGN_REF_EDI);
noway_assert(regToHelper[1][REG_EAX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EAX);
noway_assert(regToHelper[1][REG_ECX] == CORINFO_HELP_CHECKED_ASSIGN_REF_ECX);
noway_assert(regToHelper[1][REG_EBX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBX);
noway_assert(regToHelper[1][REG_ESP] == -1);
noway_assert(regToHelper[1][REG_EBP] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBP);
noway_assert(regToHelper[1][REG_ESI] == CORINFO_HELP_CHECKED_ASSIGN_REF_ESI);
noway_assert(regToHelper[1][REG_EDI] == CORINFO_HELP_CHECKED_ASSIGN_REF_EDI);
regNumber reg = data->GetRegNum();
noway_assert((reg != REG_ESP) && (reg != REG_WRITE_BARRIER));
// Generate the following code:
// lea edx, addr
// call write_barrier_helper_reg
// addr goes in REG_ARG_0
genCopyRegIfNeeded(addr, REG_WRITE_BARRIER);
unsigned tgtAnywhere = 0;
if (writeBarrierForm != GCInfo::WBF_BarrierUnchecked)
{
tgtAnywhere = 1;
}
// We might want to call a modified version of genGCWriteBarrier() to get the benefit of
// the FEATURE_COUNT_GC_WRITE_BARRIERS code there, but that code doesn't look like it works
// with rationalized RyuJIT IR. So, for now, just emit the helper call directly here.
genEmitHelperCall(regToHelper[tgtAnywhere][reg],
0, // argSize
EA_PTRSIZE); // retSize
return true;
#else // !defined(TARGET_X86) || !NOGC_WRITE_BARRIERS
return false;
#endif // !defined(TARGET_X86) || !NOGC_WRITE_BARRIERS
}
//------------------------------------------------------------------------
// genCall: Produce code for a GT_CALL node
//
// Arguments:
//    call - the GT_CALL node
//
void CodeGen::genCall(GenTreeCall* call)
{
genAlignStackBeforeCall(call);
// all virtuals should have been expanded into a control expression
assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
// Insert a GS check if necessary
if (call->IsTailCallViaJitHelper())
{
if (compiler->getNeedsGSSecurityCookie())
{
#if FEATURE_FIXED_OUT_ARGS
// If either of the conditions below is true, we will need a temporary register in order to perform the GS
// cookie check. When FEATURE_FIXED_OUT_ARGS is disabled, we save and restore the temporary register using
// push/pop. When FEATURE_FIXED_OUT_ARGS is enabled, however, we need an alternative solution. For now,
// though, the tail prefix is ignored on all platforms that use fixed out args, so we should never hit this
// case.
assert(compiler->gsGlobalSecurityCookieAddr == nullptr);
assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
#endif
genEmitGSCookieCheck(true);
}
}
// Consume all the arg regs
for (GenTreeCall::Use& use : call->LateArgs())
{
GenTree* argNode = use.GetNode();
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
assert(curArgTabEntry);
if (curArgTabEntry->GetRegNum() == REG_STK)
{
continue;
}
#ifdef UNIX_AMD64_ABI
// Deal with multi register passed struct args.
if (argNode->OperGet() == GT_FIELD_LIST)
{
unsigned regIndex = 0;
for (GenTreeFieldList::Use& use : argNode->AsFieldList()->Uses())
{
GenTree* putArgRegNode = use.GetNode();
assert(putArgRegNode->gtOper == GT_PUTARG_REG);
regNumber argReg = curArgTabEntry->GetRegNum(regIndex++);
genConsumeReg(putArgRegNode);
// Validate the putArgRegNode has the right type.
assert(varTypeUsesFloatReg(putArgRegNode->TypeGet()) == genIsValidFloatReg(argReg));
inst_Mov_Extend(putArgRegNode->TypeGet(), /* srcInReg */ false, argReg, putArgRegNode->GetRegNum(),
/* canSkip */ true, emitActualTypeSize(TYP_I_IMPL));
}
}
else
#endif // UNIX_AMD64_ABI
{
regNumber argReg = curArgTabEntry->GetRegNum();
genConsumeReg(argNode);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ false, argReg, argNode->GetRegNum(), /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
        // In the case of a varargs call, the ABI dictates that if we have floating point args,
        // we must pass the enregistered arguments in both the integer and floating point
        // registers, so let's do that.
if (compFeatureVarArg() && call->IsVarargs() && varTypeIsFloating(argNode))
{
regNumber srcReg = argNode->GetRegNum();
regNumber targetReg = compiler->getCallArgIntRegister(argNode->GetRegNum());
inst_Mov(TYP_LONG, targetReg, srcReg, /* canSkip */ false, emitActualTypeSize(TYP_I_IMPL));
}
}
#if defined(TARGET_X86) || defined(UNIX_AMD64_ABI)
// The call will pop its arguments.
// for each putarg_stk:
target_ssize_t stackArgBytes = 0;
for (GenTreeCall::Use& use : call->Args())
{
GenTree* arg = use.GetNode();
if (arg->OperIs(GT_PUTARG_STK) && ((arg->gtFlags & GTF_LATE_ARG) == 0))
{
GenTree* source = arg->AsPutArgStk()->gtGetOp1();
unsigned size = arg->AsPutArgStk()->GetStackByteSize();
stackArgBytes += size;
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
assert(curArgTabEntry != nullptr);
assert(size == (curArgTabEntry->numSlots * TARGET_POINTER_SIZE));
#ifdef FEATURE_PUT_STRUCT_ARG_STK
if (!source->OperIs(GT_FIELD_LIST) && (source->TypeGet() == TYP_STRUCT))
{
GenTreeObj* obj = source->AsObj();
unsigned argBytes = roundUp(obj->GetLayout()->GetSize(), TARGET_POINTER_SIZE);
#ifdef TARGET_X86
// If we have an OBJ, we must have created a copy if the original arg was not a
// local and was not a multiple of TARGET_POINTER_SIZE.
// Note that on x64/ux this will be handled by unrolling in genStructPutArgUnroll.
assert((argBytes == obj->GetLayout()->GetSize()) || obj->Addr()->IsLocalAddrExpr());
#endif // TARGET_X86
assert((curArgTabEntry->numSlots * TARGET_POINTER_SIZE) == argBytes);
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
#endif // DEBUG
}
}
#endif // defined(TARGET_X86) || defined(UNIX_AMD64_ABI)
// Insert a null check on "this" pointer if asked.
if (call->NeedsNullCheck())
{
const regNumber regThis = genGetThisArgReg(call);
GetEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
}
// If fast tail call, then we are done here, we just have to load the call
// target into the right registers. We ensure in RA that the registers used
// for the target (e.g. contained indir) are loaded into volatile registers
// that won't be restored by epilog sequence.
if (call->IsFastTailCall())
{
GenTree* target = getCallTarget(call, nullptr);
if (target != nullptr)
{
if (target->isContainedIndir())
{
genConsumeAddress(target->AsIndir()->Addr());
}
else
{
assert(!target->isContained());
genConsumeReg(target);
}
}
return;
}
    // For a pinvoke to unmanaged code we emit a label to clear
// the GC pointer state before the callsite.
// We can't utilize the typical lazy killing of GC pointers
// at (or inside) the callsite.
if (compiler->killGCRefs(call))
{
genDefineTempLabel(genCreateTempLabel());
}
#if defined(DEBUG) && defined(TARGET_X86)
// Store the stack pointer so we can check it after the call.
if (compiler->opts.compStackCheckOnCall && call->gtCallType == CT_USER_FUNC)
{
noway_assert(compiler->lvaCallSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvOnFrame);
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
}
#endif // defined(DEBUG) && defined(TARGET_X86)
// When it's a PInvoke call and the call type is USER function, we issue VZEROUPPER here
// if the function contains 256bit AVX instructions, this is to avoid AVX-256 to Legacy SSE
// transition penalty, assuming the user function contains legacy SSE instruction.
// To limit code size increase impact: we only issue VZEROUPPER before PInvoke call, not issue
// VZEROUPPER after PInvoke call because transition penalty from legacy SSE to AVX only happens
// when there's preceding 256-bit AVX to legacy SSE transition penalty.
if (call->IsPInvoke() && (call->gtCallType == CT_USER_FUNC) && GetEmitter()->Contains256bitAVX())
{
assert(compiler->canUseVexEncoding());
instGen(INS_vzeroupper);
}
genCallInstruction(call X86_ARG(stackArgBytes));
// for pinvoke/intrinsic/tailcalls we may have needed to get the address of
// a label. In case it is indirect with CFG enabled make sure we do not get
// the address after the validation but only after the actual call that
// comes after.
if (genPendingCallLabel && !call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
genDefineInlineTempLabel(genPendingCallLabel);
genPendingCallLabel = nullptr;
}
#ifdef DEBUG
// We should not have GC pointers in killed registers live around the call.
// GC info for arg registers were cleared when consuming arg nodes above
// and LSRA should ensure it for other trashed registers.
regMaskTP killMask = RBM_CALLEE_TRASH;
if (call->IsHelperCall())
{
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
killMask = compiler->compHelperCallKillSet(helpFunc);
}
assert((gcInfo.gcRegGCrefSetCur & killMask) == 0);
assert((gcInfo.gcRegByrefSetCur & killMask) == 0);
#endif
var_types returnType = call->TypeGet();
if (returnType != TYP_VOID)
{
#ifdef TARGET_X86
if (varTypeIsFloating(returnType))
{
// Spill the value from the fp stack.
// Then, load it into the target register.
call->gtFlags |= GTF_SPILL;
regSet.rsSpillFPStack(call);
call->gtFlags |= GTF_SPILLED;
call->gtFlags &= ~GTF_SPILL;
}
else
#endif // TARGET_X86
{
regNumber returnReg;
if (call->HasMultiRegRetVal())
{
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
assert(retTypeDesc != nullptr);
const unsigned regCount = retTypeDesc->GetReturnRegCount();
// If regs allocated to call node are different from ABI return
// regs in which the call has returned its result, move the result
// to regs allocated to call node.
for (unsigned i = 0; i < regCount; ++i)
{
var_types regType = retTypeDesc->GetReturnRegType(i);
returnReg = retTypeDesc->GetABIReturnReg(i);
regNumber allocatedReg = call->GetRegNumByIdx(i);
inst_Mov(regType, allocatedReg, returnReg, /* canSkip */ true);
}
#ifdef FEATURE_SIMD
// A Vector3 return value is stored in xmm0 and xmm1.
// RyuJIT assumes that the upper unused bits of xmm1 are cleared but
// the native compiler doesn't guarantee it.
if (call->IsUnmanaged() && (returnType == TYP_SIMD12))
{
returnReg = retTypeDesc->GetABIReturnReg(1);
// Clear the upper 32 bits by two shift instructions.
// retReg = retReg << 96
// retReg = retReg >> 96
GetEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
}
#endif // FEATURE_SIMD
}
else
{
#ifdef TARGET_X86
if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
// The x86 CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
// TCB in REG_PINVOKE_TCB. AMD64/ARM64 use the standard calling convention. fgMorphCall() sets the
// correct argument registers.
returnReg = REG_PINVOKE_TCB;
}
else
#endif // TARGET_X86
if (varTypeIsFloating(returnType))
{
returnReg = REG_FLOATRET;
}
else
{
returnReg = REG_INTRET;
}
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ true);
}
genProduceReg(call);
}
}
// If there is nothing next, that means the result is thrown away, so this value is not live.
// However, for minopts or debuggable code, we keep it live to support managed return value debugging.
if ((call->gtNext == nullptr) && compiler->opts.OptimizationEnabled())
{
gcInfo.gcMarkRegSetNpt(RBM_INTRET);
}
#if defined(DEBUG) && defined(TARGET_X86)
if (compiler->opts.compStackCheckOnCall && call->gtCallType == CT_USER_FUNC)
{
noway_assert(compiler->lvaCallSpCheck != 0xCCCCCCCC &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvDoNotEnregister &&
compiler->lvaGetDesc(compiler->lvaCallSpCheck)->lvOnFrame);
if (!call->CallerPop() && (stackArgBytes != 0))
{
// ECX is trashed, so can be used to compute the expected SP. We saved the value of SP
            // after pushing all the stack arguments, but the callee popped the arguments, so we need
// to do some math to figure a good comparison.
GetEmitter()->emitIns_Mov(INS_mov, EA_4BYTE, REG_ARG_0, REG_SPBASE, /* canSkip */ false);
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_ARG_0, stackArgBytes);
GetEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_ARG_0, compiler->lvaCallSpCheck, 0);
}
else
{
GetEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
}
BasicBlock* sp_check = genCreateTempLabel();
GetEmitter()->emitIns_J(INS_je, sp_check);
instGen(INS_BREAKPOINT);
genDefineTempLabel(sp_check);
}
#endif // defined(DEBUG) && defined(TARGET_X86)
#if !defined(FEATURE_EH_FUNCLETS)
//-------------------------------------------------------------------------
// Create a label for tracking of region protected by the monitor in synchronized methods.
// This needs to be here, rather than above where fPossibleSyncHelperCall is set,
// so the GC state vars have been updated before creating the label.
if ((call->gtCallType == CT_HELPER) && (compiler->info.compFlags & CORINFO_FLG_SYNCH))
{
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(call->gtCallMethHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
switch (helperNum)
{
case CORINFO_HELP_MON_ENTER:
case CORINFO_HELP_MON_ENTER_STATIC:
noway_assert(compiler->syncStartEmitCookie == NULL);
compiler->syncStartEmitCookie =
GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
noway_assert(compiler->syncStartEmitCookie != NULL);
break;
case CORINFO_HELP_MON_EXIT:
case CORINFO_HELP_MON_EXIT_STATIC:
noway_assert(compiler->syncEndEmitCookie == NULL);
compiler->syncEndEmitCookie =
GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
noway_assert(compiler->syncEndEmitCookie != NULL);
break;
default:
break;
}
}
#endif // !FEATURE_EH_FUNCLETS
unsigned stackAdjustBias = 0;
#if defined(TARGET_X86)
// Is the caller supposed to pop the arguments?
if (call->CallerPop() && (stackArgBytes != 0))
{
stackAdjustBias = stackArgBytes;
}
SubtractStackLevel(stackArgBytes);
#endif // TARGET_X86
genRemoveAlignmentAfterCall(call, stackAdjustBias);
}
//------------------------------------------------------------------------
// genCallInstruction - Generate instructions necessary to transfer control to the call.
//
// Arguments:
// call - the GT_CALL node
//
// Remarks:
// For tailcalls this function will generate a jump.
//
void CodeGen::genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes))
{
#if defined(TARGET_X86)
// If the callee pops the arguments, we pass a positive value as the argSize, and the emitter will
// adjust its stack level accordingly.
// If the caller needs to explicitly pop its arguments, we must pass a negative value, and then do the
// pop when we're done.
target_ssize_t argSizeForEmitter = stackArgBytes;
if (call->CallerPop())
{
argSizeForEmitter = -stackArgBytes;
}
#endif // defined(TARGET_X86)
// Determine return value size(s).
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
emitAttr retSize = EA_PTRSIZE;
emitAttr secondRetSize = EA_UNKNOWN;
if (call->HasMultiRegRetVal())
{
retSize = emitTypeSize(retTypeDesc->GetReturnRegType(0));
secondRetSize = emitTypeSize(retTypeDesc->GetReturnRegType(1));
}
else
{
assert(!varTypeIsStruct(call));
if (call->gtType == TYP_REF)
{
retSize = EA_GCREF;
}
else if (call->gtType == TYP_BYREF)
{
retSize = EA_BYREF;
}
}
// We need to propagate the IL offset information to the call instruction, so we can emit
// an IL to native mapping record for the call, to support managed return value debugging.
// We don't want tail call helper calls that were converted from normal calls to get a record,
// so we skip this hash table lookup logic in that case.
DebugInfo di;
if (compiler->opts.compDbgInfo && compiler->genCallSite2DebugInfoMap != nullptr && !call->IsTailCall())
{
(void)compiler->genCallSite2DebugInfoMap->Lookup(call, &di);
}
CORINFO_SIG_INFO* sigInfo = nullptr;
#ifdef DEBUG
// Pass the call signature information down into the emitter so the emitter can associate
// native call sites with the signatures they were generated from.
if (call->gtCallType != CT_HELPER)
{
sigInfo = call->callSig;
}
#endif // DEBUG
CORINFO_METHOD_HANDLE methHnd;
GenTree* target = getCallTarget(call, &methHnd);
if (target != nullptr)
{
#ifdef TARGET_X86
if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT))
{
// On x86, we need to generate a very specific pattern for indirect VSD calls:
//
// 3-byte nop
// call dword ptr [eax]
//
// Where EAX is also used as an argument to the stub dispatch helper. Make
// sure that the call target address is computed into EAX in this case.
assert(compiler->virtualStubParamInfo->GetReg() == REG_VIRTUAL_STUB_TARGET);
assert(target->isContainedIndir());
assert(target->OperGet() == GT_IND);
GenTree* addr = target->AsIndir()->Addr();
assert(addr->isUsedFromReg());
genConsumeReg(addr);
genCopyRegIfNeeded(addr, REG_VIRTUAL_STUB_TARGET);
GetEmitter()->emitIns_Nop(3);
// clang-format off
GetEmitter()->emitIns_Call(emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
argSizeForEmitter,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di, REG_VIRTUAL_STUB_TARGET, REG_NA, 1, 0);
// clang-format on
}
else
#endif
if (target->isContainedIndir())
{
// When CFG is enabled we should not be emitting any non-register indirect calls.
assert(!compiler->opts.IsCFGEnabled() ||
call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL) ||
call->IsHelperCall(compiler, CORINFO_HELP_DISPATCH_INDIRECT_CALL));
if (target->AsIndir()->HasBase() && target->AsIndir()->Base()->isContainedIntOrIImmed())
{
// Note that if gtControlExpr is an indir of an absolute address, we mark it as
// contained only if it can be encoded as PC-relative offset.
assert(target->AsIndir()->Base()->AsIntConCommon()->FitsInAddrBase(compiler));
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN_INDIR,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
(void*) target->AsIndir()->Base()->AsIntConCommon()->IconValue()
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
else
{
// For fast tailcalls this is happening in epilog, so we should
// have already consumed target in genCall.
if (!call->IsFastTailCall())
{
genConsumeAddress(target->AsIndir()->Addr());
}
// clang-format off
genEmitCallIndir(emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
target->AsIndir()
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
call->IsFastTailCall());
// clang-format on
}
}
else
{
// We have already generated code for gtControlExpr evaluating it into a register.
// We just need to emit "call reg" in this case.
assert(genIsValidIntReg(target->GetRegNum()));
// For fast tailcalls this is happening in epilog, so we should
// have already consumed target in genCall.
if (!call->IsFastTailCall())
{
genConsumeReg(target);
}
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr // addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
target->GetRegNum(),
call->IsFastTailCall());
// clang-format on
}
}
else
{
// If we have no target and this is a call with indirection cell
// then emit call through that indir cell. This means we generate e.g.
// lea r11, [addr of cell]
// call [r11]
        // which is more efficient than
// lea r11, [addr of cell]
// call [addr of cell]
regNumber indirCellReg = getCallIndirectionCellReg(call);
if (indirCellReg != REG_NA)
{
// clang-format off
GetEmitter()->emitIns_Call(
emitter::EC_INDIR_ARD,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
0,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
di, indirCellReg, REG_NA, 0, 0,
call->IsFastTailCall());
// clang-format on
}
#ifdef FEATURE_READYTORUN
else if (call->gtEntryPoint.addr != nullptr)
{
emitter::EmitCallType type =
(call->gtEntryPoint.accessType == IAT_VALUE) ? emitter::EC_FUNC_TOKEN : emitter::EC_FUNC_TOKEN_INDIR;
// clang-format off
genEmitCall(type,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
(void*)call->gtEntryPoint.addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
#endif
else
{
// Generate a direct call to a non-virtual user defined or helper method
assert(call->gtCallType == CT_HELPER || call->gtCallType == CT_USER_FUNC);
void* addr = nullptr;
if (call->gtCallType == CT_HELPER)
{
// Direct call to a helper method.
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
void* pAddr = nullptr;
addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
assert(pAddr == nullptr);
}
else
{
// Direct call to a non-virtual user function.
addr = call->gtDirectCallAddress;
}
assert(addr != nullptr);
// Non-virtual direct calls to known addresses
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
addr
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
}
}
//------------------------------------------------------------------------
// genJmpMethod: Produce code for a GT_JMP node.
//
// Arguments:
//    jmp - the GT_JMP node
//
// Notes:
//    The arguments of the caller need to be transferred to the callee before exiting the caller.
//    The actual jump to the callee is generated as part of the caller's epilog sequence.
//    Therefore the codegen of GT_JMP only needs to ensure that the callee arguments are correctly set up.
//
void CodeGen::genJmpMethod(GenTree* jmp)
{
assert(jmp->OperGet() == GT_JMP);
assert(compiler->compJmpOpUsed);
// If no arguments, nothing to do
if (compiler->info.compArgsCount == 0)
{
return;
}
// Make sure register arguments are in their initial registers
// and stack arguments are put back as well.
unsigned varNum;
LclVarDsc* varDsc;
    // First move any enregistered stack arguments back to the stack.
    // At the same time any reg arg not in the correct reg is moved back to its stack location.
    //
    // We are not strictly required to spill reg args that are not in the desired reg for a jmp call.
    // But that would require us to deal with circularity while moving values around. Spilling
    // to stack makes the implementation simple, which is not a bad trade-off given jmp calls
    // are not frequent.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
if (varDsc->lvIsRegArg && (varDsc->GetRegNum() != REG_STK))
{
            // Skip reg args which are already in the right register for the jmp call.
// If not, we will spill such args to their stack locations.
//
// If we need to generate a tail call profiler hook, then spill all
// arg regs to free them up for the callback.
if (!compiler->compIsProfilerHookNeeded() && (varDsc->GetRegNum() == varDsc->GetArgReg()))
{
continue;
}
}
else if (varDsc->GetRegNum() == REG_STK)
{
// Skip args which are currently living on the stack.
continue;
}
// If we came here it means either a reg argument not in the right register or
// a stack argument currently living in a register. In either case the following
// assert should hold.
assert(varDsc->GetRegNum() != REG_STK);
assert(!varDsc->lvIsStructField || (compiler->lvaGetDesc(varDsc->lvParentLcl)->lvFieldCnt == 1));
var_types storeType = varDsc->GetActualRegisterType(); // We own the memory and can use the full move.
GetEmitter()->emitIns_S_R(ins_Store(storeType), emitTypeSize(storeType), varDsc->GetRegNum(), varNum, 0);
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be expecting it.
// Therefore manually update life of varDsc->GetRegNum().
regMaskTP tempMask = varDsc->lvRegMask();
regSet.RemoveMaskVars(tempMask);
gcInfo.gcMarkRegSetNpt(tempMask);
if (compiler->lvaIsGCTracked(varDsc))
{
#ifdef DEBUG
if (!VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming live\n", varNum);
}
else
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing live\n", varNum);
}
#endif // DEBUG
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
#ifdef PROFILING_SUPPORTED
// At this point all arg regs are free.
// Emit tail call profiler callback.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif
// Next move any un-enregistered register arguments back to their registers.
regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
// Skip if arg not passed in a register.
if (!varDsc->lvIsRegArg)
{
continue;
}
#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(varDsc))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
assert(typeHnd != nullptr);
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc);
assert(structDesc.passedInRegisters);
unsigned __int8 offset0 = 0;
unsigned __int8 offset1 = 0;
var_types type0 = TYP_UNKNOWN;
var_types type1 = TYP_UNKNOWN;
// Get the eightbyte data
compiler->GetStructTypeOffset(structDesc, &type0, &type1, &offset0, &offset1);
// Move the values into the right registers.
//
// Update varDsc->GetArgReg() and lvOtherArgReg life and GC Info to indicate varDsc stack slot is dead and
// argReg is going live. Note that we cannot modify varDsc->GetRegNum() and lvOtherArgReg here
// because another basic block may not be expecting it.
// Therefore manually update life of argReg. Note that GT_JMP marks
// the end of the basic block and after which reg life and gc info will be recomputed for the new block in
// genCodeForBBList().
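// Illustrative example (SysV classification, not emitted verbatim): for a struct { double d; long i; }
// argument the descriptor yields type0 = TYP_DOUBLE at offset 0 and type1 = TYP_LONG at offset 8,
// so the first eightbyte is reloaded into the struct's SSE argument register and the second into
// its integer argument register.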
if (type0 != TYP_UNKNOWN)
{
GetEmitter()->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), varDsc->GetArgReg(), varNum, offset0);
regSet.SetMaskVars(regSet.GetMaskVars() | genRegMask(varDsc->GetArgReg()));
gcInfo.gcMarkRegPtrVal(varDsc->GetArgReg(), type0);
}
if (type1 != TYP_UNKNOWN)
{
GetEmitter()->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), varDsc->GetOtherArgReg(), varNum,
offset1);
regSet.SetMaskVars(regSet.GetMaskVars() | genRegMask(varDsc->GetOtherArgReg()));
gcInfo.gcMarkRegPtrVal(varDsc->GetOtherArgReg(), type1);
}
if (varDsc->lvTracked)
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
else
#endif // !defined(UNIX_AMD64_ABI)
{
// Register argument
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
noway_assert(
isRegParamType(genActualType(varDsc->TypeGet())) ||
(varTypeIsStruct(varDsc->TypeGet()) && compiler->isTrivialPointerSizedStruct(varDsc->GetStructHnd())));
#else
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
#endif // TARGET_X86
// Is register argument already in the right register?
// If not load it from its stack location.
var_types loadType = varDsc->GetRegisterType();
#ifdef TARGET_X86
if (varTypeIsStruct(varDsc->TypeGet()))
{
// Treat trivial pointer-sized structs as a pointer sized primitive
// for the purposes of registers.
loadType = TYP_I_IMPL;
}
#endif
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
if (varDsc->GetRegNum() != argReg)
{
assert(genIsValidReg(argReg));
GetEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
// Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be
// expecting it. Therefore manually update life of argReg. Note that GT_JMP marks the end of the
// basic block and after which reg life and gc info will be recomputed for the new block in
// genCodeForBBList().
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (compiler->lvaIsGCTracked(varDsc))
{
#ifdef DEBUG
if (VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming dead\n", varNum);
}
else
{
JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing dead\n", varNum);
}
#endif // DEBUG
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
}
#if defined(TARGET_AMD64)
// In case of a jmp call to a vararg method also pass the float/double arg in the corresponding int arg
// register. This is due to the AMD64 ABI which requires floating point values passed to varargs functions to
// be passed in both integer and floating point registers. It doesn't apply to x86, which passes floating point
// values on the stack.
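// For illustration only: under the Windows x64 varargs convention a double homed in XMM1 is also
// copied into its paired integer register RDX, roughly "movq rdx, xmm1", so the callee can pick
// the value up from either register file.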
if (compFeatureVarArg() && compiler->info.compIsVarArgs)
{
regNumber intArgReg;
var_types loadType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
if (varTypeIsFloating(loadType))
{
intArgReg = compiler->getCallArgIntRegister(argReg);
inst_Mov(TYP_LONG, intArgReg, argReg, /* canSkip */ false, emitActualTypeSize(loadType));
}
else
{
intArgReg = argReg;
}
fixedIntArgMask |= genRegMask(intArgReg);
if (intArgReg == REG_ARG_0)
{
assert(firstArgVarNum == BAD_VAR_NUM);
firstArgVarNum = varNum;
}
}
#endif // TARGET_AMD64
}
#if defined(TARGET_AMD64)
// Jmp call to a vararg method - if the method has fewer than 4 fixed arguments,
// load the remaining arg registers (both int and float) from the corresponding
// shadow stack slots. This is for the reason that we don't know the number and type
// of non-fixed params passed by the caller, therefore we have to assume the worst case
// of caller passing float/double args both in int and float arg regs.
//
// This doesn't apply to x86, which doesn't pass floating point values in floating
// point registers.
//
// The caller could have passed gc-ref/byref type var args. Since these are var args
// the callee has no way of knowing their gc-ness. Therefore, mark the region that loads
// remaining arg registers from shadow stack slots as non-gc interruptible.
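// For illustration (assuming the Windows x64 convention and a single fixed argument in RCX),
// the reload loop below would emit roughly:
//   mov  rdx, [second shadow slot]
//   movq xmm1, rdx
//   mov  r8,  [third shadow slot]
//   movq xmm2, r8
//   mov  r9,  [fourth shadow slot]
//   movq xmm3, r9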
if (compFeatureVarArg() && fixedIntArgMask != RBM_NONE)
{
assert(compiler->info.compIsVarArgs);
assert(firstArgVarNum != BAD_VAR_NUM);
regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
if (remainingIntArgMask != RBM_NONE)
{
GetEmitter()->emitDisableGC();
for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
regNumber argReg = intArgRegs[argNum];
regMaskTP argRegMask = genRegMask(argReg);
if ((remainingIntArgMask & argRegMask) != 0)
{
remainingIntArgMask &= ~argRegMask;
GetEmitter()->emitIns_R_S(INS_mov, EA_8BYTE, argReg, firstArgVarNum, argOffset);
// also load it in corresponding float arg reg
regNumber floatReg = compiler->getCallArgFloatRegister(argReg);
inst_Mov(TYP_DOUBLE, floatReg, argReg, /* canSkip */ false, emitActualTypeSize(TYP_I_IMPL));
}
argOffset += REGSIZE_BYTES;
}
GetEmitter()->emitEnableGC();
}
}
#endif // TARGET_AMD64
}
// produce code for a GT_LEA subnode
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
emitAttr size = emitTypeSize(lea);
genConsumeOperands(lea);
if (lea->Base() && lea->Index())
{
regNumber baseReg = lea->Base()->GetRegNum();
regNumber indexReg = lea->Index()->GetRegNum();
GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->GetRegNum(), baseReg, indexReg, lea->gtScale, lea->Offset());
}
else if (lea->Base())
{
GetEmitter()->emitIns_R_AR(INS_lea, size, lea->GetRegNum(), lea->Base()->GetRegNum(), lea->Offset());
}
else if (lea->Index())
{
GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->GetRegNum(), REG_NA, lea->Index()->GetRegNum(), lea->gtScale,
lea->Offset());
}
genProduceReg(lea);
}
//------------------------------------------------------------------------
// genCompareFloat: Generate code for comparing two floating point values
//
// Arguments:
// treeNode - the compare tree
//
void CodeGen::genCompareFloat(GenTree* treeNode)
{
assert(treeNode->OperIsCompare());
GenTreeOp* tree = treeNode->AsOp();
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = op1->TypeGet();
var_types op2Type = op2->TypeGet();
genConsumeOperands(tree);
assert(varTypeIsFloating(op1Type));
assert(op1Type == op2Type);
regNumber targetReg = treeNode->GetRegNum();
instruction ins;
emitAttr cmpAttr;
GenCondition condition = GenCondition::FromFloatRelop(treeNode);
if (condition.PreferSwap())
{
condition = GenCondition::Swap(condition);
std::swap(op1, op2);
}
ins = (op1Type == TYP_FLOAT) ? INS_ucomiss : INS_ucomisd;
cmpAttr = emitTypeSize(op1Type);
GetEmitter()->emitInsBinary(ins, cmpAttr, op1, op2);
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
if ((condition.GetCode() == GenCondition::FNEU) && (op1->GetRegNum() == op2->GetRegNum()))
{
// For floating point, `x != x` is a common way of
// checking for NaN. So, in the case where both
// operands are the same, we can optimize codegen
// to only do a single check.
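// Illustrative result for "x != x" with x in xmm0 (a NaN check), roughly:
//   ucomiss xmm0, xmm0
//   setp    al
// i.e. only the parity flag needs to be materialized instead of the longer unordered-NE sequence.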
condition = GenCondition(GenCondition::P);
}
inst_SETCC(condition, treeNode->TypeGet(), targetReg);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCompareInt: Generate code for comparing ints or, on amd64, longs.
//
// Arguments:
// treeNode - the compare tree
//
// Return Value:
// None.
void CodeGen::genCompareInt(GenTree* treeNode)
{
assert(treeNode->OperIsCompare() || treeNode->OperIs(GT_CMP));
GenTreeOp* tree = treeNode->AsOp();
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = op1->TypeGet();
var_types op2Type = op2->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
bool canReuseFlags = false;
genConsumeOperands(tree);
assert(!op1->isContainedIntOrIImmed());
assert(!varTypeIsFloating(op2Type));
instruction ins;
var_types type = TYP_UNKNOWN;
if (tree->OperIs(GT_TEST_EQ, GT_TEST_NE))
{
ins = INS_test;
// Unlike many xarch instructions TEST doesn't have a form with a 16/32/64 bit first operand and
// an 8 bit immediate second operand. But if the immediate value fits in 8 bits then we can simply
// emit an 8 bit TEST instruction, unless we're targeting x86 and the first operand is a non-byteable
// register.
// Note that lowering does something similar but its main purpose is to allow memory operands to be
// contained so it doesn't handle other kind of operands. It could do more but on x86 that results
// in additional register constraints and that may be worse than wasting 3 bytes on an immediate.
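// For example (illustrative): "test ecx, 8" needs a 4-byte immediate, while the equivalent
// "test cl, 8" encodes the constant in a single byte, saving 3 bytes when the value fits in 8 bits.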
if (
#ifdef TARGET_X86
(!op1->isUsedFromReg() || isByteReg(op1->GetRegNum())) &&
#endif
(op2->IsCnsIntOrI() && FitsIn<uint8_t>(op2->AsIntCon()->IconValue())))
{
type = TYP_UBYTE;
}
}
else if (op1->isUsedFromReg() && op2->IsIntegralConst(0))
{
if (compiler->opts.OptimizationEnabled())
{
emitAttr op1Size = emitActualTypeSize(op1->TypeGet());
assert((int)op1Size >= 4);
// Optimize "x<0" and "x>=0" to "x>>31" if "x" is not a jump condition and in a reg.
// Morph/Lowering are responsible for rotating "0<x" to "x>0", so we won't handle it here.
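// Illustrative codegen, assuming "x" lives in ECX and the result goes to EAX:
//   x <  0  ->  mov eax, ecx
//               shr eax, 31
//   x >= 0  ->  mov eax, ecx
//               not eax
//               shr eax, 31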
if ((targetReg != REG_NA) && tree->OperIs(GT_LT, GT_GE) && !tree->IsUnsigned())
{
inst_Mov(op1->TypeGet(), targetReg, op1->GetRegNum(), /* canSkip */ true);
if (tree->OperIs(GT_GE))
{
// emit "not" for "x>=0" case
inst_RV(INS_not, targetReg, op1->TypeGet());
}
inst_RV_IV(INS_shr_N, targetReg, (int)op1Size * 8 - 1, op1Size);
genProduceReg(tree);
return;
}
canReuseFlags = true;
}
// We're comparing a register to 0 so we can generate "test reg1, reg1"
// instead of the longer "cmp reg1, 0"
ins = INS_test;
op2 = op1;
}
else
{
ins = INS_cmp;
}
if (type == TYP_UNKNOWN)
{
if (op1Type == op2Type)
{
type = op1Type;
}
else if (genTypeSize(op1Type) == genTypeSize(op2Type))
{
// If the types are different but have the same size then we'll use TYP_INT or TYP_LONG.
// This primarily deals with small type mixes (e.g. byte/ubyte) that need to be widened
// and compared as int. We should not get long type mixes here but handle that as well
// just in case.
type = genTypeSize(op1Type) == 8 ? TYP_LONG : TYP_INT;
}
else
{
// If the types are different, simply use TYP_INT. This deals with small type/int type
// mixes (e.g. byte/short ubyte/int) that need to be widened and compared as int.
// Lowering is expected to handle any mixes that involve long types (e.g. int/long).
type = TYP_INT;
}
// The common type cannot be smaller than any of the operand types, we're probably mixing int/long
assert(genTypeSize(type) >= max(genTypeSize(op1Type), genTypeSize(op2Type)));
// Small unsigned int types (TYP_BOOL can use anything) should use unsigned comparisons
assert(!(varTypeIsSmallInt(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0));
// If op1 is smaller then it cannot be in memory, we're probably missing a cast
assert((genTypeSize(op1Type) >= genTypeSize(type)) || !op1->isUsedFromMemory());
// If op2 is smaller then it cannot be in memory, we're probably missing a cast
assert((genTypeSize(op2Type) >= genTypeSize(type)) || !op2->isUsedFromMemory());
// If we ended up with a small type and op2 is a constant then make sure we don't lose constant bits
assert(!op2->IsCnsIntOrI() || !varTypeIsSmall(type) || FitsIn(type, op2->AsIntCon()->IconValue()));
}
// The type cannot be larger than the machine word size
assert(genTypeSize(type) <= genTypeSize(TYP_I_IMPL));
// TYP_UINT and TYP_ULONG should not appear here, only small types can be unsigned
assert(!varTypeIsUnsigned(type) || varTypeIsSmall(type));
// The sign jump optimization flag should only be set by the check that follows.
assert((tree->gtFlags & GTF_RELOP_SJUMP_OPT) == 0);
if (canReuseFlags && emit->AreFlagsSetToZeroCmp(op1->GetRegNum(), emitTypeSize(type), tree->OperGet()))
{
JITDUMP("Not emitting compare due to flags being already set\n");
}
else if (canReuseFlags && emit->AreFlagsSetForSignJumpOpt(op1->GetRegNum(), emitTypeSize(type), tree))
{
JITDUMP("Not emitting compare due to sign being already set, follow up instr will transform jump\n");
tree->gtFlags |= GTF_RELOP_SJUMP_OPT;
}
else
{
emit->emitInsBinary(ins, emitTypeSize(type), op1, op2);
}
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
inst_SETCC(GenCondition::FromIntegralRelop(tree), tree->TypeGet(), targetReg);
genProduceReg(tree);
}
}
#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------
// genLongToIntCast: Generate code for long to int casts on x86.
//
// Arguments:
// cast - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// The cast node and its sources (via GT_LONG) must have been assigned registers.
// The destination cannot be a floating point type or a small integer type.
//
void CodeGen::genLongToIntCast(GenTree* cast)
{
assert(cast->OperGet() == GT_CAST);
GenTree* src = cast->gtGetOp1();
noway_assert(src->OperGet() == GT_LONG);
genConsumeRegs(src);
var_types srcType = ((cast->gtFlags & GTF_UNSIGNED) != 0) ? TYP_ULONG : TYP_LONG;
var_types dstType = cast->CastToType();
regNumber loSrcReg = src->gtGetOp1()->GetRegNum();
regNumber hiSrcReg = src->gtGetOp2()->GetRegNum();
regNumber dstReg = cast->GetRegNum();
assert((dstType == TYP_INT) || (dstType == TYP_UINT));
assert(genIsValidIntReg(loSrcReg));
assert(genIsValidIntReg(hiSrcReg));
assert(genIsValidIntReg(dstReg));
if (cast->gtOverflow())
{
//
// Generate an overflow check for [u]long to [u]int casts:
//
// long -> int - check if the upper 33 bits are all 0 or all 1
//
// ulong -> int - check if the upper 33 bits are all 0
//
// long -> uint - check if the upper 32 bits are all 0
// ulong -> uint - check if the upper 32 bits are all 0
//
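// Illustrative sequence for the signed long -> int case (loSrc/hiSrc are the two halves):
//   test loSrc, loSrc
//   js   allOne              ; low half negative => upper bits must be all 1
//   test hiSrc, hiSrc
//   jne  <overflow throw>
//   jmp  success
// allOne:
//   cmp  hiSrc, -1
//   jne  <overflow throw>
// success: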
if ((srcType == TYP_LONG) && (dstType == TYP_INT))
{
BasicBlock* allOne = genCreateTempLabel();
BasicBlock* success = genCreateTempLabel();
inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
inst_JMP(EJ_js, allOne);
inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
inst_JMP(EJ_jmp, success);
genDefineTempLabel(allOne);
inst_RV_IV(INS_cmp, hiSrcReg, -1, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
genDefineTempLabel(success);
}
else
{
if ((srcType == TYP_ULONG) && (dstType == TYP_INT))
{
inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_js, SCK_OVERFLOW);
}
inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
}
}
inst_Mov(TYP_INT, dstReg, loSrcReg, /* canSkip */ true);
genProduceReg(cast);
}
#endif
//------------------------------------------------------------------------
// genIntCastOverflowCheck: Generate overflow checking code for an integer cast.
//
// Arguments:
// cast - The GT_CAST node
// desc - The cast description
// reg - The register containing the value to check
//
void CodeGen::genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg)
{
switch (desc.CheckKind())
{
case GenIntCastDesc::CHECK_POSITIVE:
GetEmitter()->emitIns_R_R(INS_test, EA_SIZE(desc.CheckSrcSize()), reg, reg);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::CHECK_UINT_RANGE:
{
// We need to check if the value is not greater than 0xFFFFFFFF but this value
// cannot be encoded in an immediate operand. Use a right shift to test if the
// upper 32 bits are zero. This requires a temporary register.
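// Illustrative sequence (value in RCX, temp in RAX):
//   mov rax, rcx
//   shr rax, 32
//   jne <overflow throw>     ; any set upper bit means the value does not fit in 32 bits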
const regNumber tempReg = cast->GetSingleTempReg();
assert(tempReg != reg);
GetEmitter()->emitIns_Mov(INS_mov, EA_8BYTE, tempReg, reg, /* canSkip */ false);
GetEmitter()->emitIns_R_I(INS_shr_N, EA_8BYTE, tempReg, 32);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
}
break;
case GenIntCastDesc::CHECK_POSITIVE_INT_RANGE:
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
genJumpToThrowHlpBlk(EJ_ja, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_INT_RANGE:
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
genJumpToThrowHlpBlk(EJ_jg, SCK_OVERFLOW);
GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MIN);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
break;
#endif
default:
{
assert(desc.CheckKind() == GenIntCastDesc::CHECK_SMALL_INT_RANGE);
const int castMaxValue = desc.CheckSmallIntMax();
const int castMinValue = desc.CheckSmallIntMin();
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_ja : EJ_jg, SCK_OVERFLOW);
if (castMinValue != 0)
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
}
}
break;
}
}
//------------------------------------------------------------------------
// genIntToIntCast: Generate code for an integer cast, with or without overflow check.
//
// Arguments:
// cast - The GT_CAST node
//
// Assumptions:
// The cast node is not a contained node and must have an assigned register.
// Neither the source nor target type can be a floating point type.
// On x86 casts to (U)BYTE require that the source be in a byte register.
//
// TODO-XArch-CQ: Allow castOp to be a contained node without an assigned register.
//
void CodeGen::genIntToIntCast(GenTreeCast* cast)
{
genConsumeRegs(cast->gtGetOp1());
const regNumber srcReg = cast->gtGetOp1()->GetRegNum();
const regNumber dstReg = cast->GetRegNum();
emitter* emit = GetEmitter();
assert(genIsValidIntReg(srcReg));
assert(genIsValidIntReg(dstReg));
GenIntCastDesc desc(cast);
if (desc.CheckKind() != GenIntCastDesc::CHECK_NONE)
{
genIntCastOverflowCheck(cast, desc, srcReg);
}
instruction ins;
unsigned insSize;
bool canSkip = false;
switch (desc.ExtendKind())
{
case GenIntCastDesc::ZERO_EXTEND_SMALL_INT:
ins = INS_movzx;
insSize = desc.ExtendSrcSize();
break;
case GenIntCastDesc::SIGN_EXTEND_SMALL_INT:
ins = INS_movsx;
insSize = desc.ExtendSrcSize();
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::ZERO_EXTEND_INT:
ins = INS_mov;
insSize = 4;
canSkip = compiler->opts.OptimizationEnabled() && emit->AreUpper32BitsZero(srcReg);
break;
case GenIntCastDesc::SIGN_EXTEND_INT:
ins = INS_movsxd;
insSize = 4;
break;
#endif
default:
assert(desc.ExtendKind() == GenIntCastDesc::COPY);
ins = INS_mov;
insSize = desc.ExtendSrcSize();
canSkip = true;
break;
}
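// For illustration (register choices are arbitrary), the cases above typically produce:
//   sign-extend  int8   -> int32 : movsx  eax, cl
//   zero-extend  uint8  -> int32 : movzx  eax, cl
//   sign-extend  int32  -> int64 : movsxd rax, ecx
//   zero-extend  uint32 -> int64 : mov    eax, ecx   ; the upper 32 bits are implicitly cleared
//   plain copy                   : mov    eax, ecx   ; skipped when src and dst registers match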
emit->emitIns_Mov(ins, EA_ATTR(insSize), dstReg, srcReg, canSkip);
genProduceReg(cast);
}
//------------------------------------------------------------------------
// genFloatToFloatCast: Generate code for a cast between float and double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// The cast is between float and double or vice versa.
//
void CodeGen::genFloatToFloatCast(GenTree* treeNode)
{
// float <--> double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
// If not contained, must be a valid float reg.
if (op1->isUsedFromReg())
{
assert(genIsValidFloatReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
genConsumeOperands(treeNode->AsOp());
if (srcType == dstType && (op1->isUsedFromReg() && (targetReg == op1->GetRegNum())))
{
// source and destinations types are the same and also reside in the same register.
// we just need to consume and produce the reg in this case.
;
}
else
{
instruction ins = ins_FloatConv(dstType, srcType);
GetEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genIntToFloatCast: Generate code to cast an int/long to float/double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType= int32/uint32/int64/uint64 and DstType=float/double.
//
void CodeGen::genIntToFloatCast(GenTree* treeNode)
{
// int type --> float/double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
if (op1->isUsedFromReg())
{
assert(genIsValidIntReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
#if !defined(TARGET_64BIT)
// We expect morph to replace long to float/double casts with helper calls
noway_assert(!varTypeIsLong(srcType));
#endif // !defined(TARGET_64BIT)
// Since xarch emitter doesn't handle reporting gc-info correctly while casting away gc-ness we
// ensure srcType of a cast is non gc-type. Codegen should never see BYREF as source type except
// for GT_LCL_VAR_ADDR and GT_LCL_FLD_ADDR that represent stack addresses and can be considered
// as TYP_I_IMPL. In all other cases where src operand is a gc-type and not known to be on stack,
// Front-end (see fgMorphCast()) ensures this by assigning gc-type local to a non gc-type
// temp and using temp as operand of cast operation.
if (srcType == TYP_BYREF)
{
noway_assert(op1->OperGet() == GT_LCL_VAR_ADDR || op1->OperGet() == GT_LCL_FLD_ADDR);
srcType = TYP_I_IMPL;
}
// force the srcType to unsigned if GT_UNSIGNED flag is set
if (treeNode->gtFlags & GTF_UNSIGNED)
{
srcType = varTypeToUnsigned(srcType);
}
noway_assert(!varTypeIsGC(srcType));
// We should never be seeing srcType whose size is not sizeof(int) nor sizeof(long).
// For conversions from byte/sbyte/int16/uint16 to float/double, we would expect
// either the front-end or lowering phase to have generated two levels of cast.
// The first one is for widening smaller int type to int32 and the second one is
// to the float/double.
emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
noway_assert((srcSize == EA_ATTR(genTypeSize(TYP_INT))) || (srcSize == EA_ATTR(genTypeSize(TYP_LONG))));
// Also we don't expect to see uint32 -> float/double and uint64 -> float conversions
// here since they should have been lowered appropriately.
noway_assert(srcType != TYP_UINT);
noway_assert((srcType != TYP_ULONG) || (dstType != TYP_FLOAT));
// To convert int to a float/double, cvtsi2ss/sd SSE2 instruction is used
// which does a partial write to lower 4/8 bytes of xmm register keeping the other
// upper bytes unmodified. If "cvtsi2ss/sd xmmReg, r32/r64" occurs inside a loop,
// the partial write could introduce a false dependency and could cause a stall
// if there are further uses of xmmReg. We have such a case occurring with a
// customer reported version of SpectralNorm benchmark, resulting in 2x perf
// regression. To avoid false dependency, we emit "xorps xmmReg, xmmReg" before
// cvtsi2ss/sd instruction.
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R(INS_xorps, EA_4BYTE, treeNode->GetRegNum(), treeNode->GetRegNum());
// Note that here we need to specify srcType that will determine
// the size of source reg/mem operand and rex.w prefix.
instruction ins = ins_FloatConv(dstType, TYP_INT);
GetEmitter()->emitInsBinary(ins, emitTypeSize(srcType), treeNode, op1);
// Handle the case of srcType = TYP_ULONG. SSE2 conversion instruction
// will interpret ULONG value as LONG. Hence we need to adjust the
// result if sign-bit of srcType is set.
if (srcType == TYP_ULONG)
{
// The instruction sequence below is less accurate than what clang
// and gcc generate. However, we keep the current sequence for backward compatibility.
// If we change the instructions below, FloatingPointUtils::convertUInt64ToDouble
// should be also updated for consistent conversion result.
assert(dstType == TYP_DOUBLE);
assert(op1->isUsedFromReg());
// Set the flags without modifying op1.
// test op1Reg, op1Reg
inst_RV_RV(INS_test, op1->GetRegNum(), op1->GetRegNum(), srcType);
// No need to adjust result if op1 >= 0 i.e. positive
// Jge label
BasicBlock* label = genCreateTempLabel();
inst_JMP(EJ_jge, label);
// Adjust the result
// result = result + 0x43f00000 00000000
// addsd resultReg, 0x43f00000 00000000
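// (For illustration: 0x43f0000000000000 is the IEEE-754 encoding of 2^64 as a double, so an
// input of ULONG_MAX, which cvtsi2sd reads as the signed value -1, ends up as -1.0 + 2^64,
// i.e. ULONG_MAX rounded to the nearest representable double.)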
CORINFO_FIELD_HANDLE* cns = &u8ToDblBitmask;
if (*cns == nullptr)
{
double d;
static_assert_no_msg(sizeof(double) == sizeof(__int64));
*((__int64*)&d) = 0x43f0000000000000LL;
*cns = GetEmitter()->emitFltOrDblConst(d, EA_8BYTE);
}
GetEmitter()->emitIns_R_C(INS_addsd, EA_8BYTE, treeNode->GetRegNum(), *cns, 0);
genDefineTempLabel(label);
}
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genFloatToIntCast: Generate code to cast float/double to int/long
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType=float/double and DstType= int32/uint32/int64/uint64
//
// TODO-XArch-CQ: (Low-pri) - generate in-line code when DstType = uint64
//
void CodeGen::genFloatToIntCast(GenTree* treeNode)
{
// we don't expect to see overflow detecting float/double --> int type conversions here
// as they should have been converted into helper calls by front-end.
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidIntReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
#ifdef DEBUG
if (op1->isUsedFromReg())
{
assert(genIsValidFloatReg(op1->GetRegNum()));
}
#endif
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
// We should never be seeing dstType whose size is neither sizeof(TYP_INT) nor sizeof(TYP_LONG).
// For conversions to byte/sbyte/int16/uint16 from float/double, we would expect the
// front-end or lowering phase to have generated two levels of cast. The first one is
// for float or double to int32/uint32 and the second one for narrowing int32/uint32 to
// the required smaller int type.
emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
noway_assert((dstSize == EA_ATTR(genTypeSize(TYP_INT))) || (dstSize == EA_ATTR(genTypeSize(TYP_LONG))));
// We shouldn't be seeing uint64 here as it should have been converted
// into a helper call by either front-end or lowering phase.
noway_assert(!varTypeIsUnsigned(dstType) || (dstSize != EA_ATTR(genTypeSize(TYP_LONG))));
// If the dstType is TYP_UINT, we have 32-bits to encode the
// float number. Any of 33rd or above bits can be the sign bit.
// To achieve it we pretend as if we are converting it to a long.
if (varTypeIsUnsigned(dstType) && (dstSize == EA_ATTR(genTypeSize(TYP_INT))))
{
dstType = TYP_LONG;
}
// Note that we need to specify dstType here so that it will determine
// the size of destination integer register and also the rex.w prefix.
genConsumeOperands(treeNode->AsOp());
instruction ins = ins_FloatConv(TYP_INT, srcType);
GetEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCkfinite: Generate code for ckfinite opcode.
//
// Arguments:
// treeNode - The GT_CKFINITE node
//
// Return Value:
// None.
//
// Assumptions:
// GT_CKFINITE node has reserved an internal register.
//
// TODO-XArch-CQ - mark the operand as contained if known to be in
// memory (e.g. field or an array element).
//
void CodeGen::genCkfinite(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_CKFINITE);
GenTree* op1 = treeNode->AsOp()->gtOp1;
var_types targetType = treeNode->TypeGet();
int expMask = (targetType == TYP_FLOAT) ? 0x7F800000 : 0x7FF00000; // Bit mask to extract exponent.
regNumber targetReg = treeNode->GetRegNum();
// Extract exponent into a register.
regNumber tmpReg = treeNode->GetSingleTempReg();
genConsumeReg(op1);
#ifdef TARGET_64BIT
// Copy the floating-point value to an integer register. If we copied a double to a long, then
// right-shift the value so the high 32 bits of the floating-point value sit in the low 32
// bits of the integer register.
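// For TYP_DOUBLE the sequence below is roughly (illustrative register choices):
//   movq  rax, xmm0          ; copy the bits to an integer register
//   shr   rax, 32            ; bring the exponent into the low 32 bits
//   and   eax, 0x7FF00000
//   cmp   eax, 0x7FF00000
//   je    <ArithmeticException throw block>
//   movaps xmm1, xmm0        ; only if targetReg != srcReg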
regNumber srcReg = op1->GetRegNum();
var_types targetIntType = ((targetType == TYP_FLOAT) ? TYP_INT : TYP_LONG);
inst_Mov(targetIntType, tmpReg, srcReg, /* canSkip */ false, emitActualTypeSize(targetType));
if (targetType == TYP_DOUBLE)
{
// right shift by 32 bits to get to exponent.
inst_RV_SH(INS_shr, EA_8BYTE, tmpReg, 32);
}
// Mask exponent with all 1's and check if the exponent is all 1's
inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
// If exponent is all 1's, throw ArithmeticException
genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
// if it is a finite value copy it to targetReg
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
#else // !TARGET_64BIT
// If the target type is TYP_DOUBLE, we want to extract the high 32 bits into the register.
// There is no easy way to do this. To not require an extra register, we'll use shuffles
// to move the high 32 bits into the low 32 bits, then shuffle it back, since we
// need to produce the value into the target register.
//
// For TYP_DOUBLE, we'll generate (for targetReg != op1->GetRegNum()):
// movaps targetReg, op1->GetRegNum()
// shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
// mov_xmm2i tmpReg, targetReg // tmpReg <= Y
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// movaps targetReg, op1->GetRegNum() // copy the value again, instead of un-shuffling it
//
// For TYP_DOUBLE with (targetReg == op1->GetRegNum()):
// shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
// mov_xmm2i tmpReg, targetReg // tmpReg <= Y
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// shufps targetReg, targetReg, 0xB1 // ZWXY => WZYX
//
// For TYP_FLOAT, it's the same as TARGET_64BIT:
// mov_xmm2i tmpReg, targetReg // tmpReg <= low 32 bits
// and tmpReg, <mask>
// cmp tmpReg, <mask>
// je <throw block>
// movaps targetReg, op1->GetRegNum() // only if targetReg != op1->GetRegNum()
regNumber copyToTmpSrcReg; // The register we'll copy to the integer temp.
if (targetType == TYP_DOUBLE)
{
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, (int8_t)0xb1);
copyToTmpSrcReg = targetReg;
}
else
{
copyToTmpSrcReg = op1->GetRegNum();
}
// Copy only the low 32 bits. This will be the high order 32 bits of the floating-point
// value, no matter the floating-point type.
inst_Mov(TYP_INT, tmpReg, copyToTmpSrcReg, /* canSkip */ false, emitActualTypeSize(TYP_FLOAT));
// Mask exponent with all 1's and check if the exponent is all 1's
inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
// If exponent is all 1's, throw ArithmeticException
genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
if ((targetType == TYP_DOUBLE) && (targetReg == op1->GetRegNum()))
{
// We need to re-shuffle the targetReg to get the correct result.
inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, (int8_t)0xb1);
}
else
{
// In both the TYP_FLOAT and TYP_DOUBLE case, the op1 register is untouched,
// so copy it to the targetReg. This is faster and smaller for TYP_DOUBLE
// than re-shuffling the targetReg.
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
}
#endif // !TARGET_64BIT
genProduceReg(treeNode);
}
#ifdef TARGET_AMD64
int CodeGenInterface::genSPtoFPdelta() const
{
int delta;
#ifdef UNIX_AMD64_ABI
// We require frame chaining on Unix to support native tool unwinding (such as
// unwinding by the native debugger). We have a CLR-only extension to the
// unwind codes (UWOP_SET_FPREG_LARGE) to support SP->FP offsets larger than 240.
// If Unix ever supports EnC, the RSP == RBP assumption will have to be reevaluated.
delta = genTotalFrameSize();
#else // !UNIX_AMD64_ABI
// As per Amd64 ABI, RBP offset from initial RSP can be between 0 and 240 if
// RBP needs to be reported in unwind codes. This case would arise for methods
// with localloc.
if (compiler->compLocallocUsed)
{
// We cannot base delta computation on compLclFrameSize since it changes from
// tentative to final frame layout and hence there is a possibility of
// under-estimating offset of vars from FP, which in turn results in under-
// estimating instruction size.
//
// To be predictive and so as never to under-estimate offset of vars from FP
// we will always position FP at min(240, outgoing arg area size).
delta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize);
}
else if (compiler->opts.compDbgEnC)
{
// vm assumption on EnC methods is that rsp and rbp are equal
delta = 0;
}
else
{
delta = genTotalFrameSize();
}
#endif // !UNIX_AMD64_ABI
return delta;
}
//---------------------------------------------------------------------
// genTotalFrameSize - return the total size of the stack frame, including local size,
// callee-saved register size, etc. For AMD64, this does not include the caller-pushed
// return address.
//
// Return value:
// Total frame size
//
int CodeGenInterface::genTotalFrameSize() const
{
assert(!IsUninitialized(compiler->compCalleeRegsPushed));
int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
assert(totalFrameSize >= 0);
return totalFrameSize;
}
//---------------------------------------------------------------------
// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
// This number is going to be negative, since the Caller-SP is at a higher
// address than the frame pointer.
//
// There must be a frame pointer to call this function!
//
// We can't compute this directly from the Caller-SP, since the frame pointer
// is based on a maximum delta from Initial-SP, so first we find SP, then
// compute the FP offset.
int CodeGenInterface::genCallerSPtoFPdelta() const
{
assert(isFramePointerUsed());
int callerSPtoFPdelta;
callerSPtoFPdelta = genCallerSPtoInitialSPdelta() + genSPtoFPdelta();
assert(callerSPtoFPdelta <= 0);
return callerSPtoFPdelta;
}
//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
// This number will be negative.
int CodeGenInterface::genCallerSPtoInitialSPdelta() const
{
int callerSPtoSPdelta = 0;
callerSPtoSPdelta -= genTotalFrameSize();
callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
// compCalleeRegsPushed does not account for the frame pointer
// TODO-Cleanup: shouldn't this be part of genTotalFrameSize?
if (isFramePointerUsed())
{
callerSPtoSPdelta -= REGSIZE_BYTES;
}
assert(callerSPtoSPdelta <= 0);
return callerSPtoSPdelta;
}
#endif // TARGET_AMD64
//-----------------------------------------------------------------------------------------
// genSSE2BitwiseOp - generate SSE2 code for the given oper as "Operand BitWiseOp BitMask"
//
// Arguments:
// treeNode - tree node
//
// Return value:
// None
//
// Assumptions:
// i) tree oper is one of GT_NEG or GT_INTRINSIC Abs()
// ii) tree type is floating point type.
// iii) caller of this routine needs to call genProduceReg()
void CodeGen::genSSE2BitwiseOp(GenTree* treeNode)
{
regNumber targetReg = treeNode->GetRegNum();
regNumber operandReg = genConsumeReg(treeNode->gtGetOp1());
emitAttr size = emitTypeSize(treeNode);
assert(varTypeIsFloating(treeNode->TypeGet()));
assert(treeNode->gtGetOp1()->isUsedFromReg());
CORINFO_FIELD_HANDLE* maskFld = nullptr;
UINT64 mask = 0;
instruction ins = INS_invalid;
if (treeNode->OperIs(GT_NEG))
{
// Neg(x) = flip the sign bit.
// Neg(f) = f ^ 0x80000000 x4 (packed)
// Neg(d) = d ^ 0x8000000000000000 x2 (packed)
ins = INS_xorps;
mask = treeNode->TypeIs(TYP_FLOAT) ? 0x8000000080000000UL : 0x8000000000000000UL;
maskFld = treeNode->TypeIs(TYP_FLOAT) ? &negBitmaskFlt : &negBitmaskDbl;
}
else if (treeNode->OperIs(GT_INTRINSIC))
{
assert(treeNode->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Abs);
// Abs(x) = set sign-bit to zero
// Abs(f) = f & 0x7fffffff x4 (packed)
// Abs(d) = d & 0x7fffffffffffffff x2 (packed)
ins = INS_andps;
mask = treeNode->TypeIs(TYP_FLOAT) ? 0x7fffffff7fffffffUL : 0x7fffffffffffffffUL;
maskFld = treeNode->TypeIs(TYP_FLOAT) ? &absBitmaskFlt : &absBitmaskDbl;
}
else
{
assert(!"genSSE2BitwiseOp: unsupported oper");
}
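// For illustration, Abs on a double typically ends up as a single masking instruction against a
// 16-byte constant placed in the data section (absBitmaskDbl), roughly:
//   andps xmm0, xmmword ptr [reloc]   ; 0x7fffffffffffffff x2 (packed)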
if (*maskFld == nullptr)
{
UINT64 maskPack[] = {mask, mask};
*maskFld = GetEmitter()->emitBlkConst(&maskPack, 16, 16, treeNode->TypeGet());
}
GetEmitter()->emitIns_SIMD_R_R_C(ins, size, targetReg, operandReg, *maskFld, 0);
}
//-----------------------------------------------------------------------------------------
// genSSE41RoundOp - generate SSE41 code for the given tree as a round operation
//
// Arguments:
// treeNode - tree node
//
// Return value:
// None
//
// Assumptions:
// i) SSE4.1 is supported by the underlying hardware
// ii) treeNode oper is a GT_INTRINSIC
// iii) treeNode type is a floating point type
// iv) treeNode is not used from memory
// v) tree oper is NI_System_Math{F}_Round, _Ceiling, _Floor, or _Truncate
// vi) caller of this routine needs to call genProduceReg()
void CodeGen::genSSE41RoundOp(GenTreeOp* treeNode)
{
// i) SSE4.1 is supported by the underlying hardware
assert(compiler->compIsaSupportedDebugOnly(InstructionSet_SSE41));
// ii) treeNode oper is a GT_INTRINSIC
assert(treeNode->OperGet() == GT_INTRINSIC);
GenTree* srcNode = treeNode->gtGetOp1();
// iii) treeNode type is floating point type
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
// iv) treeNode is not used from memory
assert(!treeNode->isUsedFromMemory());
genConsumeOperands(treeNode);
instruction ins = (treeNode->TypeGet() == TYP_FLOAT) ? INS_roundss : INS_roundsd;
emitAttr size = emitTypeSize(treeNode);
regNumber dstReg = treeNode->GetRegNum();
unsigned ival = 0;
// v) tree oper is NI_System_Math{F}_Round, _Ceiling, _Floor, or _Truncate
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Round:
ival = 4;
break;
case NI_System_Math_Ceiling:
ival = 10;
break;
case NI_System_Math_Floor:
ival = 9;
break;
case NI_System_Math_Truncate:
ival = 11;
break;
default:
ins = INS_invalid;
assert(!"genSSE41RoundOp: unsupported intrinsic");
unreached();
}
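// The immediates above follow the roundss/roundsd imm8 encoding: bits [1:0] pick the rounding
// mode (00 nearest, 01 toward -inf, 10 toward +inf, 11 toward zero), bit 2 defers to MXCSR.RC
// instead of the immediate mode, and bit 3 suppresses the precision exception. E.g. Ceiling
// uses 10 = 0b1010 (suppress precision exception, round toward +infinity), while Round (4)
// defers to MXCSR.RC, which defaults to round-to-nearest-even.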
if (srcNode->isContained() || srcNode->isUsedFromSpillTemp())
{
emitter* emit = GetEmitter();
TempDsc* tmpDsc = nullptr;
unsigned varNum = BAD_VAR_NUM;
unsigned offset = (unsigned)-1;
if (srcNode->isUsedFromSpillTemp())
{
assert(srcNode->IsRegOptional());
tmpDsc = getSpillTempDsc(srcNode);
varNum = tmpDsc->tdTempNum();
offset = 0;
regSet.tmpRlsTemp(tmpDsc);
}
else if (srcNode->isIndir())
{
GenTreeIndir* memIndir = srcNode->AsIndir();
GenTree* memBase = memIndir->gtOp1;
switch (memBase->OperGet())
{
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
{
assert(memBase->isContained());
varNum = memBase->AsLclVarCommon()->GetLclNum();
offset = memBase->AsLclVarCommon()->GetLclOffs();
// Ensure that all the GenTreeIndir values are set to their defaults.
assert(memBase->GetRegNum() == REG_NA);
assert(!memIndir->HasIndex());
assert(memIndir->Scale() == 1);
assert(memIndir->Offset() == 0);
break;
}
case GT_CLS_VAR_ADDR:
{
emit->emitIns_R_C_I(ins, size, dstReg, memBase->AsClsVar()->gtClsVarHnd, 0, ival);
return;
}
default:
{
emit->emitIns_R_A_I(ins, size, dstReg, memIndir, ival);
return;
}
}
}
else
{
switch (srcNode->OperGet())
{
case GT_CNS_DBL:
{
GenTreeDblCon* dblConst = srcNode->AsDblCon();
CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(dblConst->gtDconVal, emitTypeSize(dblConst));
emit->emitIns_R_C_I(ins, size, dstReg, hnd, 0, ival);
return;
}
case GT_LCL_FLD:
varNum = srcNode->AsLclFld()->GetLclNum();
offset = srcNode->AsLclFld()->GetLclOffs();
break;
case GT_LCL_VAR:
{
assert(srcNode->IsRegOptional() || !compiler->lvaGetDesc(srcNode->AsLclVar())->lvIsRegCandidate());
varNum = srcNode->AsLclVar()->GetLclNum();
offset = 0;
break;
}
default:
unreached();
break;
}
}
// Ensure we got a good varNum and offset.
// We also need to check for `tmpDsc != nullptr` since spill temp numbers
// are negative and start with -1, which also happens to be BAD_VAR_NUM.
assert((varNum != BAD_VAR_NUM) || (tmpDsc != nullptr));
assert(offset != (unsigned)-1);
emit->emitIns_R_S_I(ins, size, dstReg, varNum, offset, ival);
}
else
{
inst_RV_RV_IV(ins, size, dstReg, srcNode->GetRegNum(), ival);
}
}
//---------------------------------------------------------------------
// genIntrinsic - generate code for a given intrinsic
//
// Arguments
// treeNode - the GT_INTRINSIC node
//
// Return value:
// None
//
void CodeGen::genIntrinsic(GenTree* treeNode)
{
// Handle intrinsics that can be implemented by target-specific instructions
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Abs:
genSSE2BitwiseOp(treeNode);
break;
case NI_System_Math_Ceiling:
case NI_System_Math_Floor:
case NI_System_Math_Truncate:
case NI_System_Math_Round:
genSSE41RoundOp(treeNode->AsOp());
break;
case NI_System_Math_Sqrt:
{
// Both operand and its result must be of the same floating point type.
GenTree* srcNode = treeNode->AsOp()->gtOp1;
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
genConsumeOperands(treeNode->AsOp());
const instruction ins = (treeNode->TypeGet() == TYP_FLOAT) ? INS_sqrtss : INS_sqrtsd;
GetEmitter()->emitInsBinary(ins, emitTypeSize(treeNode), treeNode, srcNode);
break;
}
default:
assert(!"genIntrinsic: Unsupported intrinsic");
unreached();
}
genProduceReg(treeNode);
}
//-------------------------------------------------------------------------- //
// getBaseVarForPutArgStk - returns the baseVarNum for passing a stack arg.
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
// Return value:
// The number of the base variable.
//
// Note:
// For a tail call the outgoing args are placed in the caller's incoming arg stack space.
// Otherwise, they go in the outgoing arg area on the current frame.
//
// On Windows the caller always creates slots (homing space) in its frame for the
// first 4 arguments of a callee (register passed args). So, the baseVarNum is always 0.
// For System V systems there is no such calling convention requirement, and the code needs to find
// the first stack passed argument from the caller. This is done by iterating over
// all the lvParam variables and finding the first with GetArgReg() equals to REG_STK.
//
unsigned CodeGen::getBaseVarForPutArgStk(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_PUTARG_STK);
unsigned baseVarNum;
// Whether to set up the stk arg in the incoming or the out-going arg area?
// Fast tail calls implemented as epilog+jmp = stk arg is setup in incoming arg area.
// All other calls - stk arg is setup in out-going arg area.
if (treeNode->AsPutArgStk()->putInIncomingArgArea())
{
// See the note in the function header re: finding the first stack passed argument.
baseVarNum = getFirstArgWithStackSlot();
assert(baseVarNum != BAD_VAR_NUM);
#ifdef DEBUG
// This must be a fast tail call.
assert(treeNode->AsPutArgStk()->gtCall->AsCall()->IsFastTailCall());
// Since it is a fast tail call, the existence of the first incoming arg is guaranteed
// because a fast tail call requires that the caller's incoming arg area is >= the
// outgoing arg area required for the tail call.
LclVarDsc* varDsc = compiler->lvaGetDesc(baseVarNum);
assert(varDsc != nullptr);
#ifdef UNIX_AMD64_ABI
assert(!varDsc->lvIsRegArg && varDsc->GetArgReg() == REG_STK);
#else // !UNIX_AMD64_ABI
// On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0.
assert(varDsc->lvIsRegArg && (varDsc->GetArgReg() == REG_ARG_0 || varDsc->GetArgReg() == REG_FLTARG_0));
#endif // !UNIX_AMD64_ABI
#endif // !DEBUG
}
else
{
#if FEATURE_FIXED_OUT_ARGS
baseVarNum = compiler->lvaOutgoingArgSpaceVar;
#else // !FEATURE_FIXED_OUT_ARGS
assert(!"No BaseVarForPutArgStk on x86");
baseVarNum = BAD_VAR_NUM;
#endif // !FEATURE_FIXED_OUT_ARGS
}
return baseVarNum;
}
//---------------------------------------------------------------------
// genAlignStackBeforeCall: Align the stack if necessary before a call.
//
// Arguments:
// putArgStk - the putArgStk node.
//
void CodeGen::genAlignStackBeforeCall(GenTreePutArgStk* putArgStk)
{
#if defined(UNIX_X86_ABI)
genAlignStackBeforeCall(putArgStk->gtCall);
#endif // UNIX_X86_ABI
}
//---------------------------------------------------------------------
// genAlignStackBeforeCall: Align the stack if necessary before a call.
//
// Arguments:
// call - the call node.
//
void CodeGen::genAlignStackBeforeCall(GenTreeCall* call)
{
#if defined(UNIX_X86_ABI)
// Have we aligned the stack yet?
if (!call->fgArgInfo->IsStkAlignmentDone())
{
// We haven't done any stack alignment yet for this call. We might need to create
// an alignment adjustment, even if this function itself doesn't have any stack args.
// This can happen if this function call is part of a nested call sequence, and the outer
// call has already pushed some arguments.
unsigned stkLevel = genStackLevel + call->fgArgInfo->GetStkSizeBytes();
call->fgArgInfo->ComputeStackAlignment(stkLevel);
unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
if (padStkAlign != 0)
{
// Now generate the alignment
inst_RV_IV(INS_sub, REG_SPBASE, padStkAlign, EA_PTRSIZE);
AddStackLevel(padStkAlign);
AddNestedAlignment(padStkAlign);
}
call->fgArgInfo->SetStkAlignmentDone();
}
#endif // UNIX_X86_ABI
}
//---------------------------------------------------------------------
// genRemoveAlignmentAfterCall: After a call, remove the alignment
// added before the call, if any.
//
// Arguments:
// call - the call node.
// bias - additional stack adjustment
//
// Note:
// When bias > 0, caller should adjust stack level appropriately as
// bias is not considered when adjusting stack level.
//
void CodeGen::genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias)
{
#if defined(TARGET_X86)
#if defined(UNIX_X86_ABI)
// Put back the stack pointer if there was any padding for stack alignment
unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
unsigned padStkAdjust = padStkAlign + bias;
if (padStkAdjust != 0)
{
inst_RV_IV(INS_add, REG_SPBASE, padStkAdjust, EA_PTRSIZE);
SubtractStackLevel(padStkAlign);
SubtractNestedAlignment(padStkAlign);
}
#else // UNIX_X86_ABI
if (bias != 0)
{
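// Using "pop ecx" for a one-slot adjustment is a classic size optimization: it encodes in
// 1 byte versus 3 bytes for "add esp, 4", and the value popped into ECX is simply discarded.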
if (bias == sizeof(int))
{
inst_RV(INS_pop, REG_ECX, TYP_INT);
}
else
{
inst_RV_IV(INS_add, REG_SPBASE, bias, EA_PTRSIZE);
}
}
#endif // !UNIX_X86_ABI
#else // TARGET_X86
assert(bias == 0);
#endif // !TARGET_X86
}
#ifdef TARGET_X86
//---------------------------------------------------------------------
// genAdjustStackForPutArgStk:
// adjust the stack pointer for a putArgStk node if necessary.
//
// Arguments:
// putArgStk - the putArgStk node.
//
// Returns: true if the stack pointer was adjusted; false otherwise.
//
// Notes:
// Sets `m_pushStkArg` to true if the stack arg needs to be pushed,
// false if the stack arg needs to be stored at the current stack
// pointer address. This is exactly the opposite of the return value
// of this function.
//
bool CodeGen::genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk)
{
const unsigned argSize = putArgStk->GetStackByteSize();
GenTree* source = putArgStk->gtGetOp1();
#ifdef FEATURE_SIMD
if (!source->OperIs(GT_FIELD_LIST) && varTypeIsSIMD(source))
{
inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
AddStackLevel(argSize);
m_pushStkArg = false;
return true;
}
#endif // FEATURE_SIMD
#ifdef DEBUG
switch (putArgStk->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
case GenTreePutArgStk::Kind::Unroll:
assert(!source->AsObj()->GetLayout()->HasGCPtr());
break;
case GenTreePutArgStk::Kind::Push:
case GenTreePutArgStk::Kind::PushAllSlots:
assert(source->OperIs(GT_FIELD_LIST) || source->AsObj()->GetLayout()->HasGCPtr() ||
(argSize < XMM_REGSIZE_BYTES));
break;
default:
unreached();
}
#endif // DEBUG
// In lowering (see "LowerPutArgStk") we have determined what sort of instructions
// are going to be used for this node. If we will not be using "push"es, the stack
// needs to be adjusted first (so that the SP points to the base of the outgoing arg).
//
if (!putArgStk->isPushKind())
{
// If argSize is large, we need to probe the stack like we do in the prolog (genAllocLclFrame)
// or for localloc (genLclHeap), to ensure we touch the stack pages sequentially, and don't miss
// the stack guard pages. The prolog probes, but we don't know at this point how much higher
// the last probed stack pointer value is. We pick a default threshold. Any size below this threshold
// we are guaranteed the stack has been probed. Above this threshold, we don't know. The threshold
// should be high enough to cover all common cases. Increasing the threshold means adding a few
// more "lowest address of stack" probes in the prolog. Since this is relatively rare, add it to
// stress modes.
if ((argSize >= ARG_STACK_PROBE_THRESHOLD_BYTES) ||
compiler->compStressCompile(Compiler::STRESS_GENERIC_VARN, 5))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)argSize, REG_NA);
}
else
{
inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
}
AddStackLevel(argSize);
m_pushStkArg = false;
return true;
}
// Otherwise, "push" will be adjusting the stack for us.
m_pushStkArg = true;
return false;
}
//---------------------------------------------------------------------
// genPutArgStkFieldList - generate code for passing a GT_FIELD_LIST arg on the stack.
//
// Arguments
// treeNode - the GT_PUTARG_STK node whose op1 is a GT_FIELD_LIST
//
// Return value:
// None
//
void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk)
{
GenTreeFieldList* const fieldList = putArgStk->gtOp1->AsFieldList();
assert(fieldList != nullptr);
// Set m_pushStkArg and pre-adjust the stack if necessary.
const bool preAdjustedStack = genAdjustStackForPutArgStk(putArgStk);
// For now, we only support the "push" case; we will push a full slot for the first field of each slot
// within the struct.
assert((putArgStk->isPushKind()) && !preAdjustedStack && m_pushStkArg);
// If we have pre-adjusted the stack and are simply storing the fields in order, set the offset to 0.
// (Note that this mode is not currently being used.)
// If we are pushing the arguments (i.e. we have not pre-adjusted the stack), then we are pushing them
// in reverse order, so we start with the current field offset at the size of the struct arg (which must be
// a multiple of the target pointer size).
unsigned currentOffset = (preAdjustedStack) ? 0 : putArgStk->GetStackByteSize();
unsigned prevFieldOffset = currentOffset;
regNumber intTmpReg = REG_NA;
regNumber simdTmpReg = REG_NA;
if (putArgStk->AvailableTempRegCount() != 0)
{
regMaskTP rsvdRegs = putArgStk->gtRsvdRegs;
if ((rsvdRegs & RBM_ALLINT) != 0)
{
intTmpReg = putArgStk->GetSingleTempReg(RBM_ALLINT);
assert(genIsValidIntReg(intTmpReg));
}
if ((rsvdRegs & RBM_ALLFLOAT) != 0)
{
simdTmpReg = putArgStk->GetSingleTempReg(RBM_ALLFLOAT);
assert(genIsValidFloatReg(simdTmpReg));
}
assert(genCountBits(rsvdRegs) == (unsigned)((intTmpReg == REG_NA) ? 0 : 1) + ((simdTmpReg == REG_NA) ? 0 : 1));
}
for (GenTreeFieldList::Use& use : fieldList->Uses())
{
GenTree* const fieldNode = use.GetNode();
const unsigned fieldOffset = use.GetOffset();
var_types fieldType = use.GetType();
// Long-typed nodes should have been handled by the decomposition pass, and lowering should have sorted the
// field list in descending order by offset.
assert(!varTypeIsLong(fieldType));
assert(fieldOffset <= prevFieldOffset);
// Consume the register, if any, for this field. Note that genConsumeRegs() will appropriately
// update the liveness info for a lclVar that has been marked RegOptional, which hasn't been
// assigned a register, and which is therefore contained.
// Unlike genConsumeReg(), it handles the case where no registers are being consumed.
genConsumeRegs(fieldNode);
regNumber argReg = fieldNode->isUsedFromSpillTemp() ? REG_NA : fieldNode->GetRegNum();
// If the field is slot-like, we can use a push instruction to store the entire register no matter the type.
//
// The GC encoder requires that the stack remain 4-byte aligned at all times. Round the adjustment up
// to the next multiple of 4. If we are going to generate a `push` instruction, the adjustment must
// not require rounding.
// NOTE: if the field is of GC type, we must use a push instruction, since the emitter is not otherwise
// able to detect stores into the outgoing argument area of the stack on x86.
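// Illustrative example: a struct with two int fields at offsets 0 and 4 arrives here as a field
// list sorted by descending offset, so it is passed as "push <field@4>" followed by
// "push <field@0>", leaving ESP pointing at the first field of the struct.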
const bool fieldIsSlot = ((fieldOffset % 4) == 0) && ((prevFieldOffset - fieldOffset) >= 4);
int adjustment = roundUp(currentOffset - fieldOffset, 4);
if (fieldIsSlot && !varTypeIsSIMD(fieldType))
{
fieldType = genActualType(fieldType);
unsigned pushSize = genTypeSize(fieldType);
assert((pushSize % 4) == 0);
adjustment -= pushSize;
while (adjustment != 0)
{
inst_IV(INS_push, 0);
currentOffset -= pushSize;
AddStackLevel(pushSize);
adjustment -= pushSize;
}
m_pushStkArg = true;
}
else
{
m_pushStkArg = false;
// We always "push" floating point fields (i.e. they are full slot values that don't
// require special handling).
assert(varTypeIsIntegralOrI(fieldNode) || varTypeIsSIMD(fieldNode));
// If we can't push this field, it needs to be in a register so that we can store
// it to the stack location.
if (adjustment != 0)
{
// This moves the stack pointer to fieldOffset.
// For this case, we must adjust the stack and generate stack-relative stores rather than pushes.
// Adjust the stack pointer to the next slot boundary.
inst_RV_IV(INS_sub, REG_SPBASE, adjustment, EA_PTRSIZE);
currentOffset -= adjustment;
AddStackLevel(adjustment);
}
// Does it need to be in a byte register?
// If so, we'll use intTmpReg, which must have been allocated as a byte register.
// If it's already in a register, but not a byteable one, then move it.
if (varTypeIsByte(fieldType) && ((argReg == REG_NA) || ((genRegMask(argReg) & RBM_BYTE_REGS) == 0)))
{
assert(intTmpReg != REG_NA);
noway_assert((genRegMask(intTmpReg) & RBM_BYTE_REGS) != 0);
if (argReg != REG_NA)
{
inst_Mov(fieldType, intTmpReg, argReg, /* canSkip */ false);
argReg = intTmpReg;
}
}
}
if (argReg == REG_NA)
{
if (m_pushStkArg)
{
if (fieldNode->isUsedFromSpillTemp())
{
assert(!varTypeIsSIMD(fieldType)); // Q: can we get here with SIMD?
assert(fieldNode->IsRegOptional());
TempDsc* tmp = getSpillTempDsc(fieldNode);
GetEmitter()->emitIns_S(INS_push, emitActualTypeSize(fieldNode->TypeGet()), tmp->tdTempNum(), 0);
regSet.tmpRlsTemp(tmp);
}
else
{
assert(varTypeIsIntegralOrI(fieldNode));
switch (fieldNode->OperGet())
{
case GT_LCL_VAR:
inst_TT(INS_push, fieldNode, 0, 0, emitActualTypeSize(fieldNode->TypeGet()));
break;
case GT_CNS_INT:
if (fieldNode->IsIconHandle())
{
inst_IV_handle(INS_push, fieldNode->AsIntCon()->gtIconVal);
}
else
{
inst_IV(INS_push, fieldNode->AsIntCon()->gtIconVal);
}
break;
default:
unreached();
}
}
currentOffset -= TARGET_POINTER_SIZE;
AddStackLevel(TARGET_POINTER_SIZE);
}
else
{
// The stack has been adjusted and we will load the field to intTmpReg and then store it on the stack.
assert(varTypeIsIntegralOrI(fieldNode));
switch (fieldNode->OperGet())
{
case GT_LCL_VAR:
inst_RV_TT(INS_mov, intTmpReg, fieldNode);
break;
case GT_CNS_INT:
genSetRegToConst(intTmpReg, fieldNode->TypeGet(), fieldNode);
break;
default:
unreached();
}
genStoreRegToStackArg(fieldType, intTmpReg, fieldOffset - currentOffset);
}
}
else
{
#if defined(FEATURE_SIMD)
if (fieldType == TYP_SIMD12)
{
assert(genIsValidFloatReg(simdTmpReg));
genStoreSIMD12ToStack(argReg, simdTmpReg);
}
else
#endif // defined(FEATURE_SIMD)
{
genStoreRegToStackArg(fieldType, argReg, fieldOffset - currentOffset);
}
if (m_pushStkArg)
{
// We always push a slot-rounded size
currentOffset -= genTypeSize(fieldType);
}
}
prevFieldOffset = fieldOffset;
}
if (currentOffset != 0)
{
// We don't expect padding at the beginning of a struct, but it could happen with explicit layout.
inst_RV_IV(INS_sub, REG_SPBASE, currentOffset, EA_PTRSIZE);
AddStackLevel(currentOffset);
}
}
#endif // TARGET_X86
//---------------------------------------------------------------------
// genPutArgStk - generate code for passing an arg on the stack.
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk)
{
GenTree* data = putArgStk->gtOp1;
var_types targetType = genActualType(data->TypeGet());
#ifdef TARGET_X86
genAlignStackBeforeCall(putArgStk);
if ((data->OperGet() != GT_FIELD_LIST) && varTypeIsStruct(targetType))
{
(void)genAdjustStackForPutArgStk(putArgStk);
genPutStructArgStk(putArgStk);
return;
}
// On a 32-bit target, all of the long arguments are handled with GT_FIELD_LISTs of TYP_INT.
assert(targetType != TYP_LONG);
const unsigned argSize = putArgStk->GetStackByteSize();
assert((argSize % TARGET_POINTER_SIZE) == 0);
if (data->isContainedIntOrIImmed())
{
if (data->IsIconHandle())
{
inst_IV_handle(INS_push, data->AsIntCon()->gtIconVal);
}
else
{
inst_IV(INS_push, data->AsIntCon()->gtIconVal);
}
AddStackLevel(argSize);
}
else if (data->OperGet() == GT_FIELD_LIST)
{
genPutArgStkFieldList(putArgStk);
}
else
{
// We should not see any contained nodes that are not immediates.
assert(data->isUsedFromReg());
genConsumeReg(data);
genPushReg(targetType, data->GetRegNum());
}
#else // !TARGET_X86
{
unsigned baseVarNum = getBaseVarForPutArgStk(putArgStk);
#ifdef UNIX_AMD64_ABI
if (data->OperIs(GT_FIELD_LIST))
{
genPutArgStkFieldList(putArgStk, baseVarNum);
return;
}
else if (varTypeIsStruct(targetType))
{
m_stkArgVarNum = baseVarNum;
m_stkArgOffset = putArgStk->getArgOffset();
genPutStructArgStk(putArgStk);
m_stkArgVarNum = BAD_VAR_NUM;
return;
}
#endif // UNIX_AMD64_ABI
noway_assert(targetType != TYP_STRUCT);
// Get argument offset on stack.
// Here we cross-check that the argument offset hasn't changed from lowering to codegen, since
// the arg slot number is stored in the GT_PUTARG_STK node during lowering.
unsigned argOffset = putArgStk->getArgOffset();
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(putArgStk->gtCall, putArgStk);
assert(curArgTabEntry != nullptr);
assert(argOffset == curArgTabEntry->slotNum * TARGET_POINTER_SIZE);
#endif
if (data->isContainedIntOrIImmed())
{
GetEmitter()->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), baseVarNum, argOffset,
(int)data->AsIntConCommon()->IconValue());
}
else
{
assert(data->isUsedFromReg());
genConsumeReg(data);
GetEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->GetRegNum(), baseVarNum,
argOffset);
}
}
#endif // !TARGET_X86
}
//---------------------------------------------------------------------
// genPutArgReg - generate code for a GT_PUTARG_REG node
//
// Arguments
// tree - the GT_PUTARG_REG node
//
// Return value:
// None
//
void CodeGen::genPutArgReg(GenTreeOp* tree)
{
assert(tree->OperIs(GT_PUTARG_REG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
#ifndef UNIX_AMD64_ABI
assert(targetType != TYP_STRUCT);
#endif // !UNIX_AMD64_ABI
GenTree* op1 = tree->gtOp1;
genConsumeReg(op1);
// If child node is not already in the register we need, move it
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
genProduceReg(tree);
}
#ifdef TARGET_X86
// genPushReg: Push a register value onto the stack and adjust the stack level
//
// Arguments:
// type - the type of value to be stored
// reg - the register containing the value
//
// Notes:
// For TYP_LONG, the srcReg must be a floating point register.
// Otherwise, the register type must be consistent with the given type.
//
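// For illustration only, the sequences emitted below are roughly:
//   push eax                          ; integer value (other than TYP_LONG)
//   sub esp, 8 / movq  [esp], xmm0    ; TYP_LONG 8-byte chunk held in an xmm reg
//   sub esp, 8 / movsd [esp], xmm0    ; TYP_DOUBLE (ins_Store for the type)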
void CodeGen::genPushReg(var_types type, regNumber srcReg)
{
unsigned size = genTypeSize(type);
if (varTypeIsIntegralOrI(type) && type != TYP_LONG)
{
assert(genIsValidIntReg(srcReg));
inst_RV(INS_push, srcReg, type);
}
else
{
instruction ins;
emitAttr attr = emitTypeSize(type);
if (type == TYP_LONG)
{
// On x86, the only way we can push a TYP_LONG from a register is if it is in an xmm reg.
// This is only used when we are pushing a struct from memory to memory, and basically is
// handling an 8-byte "chunk", as opposed to strictly a long type.
ins = INS_movq;
}
else
{
ins = ins_Store(type);
}
assert(genIsValidFloatReg(srcReg));
inst_RV_IV(INS_sub, REG_SPBASE, size, EA_PTRSIZE);
GetEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, 0);
}
AddStackLevel(size);
}
#endif // TARGET_X86
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
// genStoreRegToStackArg: Store a register value into the stack argument area
//
// Arguments:
// type - the type of value to be stored
// reg - the register containing the value
// offset - the offset from the base (see Assumptions below)
//
// Notes:
// A type of TYP_STRUCT instructs this method to store a 16-byte chunk
// at the given offset (i.e. not the full struct).
//
// Assumptions:
// The caller must set the context appropriately before calling this method:
// - On x64, m_stkArgVarNum must be set according to whether this is a regular or tail call.
// - On x86, the caller must set m_pushStkArg if this method should push the argument.
// Otherwise, the argument is stored at the given offset from sp.
//
// TODO: In the below code the load and store instructions are for 16 bytes, but the
// type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
// this probably needs to be changed.
//
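// For illustration only, the stores emitted below are roughly one of:
//   push reg                               ; x86, when m_pushStkArg is set (via genPushReg)
//   mov [esp + offset], reg                ; x86, pre-adjusted stack
//   mov [outgoingArgSpace + offset], reg   ; x64, against m_stkArgVarNum
// with movq/movups/movdqu used instead of mov for TYP_LONG, SIMD and TYP_STRUCT chunks.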
void CodeGen::genStoreRegToStackArg(var_types type, regNumber srcReg, int offset)
{
assert(srcReg != REG_NA);
instruction ins;
emitAttr attr;
unsigned size;
if (type == TYP_STRUCT)
{
ins = INS_movdqu;
// This should be changed!
attr = EA_8BYTE;
size = 16;
}
else
{
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(type))
{
assert(genIsValidFloatReg(srcReg));
ins = ins_Store(type); // TODO-CQ: pass 'aligned' correctly
}
else
#endif // FEATURE_SIMD
#ifdef TARGET_X86
if (type == TYP_LONG)
{
assert(genIsValidFloatReg(srcReg));
ins = INS_movq;
}
else
#endif // TARGET_X86
{
assert((varTypeUsesFloatReg(type) && genIsValidFloatReg(srcReg)) ||
(varTypeIsIntegralOrI(type) && genIsValidIntReg(srcReg)));
ins = ins_Store(type);
}
attr = emitTypeSize(type);
size = genTypeSize(type);
}
#ifdef TARGET_X86
if (m_pushStkArg)
{
genPushReg(type, srcReg);
}
else
{
GetEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, offset);
}
#else // !TARGET_X86
assert(m_stkArgVarNum != BAD_VAR_NUM);
GetEmitter()->emitIns_S_R(ins, attr, srcReg, m_stkArgVarNum, m_stkArgOffset + offset);
#endif // !TARGET_X86
}
//---------------------------------------------------------------------
// genPutStructArgStk - generate code for copying a struct arg on the stack by value.
// If the struct contains references to heap objects,
// it generates the GC info as well.
//
// Arguments
// putArgStk - the GT_PUTARG_STK node
//
// Notes:
// In the case of fixed out args, the caller must have set m_stkArgVarNum to the variable number
// corresponding to the argument area (where we will put the argument on the stack).
// For tail calls this is the baseVarNum = 0.
// For non tail calls this is the outgoingArgSpace.
//
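// For illustration only, the copy kinds handled below map roughly to these patterns:
//   RepInstr / PartialRepInstr : lea rsi/rdi to source/destination, mov ecx, <count>, rep movs
//   Unroll                     : a short unrolled sequence of mov/movups copies
//   Push (x86 only)            : a series of "push [src + offset]" from the highest offset down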
void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk)
{
GenTree* source = putArgStk->gtGetOp1();
var_types targetType = source->TypeGet();
#if defined(TARGET_X86) && defined(FEATURE_SIMD)
if (putArgStk->isSIMD12())
{
genPutArgStkSIMD12(putArgStk);
return;
}
#endif // defined(TARGET_X86) && defined(FEATURE_SIMD)
if (varTypeIsSIMD(targetType))
{
regNumber srcReg = genConsumeReg(source);
assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
genStoreRegToStackArg(targetType, srcReg, 0);
return;
}
assert(targetType == TYP_STRUCT);
ClassLayout* layout = source->AsObj()->GetLayout();
switch (putArgStk->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
genStructPutArgRepMovs(putArgStk);
break;
#ifndef TARGET_X86
case GenTreePutArgStk::Kind::PartialRepInstr:
genStructPutArgPartialRepMovs(putArgStk);
break;
#endif // !TARGET_X86
case GenTreePutArgStk::Kind::Unroll:
genStructPutArgUnroll(putArgStk);
break;
#ifdef TARGET_X86
case GenTreePutArgStk::Kind::Push:
genStructPutArgPush(putArgStk);
break;
#endif // TARGET_X86
default:
unreached();
}
}
#endif // defined(FEATURE_PUT_STRUCT_ARG_STK)
/*****************************************************************************
*
* Create and record GC Info for the function.
*/
#ifndef JIT32_GCENCODER
void
#else // !JIT32_GCENCODER
void*
#endif // !JIT32_GCENCODER
CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
{
#ifdef JIT32_GCENCODER
return genCreateAndStoreGCInfoJIT32(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
#else // !JIT32_GCENCODER
genCreateAndStoreGCInfoX64(codeSize, prologSize DEBUGARG(codePtr));
#endif // !JIT32_GCENCODER
}
#ifdef JIT32_GCENCODER
void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr))
{
BYTE headerBuf[64];
InfoHdr header;
int s_cached;
#ifdef FEATURE_EH_FUNCLETS
// We should do this before gcInfoBlockHdrSave since varPtrTableSize must be finalized before it
if (compiler->ehAnyFunclets())
{
gcInfo.gcMarkFilterVarsPinned();
}
#endif
#ifdef DEBUG
size_t headerSize =
#endif
compiler->compInfoBlkSize =
gcInfo.gcInfoBlockHdrSave(headerBuf, 0, codeSize, prologSize, epilogSize, &header, &s_cached);
size_t argTabOffset = 0;
size_t ptrMapSize = gcInfo.gcPtrTableSize(header, codeSize, &argTabOffset);
#if DISPLAY_SIZES
if (GetInterruptible())
{
gcHeaderISize += compiler->compInfoBlkSize;
gcPtrMapISize += ptrMapSize;
}
else
{
gcHeaderNSize += compiler->compInfoBlkSize;
gcPtrMapNSize += ptrMapSize;
}
#endif // DISPLAY_SIZES
compiler->compInfoBlkSize += ptrMapSize;
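// For reference, the block written out below is laid out as
//   [ InfoHdr : headerSize bytes ][ pointer table : ptrMapSize bytes ]
// for a total of compInfoBlkSize bytes.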
/* Allocate the info block for the method */
compiler->compInfoBlkAddr = (BYTE*)compiler->info.compCompHnd->allocGCInfo(compiler->compInfoBlkSize);
#if 0 // VERBOSE_SIZES
// TODO-X86-Cleanup: 'dataSize', below, is not defined
// if (compiler->compInfoBlkSize > codeSize && compiler->compInfoBlkSize > 100)
{
printf("[%7u VM, %7u+%7u/%7u x86 %03u/%03u%%] %s.%s\n",
compiler->info.compILCodeSize,
compiler->compInfoBlkSize,
codeSize + dataSize,
codeSize + dataSize - prologSize - epilogSize,
100 * (codeSize + dataSize) / compiler->info.compILCodeSize,
100 * (codeSize + dataSize + compiler->compInfoBlkSize) / compiler->info.compILCodeSize,
compiler->info.compClassName,
compiler->info.compMethodName);
}
#endif
/* Fill in the info block and return it to the caller */
void* infoPtr = compiler->compInfoBlkAddr;
/* Create the method info block: header followed by GC tracking tables */
compiler->compInfoBlkAddr +=
gcInfo.gcInfoBlockHdrSave(compiler->compInfoBlkAddr, -1, codeSize, prologSize, epilogSize, &header, &s_cached);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize);
compiler->compInfoBlkAddr = gcInfo.gcPtrTableSave(compiler->compInfoBlkAddr, header, codeSize, &argTabOffset);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize + ptrMapSize);
#ifdef DEBUG
if (0)
{
BYTE* temp = (BYTE*)infoPtr;
size_t size = compiler->compInfoBlkAddr - temp;
BYTE* ptab = temp + headerSize;
noway_assert(size == headerSize + ptrMapSize);
printf("Method info block - header [%zu bytes]:", headerSize);
for (unsigned i = 0; i < size; i++)
{
if (temp == ptab)
{
printf("\nMethod info block - ptrtab [%u bytes]:", ptrMapSize);
printf("\n %04X: %*c", i & ~0xF, 3 * (i & 0xF), ' ');
}
else
{
if (!(i % 16))
printf("\n %04X: ", i);
}
printf("%02X ", *temp++);
}
printf("\n");
}
#endif // DEBUG
#if DUMP_GC_TABLES
if (compiler->opts.dspGCtbls)
{
const BYTE* base = (BYTE*)infoPtr;
size_t size;
unsigned methodSize;
InfoHdr dumpHeader;
printf("GC Info for method %s\n", compiler->info.compFullName);
printf("GC info size = %3u\n", compiler->compInfoBlkSize);
size = gcInfo.gcInfoBlockHdrDump(base, &dumpHeader, &methodSize);
// printf("size of header encoding is %3u\n", size);
printf("\n");
if (compiler->opts.dspGCtbls)
{
base += size;
size = gcInfo.gcDumpPtrTable(base, dumpHeader, methodSize);
// printf("size of pointer table is %3u\n", size);
printf("\n");
noway_assert(compiler->compInfoBlkAddr == (base + size));
}
}
#endif // DUMP_GC_TABLES
/* Make sure we ended up generating the expected number of bytes */
noway_assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + compiler->compInfoBlkSize);
return infoPtr;
}
#else // !JIT32_GCENCODER
void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
{
IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC());
GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
// We keep the call count for the second call to gcMakeRegPtrTable() below.
unsigned callCnt = 0;
// First we figure out the encoder ID's for the stack slots and registers.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
// Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
gcInfoEncoder->FinalizeSlotIds();
// Now we can actually use those slot ID's to declare live ranges.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
if (compiler->opts.compDbgEnC)
{
// what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
// which is:
// -return address
// -saved off RBP
// -saved 'this' pointer and bool for synchronized methods
// 4 slots for RBP + return address + RSI + RDI
int preservedAreaSize = 4 * REGSIZE_BYTES;
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
{
preservedAreaSize += REGSIZE_BYTES;
}
// bool in synchronized methods that tracks whether the lock has been taken (takes 4 bytes on stack)
preservedAreaSize += 4;
}
// Used to signal both that the method is compiled for EnC and the size of the block at the top of the
// frame.
gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
}
if (compiler->opts.IsReversePInvoke())
{
unsigned reversePInvokeFrameVarNumber = compiler->lvaReversePInvokeFrameVar;
assert(reversePInvokeFrameVarNumber != BAD_VAR_NUM);
const LclVarDsc* reversePInvokeFrameVar = compiler->lvaGetDesc(reversePInvokeFrameVarNumber);
gcInfoEncoder->SetReversePInvokeFrameSlot(reversePInvokeFrameVar->GetStackOffset());
}
gcInfoEncoder->Build();
// GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
// let's save the values anyway for debugging purposes
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
#endif // !JIT32_GCENCODER
/*****************************************************************************
* Emit a call to a helper function.
*
*/
void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg)
{
void* addr = nullptr;
void* pAddr = nullptr;
emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
regNumber callTarget = REG_NA;
regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
if (!addr)
{
assert(pAddr != nullptr);
// Absolute indirect call address.
// Note: the order of checks is important: always check for pc-relative first and
// zero-relative next, because the former encoding is one byte smaller than the latter.
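// For illustration only (x64 encodings): "call [rip + disp32]" is FF 15 xx xx xx xx (6 bytes),
// while the zero-relative form "call [disp32]" needs an extra SIB byte (FF 14 25 xx xx xx xx,
// 7 bytes), which is why the pc-relative form is preferred.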
if (genCodeIndirAddrCanBeEncodedAsPCRelOffset((size_t)pAddr) ||
genCodeIndirAddrCanBeEncodedAsZeroRelOffset((size_t)pAddr))
{
// generate call whose target is specified by 32-bit offset relative to PC or zero.
callType = emitter::EC_FUNC_TOKEN_INDIR;
addr = pAddr;
}
else
{
#ifdef TARGET_AMD64
// If this indirect address cannot be encoded as 32-bit offset relative to PC or Zero,
// load it into REG_HELPER_CALL_TARGET and use register indirect addressing mode to
// make the call.
// mov reg, addr
// call [reg]
if (callTargetReg == REG_NA)
{
// If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
// this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
regMaskTP callTargetMask = genRegMask(callTargetReg);
noway_assert((callTargetMask & killMask) == callTargetMask);
}
else
{
// The call target must not overwrite any live variable, though it may not be in the
// kill set for the call.
regMaskTP callTargetMask = genRegMask(callTargetReg);
noway_assert((callTargetMask & regSet.GetMaskVars()) == RBM_NONE);
}
#endif
callTarget = callTargetReg;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, callTarget, (ssize_t)pAddr);
callType = emitter::EC_INDIR_ARD;
}
}
// clang-format off
GetEmitter()->emitIns_Call(callType,
compiler->eeFindHelper(helper),
INDEBUG_LDISASM_COMMA(nullptr) addr,
argSize,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(EA_UNKNOWN),
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
callTarget, // ireg
REG_NA, 0, 0, // xreg, xmul, disp
false // isJump
);
// clang-format on
regSet.verifyRegistersUsed(killMask);
}
/*****************************************************************************
* Unit testing of the XArch emitter: generate a bunch of instructions into the prolog
* (it's as good a place as any), then use COMPlus_JitLateDisasm=* to see if the late
* disassembler thinks the instructions are the same as we do.
*/
// Uncomment "#define ALL_XARCH_EMITTER_UNIT_TESTS" to run all the unit tests here.
// After adding a unit test, and verifying it works, put it under this #ifdef, so we don't see it run every time.
//#define ALL_XARCH_EMITTER_UNIT_TESTS
#if defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
void CodeGen::genAmd64EmitterUnitTests()
{
if (!verbose)
{
return;
}
if (!compiler->opts.altJit)
{
// No point doing this in a "real" JIT.
return;
}
// Mark the "fake" instructions in the output.
printf("*************** In genAmd64EmitterUnitTests()\n");
// We use this:
// genDefineTempLabel(genCreateTempLabel());
// to create artificial labels to help separate groups of tests.
//
// Loads
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef ALL_XARCH_EMITTER_UNIT_TESTS
genDefineTempLabel(genCreateTempLabel());
// vhaddpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_haddpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_addps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_addpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_addpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_subps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_subpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_subpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_mulpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_mulps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_mulpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_andps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_andpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_andps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_andpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorps xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_orps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorpd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_orpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorps ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_orps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorpd ymm0,ymm1,ymm2
GetEmitter()->emitIns_R_R_R(INS_orpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vcvtss2sd xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_cvtss2sd, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vcvtsd2ss xmm0,xmm1,xmm2
GetEmitter()->emitIns_R_R_R(INS_cvtsd2ss, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
#endif // ALL_XARCH_EMITTER_UNIT_TESTS
printf("*************** End of genAmd64EmitterUnitTests()\n");
}
#endif // defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
#ifdef PROFILING_SUPPORTED
#ifdef TARGET_X86
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. This variable remains unchanged.
//
// Return Value:
// None
//
// Notes:
// The x86 profile enter helper has the following requirements (see ProfileEnterNaked in
// VM\i386\asmhelpers.asm for details):
// 1. The calling sequence for calling the helper is:
// push FunctionIDOrClientID
// call ProfileEnterHelper
// 2. The calling function has an EBP frame.
// 3. EBP points to the saved ESP which is the first thing saved in the function. Thus,
// the following prolog is assumed:
// push ESP
// mov EBP, ESP
// 4. All registers are preserved.
// 5. The helper pops the FunctionIDOrClientID argument from the stack.
//
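// For illustration only, the sequence emitted below is roughly:
//   sub  esp, 0xC                ; UNIX_X86_ABI only, to reach 16-byte alignment
//   push FunctionIDOrClientID    ; immediate, or push [handleAddr] if indirected
//   call ProfileEnterHelper
//   add  esp, 0x10               ; UNIX_X86_ABI only, undoing the alignment and the pushed argument
// On Windows x86 only the push and the call are emitted, since the helper pops the argument.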
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
// Give profiler a chance to back out of hooking this method
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
unsigned saveStackLvl2 = genStackLevel;
// Important note: when you change enter probe layout, you must also update SKIP_ENTER_PROF_CALLBACK()
// for x86 stack unwinding
#if defined(UNIX_X86_ABI)
// Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall()
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
#endif // UNIX_X86_ABI
// Push the profilerHandle
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
inst_IV(INS_push, (size_t)compiler->compProfilerMethHnd);
}
// This will emit either
// "call ip-relative 32-bit offset" or
// "mov rax, helper addr; call rax"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER,
0, // argSize. Again, we have to lie about it
EA_UNKNOWN); // retSize
// Check that we have room for the push.
assert(compiler->fgGetPtrArgCntMax() >= 1);
#if defined(UNIX_X86_ABI)
// Restoring alignment manually. This is similar to CodeGen::genRemoveAlignmentAfterCall
GetEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
#endif // UNIX_X86_ABI
/* Restore the stack level */
SetStackLevel(saveStackLvl2);
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
// Notes:
// The x86 profile leave/tailcall helper has the following requirements (see ProfileLeaveNaked and
// ProfileTailcallNaked in VM\i386\asmhelpers.asm for details):
// 1. The calling sequence for calling the helper is:
// push FunctionIDOrClientID
// call ProfileLeaveHelper or ProfileTailcallHelper
// 2. The calling function has an EBP frame.
// 3. EBP points to the saved ESP which is the first thing saved in the function. Thus,
// the following prolog is assumed:
// push ESP
// mov EBP, ESP
// 4. helper == CORINFO_HELP_PROF_FCN_LEAVE: All registers are preserved.
// helper == CORINFO_HELP_PROF_FCN_TAILCALL: Only argument registers are preserved.
// 5. The helper pops the FunctionIDOrClientID argument from the stack.
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
// Only hook if profiler says it's okay.
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
// Need to save the current stack level, since the helper call will pop the argument
unsigned saveStackLvl2 = genStackLevel;
#if defined(UNIX_X86_ABI)
// Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall()
GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
AddStackLevel(0xC);
AddNestedAlignment(0xC);
#endif // UNIX_X86_ABI
//
// Push the profilerHandle
//
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
inst_IV(INS_push, (size_t)compiler->compProfilerMethHnd);
}
genSinglePush();
#if defined(UNIX_X86_ABI)
int argSize = -REGSIZE_BYTES; // negative means caller-pop (cdecl)
#else
int argSize = REGSIZE_BYTES;
#endif
genEmitHelperCall(helper, argSize, EA_UNKNOWN /* retSize */);
// Check that we have room for the push.
assert(compiler->fgGetPtrArgCntMax() >= 1);
#if defined(UNIX_X86_ABI)
// Restoring alignment manually. This is similar to CodeGen::genRemoveAlignmentAfterCall
GetEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
SubtractStackLevel(0x10);
SubtractNestedAlignment(0xC);
#endif // UNIX_X86_ABI
/* Restore the stack level */
SetStackLevel(saveStackLvl2);
}
#endif // TARGET_X86
#ifdef TARGET_AMD64
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
// Give profiler a chance to back out of hooking this method
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
#if !defined(UNIX_AMD64_ABI)
unsigned varNum;
LclVarDsc* varDsc;
// Since the method needs to make a profiler callback, it should have out-going arg space allocated.
noway_assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
noway_assert(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES));
// Home all arguments passed in arg registers (RCX, RDX, R8 and R9).
// In case of vararg methods, arg regs are already homed.
//
// Note: Here we don't need to worry about updating GC info since the enter
// callback is generated as part of the prolog, which is not GC-interruptible.
// Moreover, the GC cannot kick in while executing inside the profiler callback, which is a
// profiler requirement so that it can examine arguments which could be object refs.
if (!compiler->info.compIsVarArgs)
{
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->info.compArgsCount; varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
if (!varDsc->lvIsRegArg)
{
continue;
}
var_types storeType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg();
instruction store_ins = ins_Store(storeType);
#ifdef FEATURE_SIMD
if ((storeType == TYP_SIMD8) && genIsValidIntReg(argReg))
{
store_ins = INS_mov;
}
#endif // FEATURE_SIMD
GetEmitter()->emitIns_S_R(store_ins, emitTypeSize(storeType), argReg, varNum, 0);
}
}
// Emit profiler EnterCallback(ProfilerMethHnd, caller's SP)
// RCX = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of a pointer.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_8BYTE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RDX = caller's SP
// Notes
// 1) Here we can query caller's SP offset since prolog will be generated after final frame layout.
// 2) caller's SP relative offset to FramePointer will be negative. We need to add absolute value
// of that offset to FramePointer to obtain caller's SP value.
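// For illustration only (hypothetical offset): if the caller-SP-relative offset is -0x28,
// the lea below is roughly "lea rdx, [rbp + 0x28]".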
assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
// This will emit either
// "call ip-relative 32-bit offset" or
// "mov rax, helper addr; call rax"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN);
// TODO-AMD64-CQ: Rather than reloading, see if this could be optimized by combining with prolog
// generation logic that moves args around as required by first BB entry point conditions
// computed by LSRA. Code pointers for investigating this further: genFnPrologCalleeRegArgs()
// and genEnregisterIncomingStackArgs().
//
// Now reload arg registers from home locations.
// Vararg methods:
// - we need to reload only known (i.e. fixed) reg args.
// - if floating point type, also reload it into corresponding integer reg
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->info.compArgsCount; varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
if (!varDsc->lvIsRegArg)
{
continue;
}
var_types loadType = varDsc->GetRegisterType();
regNumber argReg = varDsc->GetArgReg();
instruction load_ins = ins_Load(loadType);
#ifdef FEATURE_SIMD
if ((loadType == TYP_SIMD8) && genIsValidIntReg(argReg))
{
load_ins = INS_mov;
}
#endif // FEATURE_SIMD
GetEmitter()->emitIns_R_S(load_ins, emitTypeSize(loadType), argReg, varNum, 0);
if (compFeatureVarArg() && compiler->info.compIsVarArgs && varTypeIsFloating(loadType))
{
regNumber intArgReg = compiler->getCallArgIntRegister(argReg);
inst_Mov(TYP_LONG, intArgReg, argReg, /* canSkip */ false, emitActualTypeSize(loadType));
}
}
// If initReg is one of RBM_CALLEE_TRASH, then it needs to be zero'ed before using.
if ((RBM_CALLEE_TRASH & genRegMask(initReg)) != 0)
{
*pInitRegZeroed = false;
}
#else // !defined(UNIX_AMD64_ABI)
// Emit profiler EnterCallback(ProfilerMethHnd, caller's SP)
// R14 = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of a pointer.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_PROFILER_ENTER_ARG_0,
(ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_PROFILER_ENTER_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// R15 = caller's SP
// Notes
// 1) Here we can query caller's SP offset since prolog will be generated after final frame layout.
// 2) caller's SP relative offset to FramePointer will be negative. We need to add absolute value
// of that offset to FramePointer to obtain caller's SP value.
assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_1, genFramePointerReg(), -callerSPOffset);
// We can use any callee trash register (other than RAX, RDI, RSI) for call target.
// We use R11 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r11, helper addr; call r11"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN, REG_DEFAULT_PROFILER_CALL_TARGET);
// If initReg is one of RBM_CALLEE_TRASH, then it needs to be zero'ed before using.
if ((RBM_CALLEE_TRASH & genRegMask(initReg)) != 0)
{
*pInitRegZeroed = false;
}
#endif // !defined(UNIX_AMD64_ABI)
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
// Only hook if profiler says it's okay.
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
#if !defined(UNIX_AMD64_ABI)
// Since the method needs to make a profiler callback, it should have out-going arg space allocated.
noway_assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
noway_assert(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES));
// If thisPtr needs to be kept alive and reported, it cannot be one of the callee trash
// registers that profiler callback kills.
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaGetDesc(compiler->info.compThisArg)->lvIsInReg())
{
regMaskTP thisPtrMask = genRegMask(compiler->lvaGetDesc(compiler->info.compThisArg)->GetRegNum());
noway_assert((RBM_PROFILER_LEAVE_TRASH & thisPtrMask) == 0);
}
// At this point return value is computed and stored in RAX or XMM0.
// On Amd64, Leave callback preserves the return register. We keep
// RAX alive by not reporting as trashed by helper call. Also note
// that GC cannot kick-in while executing inside profiler callback,
// which is a requirement of profiler as well since it needs to examine
// return value which could be an obj ref.
// RCX = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of an address.
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RDX = caller's SP
// TODO-AMD64-Cleanup: Once we start doing codegen after final frame layout, retain the "if" portion
// of the statements to execute unconditionally and clean up the rest.
if (compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
{
// Caller's SP relative offset to FramePointer will be negative. We need to add absolute
// value of that offset to FramePointer to obtain caller's SP value.
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
}
else
{
// If we are here means that it is a tentative frame layout during which we
// cannot use caller's SP offset since it is an estimate. For now we require the
// method to have at least a single arg so that we can use it to obtain caller's
// SP.
LclVarDsc* varDsc = compiler->lvaGetDesc(0U);
NYI_IF((varDsc == nullptr) || !varDsc->lvIsParam, "Profiler ELT callback for a method without any params");
// lea rdx, [FramePointer + Arg0's offset]
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
}
// We can use any callee trash register (other than RAX, RCX, RDX) for call target.
// We use R8 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r8, helper addr; call r8"
genEmitHelperCall(helper, 0, EA_UNKNOWN, REG_ARG_2);
#else // !defined(UNIX_AMD64_ABI)
// RDI = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
// RSI = caller's SP
if (compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
{
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
}
else
{
LclVarDsc* varDsc = compiler->lvaGetDesc(0U);
NYI_IF((varDsc == nullptr) || !varDsc->lvIsParam, "Profiler ELT callback for a method without any params");
// lea rsi, [FramePointer + Arg0's offset]
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
}
// We can use any callee trash register (other than RAX, RDI, RSI) for call target.
// We use R11 here. This will emit either
// "call ip-relative 32-bit offset" or
// "mov r11, helper addr; call r11"
genEmitHelperCall(helper, 0, EA_UNKNOWN, REG_DEFAULT_PROFILER_CALL_TARGET);
#endif // !defined(UNIX_AMD64_ABI)
}
#endif // TARGET_AMD64
#endif // PROFILING_SUPPORTED
#ifdef TARGET_AMD64
//------------------------------------------------------------------------
// genOSRRecordTier0CalleeSavedRegistersAndFrame: for OSR methods, record the
// subset of callee saves already saved by the Tier0 method, and the frame
// created by Tier0.
//
void CodeGen::genOSRRecordTier0CalleeSavedRegistersAndFrame()
{
assert(compiler->compGeneratingProlog);
assert(compiler->opts.IsOSR());
assert(compiler->funCurrentFunc()->funKind == FuncKind::FUNC_ROOT);
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// Figure out which set of int callee saves was already saved by Tier0.
// Emit appropriate unwind.
//
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
int const tier0IntCalleeSaveUsedSize = genCountBits(tier0IntCalleeSaves) * REGSIZE_BYTES;
JITDUMP("--OSR--- tier0 has already saved ");
JITDUMPEXEC(dspRegMask(tier0IntCalleeSaves));
JITDUMP("\n");
// We must account for the Tier0 callee saves.
//
// These have already happened at method entry; all these
// unwind records should be at offset 0.
//
// RBP is always saved by Tier0 and always pushed first.
//
assert((tier0IntCalleeSaves & RBM_FPBASE) == RBM_FPBASE);
compiler->unwindPush(REG_RBP);
tier0IntCalleeSaves &= ~RBM_FPBASE;
// Now the rest of the Tier0 callee saves.
//
for (regNumber reg = REG_INT_LAST; tier0IntCalleeSaves != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & tier0IntCalleeSaves) != 0)
{
compiler->unwindPush(reg);
}
tier0IntCalleeSaves &= ~regBit;
}
// We must account for the post-callee-saves push SP movement
// done by the Tier0 frame and by the OSR transition.
//
// tier0FrameSize is the Tier0 FP-SP delta plus the fake call slot added by
// JIT_Patchpoint. We add one slot to account for the saved FP.
//
// We then need to subtract off the size the Tier0 callee saves as SP
// adjusts for those will have been modelled by the unwind pushes above.
//
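// For illustration only (hypothetical Tier0 frame): if TotalFrameSize() is 0x70 and Tier0
// saved RBP, RSI and RDI (tier0IntCalleeSaveUsedSize == 0x18), then
// tier0FrameSize == 0x70 + 8 == 0x78 and we record unwindAllocStack(0x78 - 0x18) == 0x60.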
int const tier0FrameSize = patchpointInfo->TotalFrameSize() + REGSIZE_BYTES;
int const tier0NetSize = tier0FrameSize - tier0IntCalleeSaveUsedSize;
compiler->unwindAllocStack(tier0NetSize);
}
//------------------------------------------------------------------------
// genOSRSaveRemainingCalleeSavedRegisters: save any callee save registers
// that Tier0 didn't save.
//
// Notes:
// This must be invoked after SP has been adjusted to allocate the local
// frame, because of how the UnwindSave records are interpreted.
//
// We rely on the fact that other "local frame" allocation actions (like
// stack probing) will not trash callee saves registers.
//
void CodeGen::genOSRSaveRemainingCalleeSavedRegisters()
{
// We should be generating the prolog of an OSR root frame.
//
assert(compiler->compGeneratingProlog);
assert(compiler->opts.IsOSR());
assert(compiler->funCurrentFunc()->funKind == FuncKind::FUNC_ROOT);
// x86/x64 doesn't support pushing xmm/ymm regs, so only integer registers are considered for pushing onto the
// stack here. Space for float registers to be preserved is allocated on the stack, and they are saved as part of
// the prolog sequence, not here.
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// Figure out which set of int callee saves still needs saving.
//
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
unsigned const tier0IntCalleeSaveUsedSize = genCountBits(tier0IntCalleeSaves) * REGSIZE_BYTES;
regMaskTP const osrIntCalleeSaves = rsPushRegs & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP osrAdditionalIntCalleeSaves = osrIntCalleeSaves & ~tier0IntCalleeSaves;
JITDUMP("---OSR--- int callee saves are ");
JITDUMPEXEC(dspRegMask(osrIntCalleeSaves));
JITDUMP("; tier0 already saved ");
JITDUMPEXEC(dspRegMask(tier0IntCalleeSaves));
JITDUMP("; so only saving ");
JITDUMPEXEC(dspRegMask(osrAdditionalIntCalleeSaves));
JITDUMP("\n");
// These remaining callee saves will be stored in the Tier0 callee save area
// below any saves already done by Tier0. Compute the offset.
//
// The OSR method doesn't actually use its callee save area.
//
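// For illustration only (hypothetical sizes): with osrFrameSize 0x40, osrCalleeSaveSize 0x10,
// osrFramePointerSize 8, tier0FrameSize 0x70 and tier0IntCalleeSaveUsedSize 0x18, the first
// additional save below goes to [rsp + 0x40 + 0x10 + 8 + 0x70 - 0x18] == [rsp + 0xB0], and each
// subsequent save is REGSIZE_BYTES lower.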
int const osrFrameSize = compiler->compLclFrameSize;
int const tier0FrameSize = patchpointInfo->TotalFrameSize();
int const osrCalleeSaveSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
int const osrFramePointerSize = isFramePointerUsed() ? REGSIZE_BYTES : 0;
int offset = osrFrameSize + osrCalleeSaveSize + osrFramePointerSize + tier0FrameSize - tier0IntCalleeSaveUsedSize;
// The tier0 frame is always an RBP frame, so the OSR method should never need to save RBP.
//
assert((tier0CalleeSaves & RBM_FPBASE) == RBM_FPBASE);
assert((osrAdditionalIntCalleeSaves & RBM_FPBASE) == RBM_NONE);
// The OSR method must use MOVs to save additional callee saves.
//
for (regNumber reg = REG_INT_LAST; osrAdditionalIntCalleeSaves != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & osrAdditionalIntCalleeSaves) != 0)
{
GetEmitter()->emitIns_AR_R(INS_mov, EA_8BYTE, reg, REG_SPBASE, offset);
compiler->unwindSaveReg(reg, offset);
offset -= REGSIZE_BYTES;
}
osrAdditionalIntCalleeSaves &= ~regBit;
}
}
#endif // TARGET_AMD64
//------------------------------------------------------------------------
// genPushCalleeSavedRegisters: Push any callee-saved registers we have used.
//
void CodeGen::genPushCalleeSavedRegisters()
{
assert(compiler->compGeneratingProlog);
#if DEBUG
// OSR root frames must handle this differently. See
// genOSRRecordTier0CalleeSavedRegisters()
// genOSRSaveRemainingCalleeSavedRegisters()
//
if (compiler->opts.IsOSR())
{
assert(compiler->funCurrentFunc()->funKind != FuncKind::FUNC_ROOT);
}
#endif
// x86/x64 doesn't support pushing xmm/ymm regs, so only integer registers are considered for pushing onto the
// stack here. Space for float registers to be preserved is allocated on the stack, and they are saved as part of
// the prolog sequence, not here.
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_INT_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// On X86/X64 we have already pushed the FP (frame-pointer) prior to calling this method
if (isFramePointerUsed())
{
rsPushRegs &= ~RBM_FPBASE;
}
#ifdef DEBUG
if (compiler->compCalleeRegsPushed != genCountBits(rsPushRegs))
{
printf("Error: unexpected number of callee-saved registers to push. Expected: %d. Got: %d ",
compiler->compCalleeRegsPushed, genCountBits(rsPushRegs));
dspRegMask(rsPushRegs);
printf("\n");
assert(compiler->compCalleeRegsPushed == genCountBits(rsPushRegs));
}
#endif // DEBUG
// Push backwards so we match the order we will pop them in the epilog
// and all the other code that expects it to be in this order.
for (regNumber reg = REG_INT_LAST; rsPushRegs != RBM_NONE; reg = REG_PREV(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & rsPushRegs) != 0)
{
inst_RV(INS_push, reg, TYP_REF);
compiler->unwindPush(reg);
#ifdef USING_SCOPE_INFO
if (!doubleAlignOrFramePointerUsed())
{
psiAdjustStackLevel(REGSIZE_BYTES);
}
#endif // USING_SCOPE_INFO
rsPushRegs &= ~regBit;
}
}
}
void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
#ifdef TARGET_AMD64
const bool isFunclet = compiler->funCurrentFunc()->funKind != FuncKind::FUNC_ROOT;
const bool doesSupersetOfNormalPops = compiler->opts.IsOSR() && !isFunclet;
// OSR methods must restore all registers saved by either the OSR or
// the Tier0 method. First restore any callee save not saved by
// Tier0, then the callee saves done by Tier0.
//
// OSR funclets do normal restores.
//
if (doesSupersetOfNormalPops)
{
regMaskTP rsPopRegs = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP tier0CalleeSaves =
((regMaskTP)compiler->info.compPatchpointInfo->CalleeSaveRegisters()) & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP additionalCalleeSaves = rsPopRegs & ~tier0CalleeSaves;
// Registers saved by the OSR prolog.
//
genPopCalleeSavedRegistersFromMask(additionalCalleeSaves);
// Registers saved by the Tier0 prolog.
// Tier0 frame pointer will be restored separately.
//
genPopCalleeSavedRegistersFromMask(tier0CalleeSaves & ~RBM_FPBASE);
return;
}
#endif // TARGET_AMD64
// Registers saved by a normal prolog
//
regMaskTP rsPopRegs = regSet.rsGetModifiedRegsMask() & RBM_INT_CALLEE_SAVED;
const unsigned popCount = genPopCalleeSavedRegistersFromMask(rsPopRegs);
noway_assert(compiler->compCalleeRegsPushed == popCount);
}
//------------------------------------------------------------------------
// genPopCalleeSavedRegistersFromMask: pop specified set of callee saves
// in the "standard" order
//
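// For illustration only: if the prolog pushed R15, R12, EDI, ESI and EBX (pushes walk from
// REG_INT_LAST downward), this routine pops them as EBX, ESI, EDI, R12, R15, i.e. the mirror order.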
unsigned CodeGen::genPopCalleeSavedRegistersFromMask(regMaskTP rsPopRegs)
{
unsigned popCount = 0;
if ((rsPopRegs & RBM_EBX) != 0)
{
popCount++;
inst_RV(INS_pop, REG_EBX, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_FPBASE) != 0)
{
// EBP cannot be directly modified for EBP frame and double-aligned frames
assert(!doubleAlignOrFramePointerUsed());
popCount++;
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
#ifndef UNIX_AMD64_ABI
// ESI and EDI are popped here only for ABIs where they are callee-saved; under the System V
// AMD64 calling convention they are volatile registers.
if ((rsPopRegs & RBM_ESI) != 0)
{
popCount++;
inst_RV(INS_pop, REG_ESI, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_EDI) != 0)
{
popCount++;
inst_RV(INS_pop, REG_EDI, TYP_I_IMPL);
}
#endif // !defined(UNIX_AMD64_ABI)
#ifdef TARGET_AMD64
if ((rsPopRegs & RBM_R12) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R12, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R13) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R13, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R14) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R14, TYP_I_IMPL);
}
if ((rsPopRegs & RBM_R15) != 0)
{
popCount++;
inst_RV(INS_pop, REG_R15, TYP_I_IMPL);
}
#endif // TARGET_AMD64
// Amd64/x86 doesn't support push/pop of xmm registers.
// These will get saved to stack separately after allocating
// space on stack in prolog sequence. PopCount is essentially
// tracking the count of integer registers pushed.
return popCount;
}
/*****************************************************************************
*
* Generates code for a function epilog.
*
* Please consult the "debugger team notification" comment in genFnProlog().
*/
void CodeGen::genFnEpilog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFnEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, GetEmitter()->emitInitGCrefVars);
gcInfo.gcRegGCrefSetCur = GetEmitter()->emitInitGCrefRegs;
gcInfo.gcRegByrefSetCur = GetEmitter()->emitInitByrefRegs;
noway_assert(!compiler->opts.MinOpts() || isFramePointerUsed()); // FPO not allowed with minOpts
#ifdef DEBUG
genInterruptibleUsed = true;
#endif
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
#ifdef DEBUG
if (compiler->opts.dspCode)
{
printf("\n__epilog:\n");
}
if (verbose)
{
printf("gcVarPtrSetCur=%s ", VarSetOps::ToString(compiler, gcInfo.gcVarPtrSetCur));
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
printf(", gcRegGCrefSetCur=");
printRegMaskInt(gcInfo.gcRegGCrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
printf(", gcRegByrefSetCur=");
printRegMaskInt(gcInfo.gcRegByrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
printf("\n");
}
#endif
// Restore float registers that were saved to stack before SP is modified.
genRestoreCalleeSavedFltRegs(compiler->compLclFrameSize);
#ifdef JIT32_GCENCODER
// When using the JIT32 GC encoder, we do not start the OS-reported portion of the epilog until after
// the above call to `genRestoreCalleeSavedFltRegs` because that function
// a) does not actually restore any registers: there are none when targeting the Windows x86 ABI,
// which is the only target that uses the JIT32 GC encoder
// b) may issue a `vzeroupper` instruction to eliminate AVX -> SSE transition penalties.
// Because the `vzeroupper` instruction is not recognized by the VM's unwinder and there are no
// callee-save FP restores that the unwinder would need to see, we can avoid the need to change the
// unwinder (and break binary compat with older versions of the runtime) by starting the epilog
// after any `vzeroupper` instruction has been emitted. If either of the above conditions changes,
// we will need to rethink this.
GetEmitter()->emitStartEpilog();
#endif
/* Compute the size in bytes we've pushed/popped */
bool removeEbpFrame = doubleAlignOrFramePointerUsed();
#ifdef TARGET_AMD64
// We only remove the EBP frame using the frame pointer (using `lea rsp, [rbp + const]`)
// if we reported the frame pointer in the prolog. The Windows x64 unwinding ABI specifically
// disallows this `lea` form:
//
// See https://docs.microsoft.com/en-us/cpp/build/prolog-and-epilog?view=msvc-160#epilog-code
//
// "When a frame pointer is not used, the epilog must use add RSP,constant to deallocate the fixed part of the
// stack. It may not use lea RSP,constant[RSP] instead. This restriction exists so the unwind code has fewer
// patterns to recognize when searching for epilogs."
//
// Otherwise, we must use `add RSP, constant`, as stated. So, we need to use the same condition
// as genFnProlog() used in determining whether to report the frame pointer in the unwind data.
// This is a subset of the `doubleAlignOrFramePointerUsed()` cases.
//
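// For illustration only, the two epilog shapes produced below are roughly:
//   lea rsp, [rbp - <calleeSaveSize>] ; pop <callee saves> ; pop rbp ; ret          (FP reported)
//   add rsp, <frameSize> ; pop <callee saves> ; (pop rbp if a frame chain exists) ; ret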
if (removeEbpFrame)
{
const bool reportUnwindData = compiler->compLocallocUsed || compiler->opts.compDbgEnC;
removeEbpFrame = removeEbpFrame && reportUnwindData;
}
#endif // TARGET_AMD64
if (!removeEbpFrame)
{
// We have an ESP frame
noway_assert(compiler->compLocallocUsed == false); // Only used with frame-pointer
/* Get rid of our local variables */
unsigned int frameSize = compiler->compLclFrameSize;
#ifdef TARGET_AMD64
// OSR must remove the entire OSR frame and the Tier0 frame down to the bottom
// of the used part of the Tier0 callee save area.
//
if (compiler->opts.IsOSR())
{
// The patchpoint TotalFrameSize is the SP-FP delta (plus the "call" slot added by JIT_Patchpoint),
// so it does not account for the Tier0 push of FP; we add in an extra stack slot to get the
// offset to the top of the Tier0 callee saves area.
//
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
regMaskTP const tier0CalleeSaves = (regMaskTP)patchpointInfo->CalleeSaveRegisters();
regMaskTP const tier0IntCalleeSaves = tier0CalleeSaves & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP const osrIntCalleeSaves = regSet.rsGetModifiedRegsMask() & RBM_OSR_INT_CALLEE_SAVED;
regMaskTP const allIntCalleeSaves = osrIntCalleeSaves | tier0IntCalleeSaves;
unsigned const tier0FrameSize = patchpointInfo->TotalFrameSize() + REGSIZE_BYTES;
unsigned const tier0IntCalleeSaveUsedSize = genCountBits(allIntCalleeSaves) * REGSIZE_BYTES;
unsigned const osrCalleeSaveSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
unsigned const osrFramePointerSize = isFramePointerUsed() ? REGSIZE_BYTES : 0;
unsigned const osrAdjust =
tier0FrameSize - tier0IntCalleeSaveUsedSize + osrCalleeSaveSize + osrFramePointerSize;
JITDUMP("OSR epilog adjust factors: tier0 frame %u, tier0 callee saves -%u, osr callee saves %u, osr "
"framePointer %u\n",
tier0FrameSize, tier0IntCalleeSaveUsedSize, osrCalleeSaveSize, osrFramePointerSize);
JITDUMP(" OSR frame size %u; net osr adjust %u, result %u\n", frameSize, osrAdjust,
frameSize + osrAdjust);
frameSize += osrAdjust;
}
#endif // TARGET_AMD64
if (frameSize > 0)
{
#ifdef TARGET_X86
/* Add 'compiler->compLclFrameSize' to ESP */
/* Use pop ECX to increment ESP by 4, unless compiler->compJmpOpUsed is true */
if ((frameSize == TARGET_POINTER_SIZE) && !compiler->compJmpOpUsed)
{
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regSet.verifyRegUsed(REG_ECX);
}
else
#endif // TARGET_X86
{
/* Add 'compiler->compLclFrameSize' to ESP */
/* Generate "add esp, <stack-size>" */
inst_RV_IV(INS_add, REG_SPBASE, frameSize, EA_PTRSIZE);
}
}
genPopCalleeSavedRegisters();
#ifdef TARGET_AMD64
// In the case where we have an RSP frame, and no frame pointer reported in the OS unwind info,
// but we do have a pushed frame pointer and established frame chain, we do need to pop RBP.
//
// OSR methods must always pop RBP (pushed by Tier0 frame)
if (doubleAlignOrFramePointerUsed() || compiler->opts.IsOSR())
{
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
#endif // TARGET_AMD64
}
else
{
noway_assert(doubleAlignOrFramePointerUsed());
// We don't support OSR for methods that must report an FP in unwind.
//
assert(!compiler->opts.IsOSR());
/* Tear down the stack frame */
bool needMovEspEbp = false;
#if DOUBLE_ALIGN
if (compiler->genDoubleAlign())
{
//
// add esp, compLclFrameSize
//
// We need not do anything (except the "mov esp, ebp") if
// compiler->compCalleeRegsPushed==0. However, this is unlikely, and it
// also complicates the code manager. Hence, we ignore that case.
noway_assert(compiler->compLclFrameSize != 0);
inst_RV_IV(INS_add, REG_SPBASE, compiler->compLclFrameSize, EA_PTRSIZE);
needMovEspEbp = true;
}
else
#endif // DOUBLE_ALIGN
{
bool needLea = false;
if (compiler->compLocallocUsed)
{
// OSR not yet ready for localloc
assert(!compiler->opts.IsOSR());
// ESP may be variable if a localloc was actually executed. Reset it.
// lea esp, [ebp - compiler->compCalleeRegsPushed * REGSIZE_BYTES]
needLea = true;
}
else if (!regSet.rsRegsModified(RBM_CALLEE_SAVED))
{
if (compiler->compLclFrameSize != 0)
{
#ifdef TARGET_AMD64
// AMD64 can't use "mov esp, ebp", according to the ABI specification describing epilogs. So,
// do an LEA to "pop off" the frame allocation.
needLea = true;
#else // !TARGET_AMD64
// We will just generate "mov esp, ebp" and be done with it.
needMovEspEbp = true;
#endif // !TARGET_AMD64
}
}
else if (compiler->compLclFrameSize == 0)
{
// do nothing before popping the callee-saved registers
}
#ifdef TARGET_X86
else if (compiler->compLclFrameSize == REGSIZE_BYTES)
{
// "pop ecx" will make ESP point to the callee-saved registers
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regSet.verifyRegUsed(REG_ECX);
}
#endif // TARGET_X86
else
{
// We need to make ESP point to the callee-saved registers
needLea = true;
}
if (needLea)
{
int offset;
#ifdef TARGET_AMD64
// lea esp, [ebp + compiler->compLclFrameSize - genSPtoFPdelta]
//
// Case 1: localloc not used.
// genSPToFPDelta = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize
// offset = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
// The amount to be subtracted from RBP to point at callee saved int regs.
//
// Case 2: localloc used
// genSPToFPDelta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize)
// Offset = Amount to be added to RBP to point at callee saved int regs.
offset = genSPtoFPdelta() - compiler->compLclFrameSize;
// Offset should fit within a byte if localloc is not used.
if (!compiler->compLocallocUsed)
{
noway_assert(offset < UCHAR_MAX);
}
#else
// lea esp, [ebp - compiler->compCalleeRegsPushed * REGSIZE_BYTES]
offset = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
noway_assert(offset < UCHAR_MAX); // the offset fits in a byte
#endif
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -offset);
}
}
//
// Pop the callee-saved registers (if any)
//
genPopCalleeSavedRegisters();
#ifdef TARGET_AMD64
// Extra OSR adjust to get to where RBP was saved by the tier0 frame.
//
// Note the other callee saves made in that frame are dead, the current method
// will save and restore what it needs.
if (compiler->opts.IsOSR())
{
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
const int tier0FrameSize = patchpointInfo->TotalFrameSize();
// Use add since we know the SP-to-FP delta of the original method.
// We also need to skip over the slot where we pushed RBP.
//
// If we ever allow the original method to have localloc this will
// need to change.
inst_RV_IV(INS_add, REG_SPBASE, tier0FrameSize + TARGET_POINTER_SIZE, EA_PTRSIZE);
}
assert(!needMovEspEbp); // "mov esp, ebp" is not allowed in AMD64 epilogs
#else // !TARGET_AMD64
if (needMovEspEbp)
{
// mov esp, ebp
inst_Mov(TYP_I_IMPL, REG_SPBASE, REG_FPBASE, /* canSkip */ false);
}
#endif // !TARGET_AMD64
// pop ebp
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
GetEmitter()->emitStartExitSeq(); // Mark the start of the "return" sequence
/* Check if this a special return block i.e.
* CEE_JMP instruction */
if (jmpEpilog)
{
noway_assert(block->bbJumpKind == BBJ_RETURN);
noway_assert(block->GetFirstLIRNode());
// figure out what jump we have
GenTree* jmpNode = block->lastNode();
#if !FEATURE_FASTTAILCALL
// x86
noway_assert(jmpNode->gtOper == GT_JMP);
#else
// amd64
// If jmpNode is GT_JMP then gtNext must be null.
// If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts.
noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr));
// Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp
noway_assert((jmpNode->gtOper == GT_JMP) ||
((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
// The next block is associated with this "if" stmt
if (jmpNode->gtOper == GT_JMP)
#endif
{
// Simply emit a jump to the methodHnd. This is similar to a call so we can use
// the same descriptor with some minor adjustments.
CORINFO_METHOD_HANDLE methHnd = (CORINFO_METHOD_HANDLE)jmpNode->AsVal()->gtVal1;
CORINFO_CONST_LOOKUP addrInfo;
compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo);
if (addrInfo.accessType != IAT_VALUE && addrInfo.accessType != IAT_PVALUE)
{
NO_WAY("Unsupported JMP indirection");
}
// If we have IAT_PVALUE we might need to jump via register indirect, as sometimes the
// indirection cell can't be reached by the jump.
emitter::EmitCallType callType;
void* addr;
regNumber indCallReg;
if (addrInfo.accessType == IAT_PVALUE)
{
if (genCodeIndirAddrCanBeEncodedAsPCRelOffset((size_t)addrInfo.addr))
{
// 32 bit displacement will work
callType = emitter::EC_FUNC_TOKEN_INDIR;
addr = addrInfo.addr;
indCallReg = REG_NA;
}
else
{
// 32 bit displacement won't work
callType = emitter::EC_INDIR_ARD;
indCallReg = REG_RAX;
addr = nullptr;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
regSet.verifyRegUsed(indCallReg);
}
}
else
{
callType = emitter::EC_FUNC_TOKEN;
addr = addrInfo.addr;
indCallReg = REG_NA;
}
// clang-format off
GetEmitter()->emitIns_Call(callType,
methHnd,
INDEBUG_LDISASM_COMMA(nullptr)
addr,
0, // argSize
EA_UNKNOWN // retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(EA_UNKNOWN), // secondRetSize
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
indCallReg, REG_NA, 0, 0, /* ireg, xreg, xmul, disp */
true /* isJump */
);
// clang-format on
}
#if FEATURE_FASTTAILCALL
else
{
genCallInstruction(jmpNode->AsCall());
}
#endif // FEATURE_FASTTAILCALL
}
else
{
unsigned stkArgSize = 0; // Zero on all platforms except x86
#if defined(TARGET_X86)
bool fCalleePop = true;
// varargs has caller pop
if (compiler->info.compIsVarArgs)
fCalleePop = false;
if (IsCallerPop(compiler->info.compCallConv))
fCalleePop = false;
if (fCalleePop)
{
noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * REGSIZE_BYTES);
stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
noway_assert(compiler->compArgSize < 0x10000); // "ret" only has 2 byte operand
}
#ifdef UNIX_X86_ABI
// The called function must remove hidden address argument from the stack before returning
// in case of struct returning according to cdecl calling convention on linux.
// Details: http://www.sco.com/developers/devspecs/abi386-4.pdf pages 40-43
if (compiler->info.compCallConv == CorInfoCallConvExtension::C && compiler->info.compRetBuffArg != BAD_VAR_NUM)
stkArgSize += TARGET_POINTER_SIZE;
#endif // UNIX_X86_ABI
#endif // TARGET_X86
/* Return, popping our arguments (if any) */
instGen_Return(stkArgSize);
}
}
#if defined(FEATURE_EH_FUNCLETS)
#if defined(TARGET_AMD64)
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
* Funclets have the following incoming arguments:
*
* catch/filter-handler: rcx = InitialSP, rdx = the exception object that was caught (see GT_CATCH_ARG)
* filter: rcx = InitialSP, rdx = the exception object to filter (see GT_CATCH_ARG)
* finally/fault: rcx = InitialSP
*
* Funclets set the following registers on exit:
*
* catch/filter-handler: rax = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: rax = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* The AMD64 funclet prolog sequence is:
*
* push ebp
* push callee-saved regs
* ; TODO-AMD64-CQ: We probably only need to save any callee-save registers that we actually use
* ; in the funclet. Currently, we save the same set of callee-saved regs calculated for
* ; the entire function.
* sub sp, XXX ; Establish the rest of the frame.
* ; XXX is determined by lvaOutgoingArgSpaceSize plus space for the PSP slot, aligned
* ; up to preserve stack alignment. If we push an odd number of registers, we also
* ; generate this, to keep the stack aligned.
*
* ; Fill the PSP slot, for use by the VM (it gets reported with the GC info), or by code generation of nested
* ; filters.
* ; This is not part of the "OS prolog"; it has no associated unwind data, and is not reversed in the funclet
* ; epilog.
* ; Also, re-establish the frame pointer from the PSP.
*
* mov rbp, [rcx + PSP_slot_InitialSP_offset] ; Load the PSP (InitialSP of the main function stored in the
* ; PSP of the dynamically containing funclet or function)
* mov [rsp + PSP_slot_InitialSP_offset], rbp ; store the PSP in our frame
* lea ebp, [rbp + Function_InitialSP_to_FP_delta] ; re-establish the frame pointer of the parent frame. If
* ; Function_InitialSP_to_FP_delta==0, we don't need this
* ; instruction.
*
* The epilog sequence is then:
*
* add rsp, XXX
* pop callee-saved regs ; if necessary
* pop rbp
* ret
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming |
* | arguments |
* +=======================+ <---- Caller's SP
* | Return address |
* |-----------------------|
* | Saved EBP |
* |-----------------------|
* |Callee saved registers |
* |-----------------------|
* ~ possible 8 byte pad ~
* ~ for alignment ~
* |-----------------------|
* | PSP slot | // Omitted in CoreRT ABI
* |-----------------------|
* | Outgoing arg space | // this only exists if the function makes a call
* |-----------------------| <---- Initial SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* TODO-AMD64-Bug?: the frame pointer should really point to the PSP slot (the debugger seems to assume this
* in DacDbiInterfaceImpl::InitParentFrameInfo()), or someplace above Initial-SP. There is an AMD64
* UNWIND_INFO restriction that it must be within 240 bytes of Initial-SP. See jit64\amd64\inc\md.h
* "FRAMEPTR OFFSETS" for details.
*/
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletProlog()\n");
}
#endif
assert(!regSet.rsRegsModified(RBM_FPBASE));
assert(block != nullptr);
assert(block->bbFlags & BBF_FUNCLET_BEG);
assert(isFramePointerUsed());
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
// We need to push ebp, since it's callee-saved.
// We need to push the callee-saved registers. We only need to push the ones that we need, but we don't
// keep track of that on a per-funclet basis, so we push the same set as in the main function.
// The only fixed-size frame we need to allocate is whatever is big enough for the PSPSym, since nothing else
// is stored here (all temps are allocated in the parent frame).
// We do need to allocate the outgoing argument space, in case there are calls here. This must be the same
// size as the parent frame's outgoing argument space, to keep the PSPSym offset the same.
inst_RV(INS_push, REG_FPBASE, TYP_REF);
compiler->unwindPush(REG_FPBASE);
// Callee saved int registers are pushed to stack.
genPushCalleeSavedRegisters();
regMaskTP maskArgRegsLiveIn;
if ((block->bbCatchTyp == BBCT_FINALLY) || (block->bbCatchTyp == BBCT_FAULT))
{
maskArgRegsLiveIn = RBM_ARG_0;
}
else
{
maskArgRegsLiveIn = RBM_ARG_0 | RBM_ARG_2;
}
regNumber initReg = REG_EBP; // We already saved EBP, so it can be trashed
bool initRegZeroed = false;
genAllocLclFrame(genFuncletInfo.fiSpDelta, initReg, &initRegZeroed, maskArgRegsLiveIn);
// Callee saved float registers are copied to stack in their assigned stack slots
// after allocating space for them as part of funclet frame.
genPreserveCalleeSavedFltRegs(genFuncletInfo.fiSpDelta);
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// If there is no PSPSym (CoreRT ABI), we are done.
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
GetEmitter()->emitIns_R_AR(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_ARG_0, genFuncletInfo.fiPSP_slot_InitialSP_offset);
regSet.verifyRegUsed(REG_FPBASE);
GetEmitter()->emitIns_AR_R(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, genFuncletInfo.fiPSP_slot_InitialSP_offset);
if (genFuncletInfo.fiFunction_InitialSP_to_FP_delta != 0)
{
GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_FPBASE,
genFuncletInfo.fiFunction_InitialSP_to_FP_delta);
}
// We've modified EBP, but not really. Say that we haven't...
regSet.rsRemoveRegsModified(RBM_FPBASE);
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*
* Note that we don't do anything with unwind codes, because AMD64 only cares about unwind codes for the prolog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
// Restore callee saved XMM regs from their stack slots before modifying SP
// to position at callee saved int regs.
genRestoreCalleeSavedFltRegs(genFuncletInfo.fiSpDelta);
inst_RV_IV(INS_add, REG_SPBASE, genFuncletInfo.fiSpDelta, EA_PTRSIZE);
genPopCalleeSavedRegisters();
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
instGen_Return(0);
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
{
return;
}
    // Note that compLclFrameSize can't be used (nor can we call functions that depend on it),
// because we're not going to allocate the same size frame as the parent.
assert(isFramePointerUsed());
assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT); // The frame size and offsets must be
// finalized
assert(compiler->compCalleeFPRegsSavedMask != (regMaskTP)-1); // The float registers to be preserved is finalized
// Even though lvaToInitialSPRelativeOffset() depends on compLclFrameSize,
// that's ok, because we're figuring out an offset in the parent frame.
genFuncletInfo.fiFunction_InitialSP_to_FP_delta =
compiler->lvaToInitialSPRelativeOffset(0, true); // trick to find the Initial-SP-relative offset of the frame
// pointer.
assert(compiler->lvaOutgoingArgSpaceSize % REGSIZE_BYTES == 0);
#ifndef UNIX_AMD64_ABI
// No 4 slots for outgoing params on the stack for System V systems.
assert((compiler->lvaOutgoingArgSpaceSize == 0) ||
(compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES))); // On AMD64, we always have 4 outgoing argument
// slots if there are any calls in the function.
#endif // UNIX_AMD64_ABI
unsigned offset = compiler->lvaOutgoingArgSpaceSize;
genFuncletInfo.fiPSP_slot_InitialSP_offset = offset;
// How much stack do we allocate in the funclet?
// We need to 16-byte align the stack.
unsigned totalFrameSize =
REGSIZE_BYTES // return address
+ REGSIZE_BYTES // pushed EBP
+ (compiler->compCalleeRegsPushed * REGSIZE_BYTES); // pushed callee-saved int regs, not including EBP
// Entire 128-bits of XMM register is saved to stack due to ABI encoding requirement.
// Copying entire XMM register to/from memory will be performant if SP is aligned at XMM_REGSIZE_BYTES boundary.
unsigned calleeFPRegsSavedSize = genCountBits(compiler->compCalleeFPRegsSavedMask) * XMM_REGSIZE_BYTES;
unsigned FPRegsPad = (calleeFPRegsSavedSize > 0) ? AlignmentPad(totalFrameSize, XMM_REGSIZE_BYTES) : 0;
unsigned PSPSymSize = (compiler->lvaPSPSym != BAD_VAR_NUM) ? REGSIZE_BYTES : 0;
totalFrameSize += FPRegsPad // Padding before pushing entire xmm regs
+ calleeFPRegsSavedSize // pushed callee-saved float regs
// below calculated 'pad' will go here
+ PSPSymSize // PSPSym
+ compiler->lvaOutgoingArgSpaceSize // outgoing arg space
;
unsigned pad = AlignmentPad(totalFrameSize, 16);
genFuncletInfo.fiSpDelta = FPRegsPad // Padding to align SP on XMM_REGSIZE_BYTES boundary
+ calleeFPRegsSavedSize // Callee saved xmm regs
+ pad + PSPSymSize // PSPSym
+ compiler->lvaOutgoingArgSpaceSize // outgoing arg space
;
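    // Worked example (illustrative numbers only): with compCalleeRegsPushed = 3, one callee-saved xmm register,
    // a PSPSym and lvaOutgoingArgSpaceSize = 32:
    //   totalFrameSize = 8 + 8 + 24 = 40; FPRegsPad = AlignmentPad(40, 16) = 8;
    //   totalFrameSize becomes 40 + 8 + 16 + 8 + 32 = 104; pad = AlignmentPad(104, 16) = 8;
    //   fiSpDelta = 8 + 16 + 8 + 8 + 32 = 72, which keeps the funclet SP 16-byte aligned (40 + 72 = 112).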
#ifdef DEBUG
if (verbose)
{
printf("\n");
printf("Funclet prolog / epilog info\n");
printf(" Function InitialSP-to-FP delta: %d\n", genFuncletInfo.fiFunction_InitialSP_to_FP_delta);
printf(" SP delta: %d\n", genFuncletInfo.fiSpDelta);
printf(" PSP slot Initial SP offset: %d\n", genFuncletInfo.fiPSP_slot_InitialSP_offset);
}
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
assert(genFuncletInfo.fiPSP_slot_InitialSP_offset ==
compiler->lvaGetInitialSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main function and
// funclet!
}
#endif // DEBUG
}
#elif defined(TARGET_X86)
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
*
* Funclets have the following incoming arguments:
*
* catch/filter-handler: eax = the exception object that was caught (see GT_CATCH_ARG)
* filter: eax = the exception object that was caught (see GT_CATCH_ARG)
* finally/fault: none
*
* Funclets set the following registers on exit:
*
* catch/filter-handler: eax = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: eax = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* Funclet prolog/epilog sequence and funclet frame layout are TBD.
*
*/
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletProlog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// TODO We may need EBP restore sequence here if we introduce PSPSym
// Add a padding for 16-byte alignment
inst_RV_IV(INS_sub, REG_SPBASE, 12, EA_PTRSIZE);
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In genFuncletEpilog()\n");
}
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
// Revert a padding that was added for 16-byte alignment
inst_RV_IV(INS_add, REG_SPBASE, 12, EA_PTRSIZE);
instGen_Return(0);
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
{
return;
}
}
#endif // TARGET_X86
void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
noway_assert(isFramePointerUsed()); // We need an explicit frame pointer
#if defined(TARGET_AMD64)
// The PSP sym value is Initial-SP, not Caller-SP!
// We assume that RSP is Initial-SP when this function is called. That is, the stack frame
// has been established.
//
// We generate:
// mov [rbp-20h], rsp // store the Initial-SP (our current rsp) in the PSPsym
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaPSPSym, 0);
#else // TARGET*
NYI("Set function PSP sym");
#endif // TARGET*
}
#endif // FEATURE_EH_FUNCLETS
//-----------------------------------------------------------------------------
// genZeroInitFrameUsingBlockInit: architecture-specific helper for genZeroInitFrame in the case
// `genUseBlockInit` is set.
//
// Arguments:
// untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init
// code will end initializing memory (not inclusive).
// untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will
// start zero initializing memory.
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
assert(genUseBlockInit);
assert(untrLclHi > untrLclLo);
assert(compiler->getSIMDSupportLevel() >= SIMD_SSE2_Supported);
emitter* emit = GetEmitter();
regNumber frameReg = genFramePointerReg();
regNumber zeroReg = REG_NA;
int blkSize = untrLclHi - untrLclLo;
int minSimdSize = XMM_REGSIZE_BYTES;
assert(blkSize >= 0);
noway_assert((blkSize % sizeof(int)) == 0);
// initReg is not a live incoming argument reg
assert((genRegMask(initReg) & intRegState.rsCalleeRegArgMaskLiveIn) == 0);
#if defined(TARGET_AMD64)
// We will align on x64 so can use the aligned mov
instruction simdMov = simdAlignedMovIns();
// Aligning low we want to move up to next boundary
int alignedLclLo = (untrLclLo + (XMM_REGSIZE_BYTES - 1)) & -XMM_REGSIZE_BYTES;
if ((untrLclLo != alignedLclLo) && (blkSize < 2 * XMM_REGSIZE_BYTES))
{
        // If unaligned and smaller than 2 x SIMD size we won't bother trying to align
assert((alignedLclLo - untrLclLo) < XMM_REGSIZE_BYTES);
simdMov = simdUnalignedMovIns();
}
#else // !defined(TARGET_AMD64)
// We aren't going to try and align on x86
instruction simdMov = simdUnalignedMovIns();
int alignedLclLo = untrLclLo;
#endif // !defined(TARGET_AMD64)
if (blkSize < minSimdSize)
{
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
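        // Illustrative example: on x64 a blkSize of 12 is zeroed with one 8-byte store followed by one 4-byte
        // store; this small-block path always uses the integer zero register rather than a SIMD register.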
int i = 0;
for (; i + REGSIZE_BYTES <= blkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, untrLclLo + i);
}
#if defined(TARGET_AMD64)
assert((i == blkSize) || (i + (int)sizeof(int) == blkSize));
if (i != blkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, untrLclLo + i);
i += sizeof(int);
}
#endif // defined(TARGET_AMD64)
assert(i == blkSize);
}
else
{
// Grab a non-argument, non-callee saved XMM reg
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef UNIX_AMD64_ABI
// System V x64 first temp reg is xmm8
regNumber zeroSIMDReg = genRegNumFromMask(RBM_XMM8);
#else
// Windows first temp reg is xmm4
regNumber zeroSIMDReg = genRegNumFromMask(RBM_XMM4);
#endif // UNIX_AMD64_ABI
#if defined(TARGET_AMD64)
int alignedLclHi;
int alignmentHiBlkSize;
if ((blkSize < 2 * XMM_REGSIZE_BYTES) || (untrLclLo == alignedLclLo))
{
            // Either aligned or smaller than 2 x SIMD size so we won't try to align
// However, we still want to zero anything that is not in a 16 byte chunk at end
int alignmentBlkSize = blkSize & -XMM_REGSIZE_BYTES;
alignmentHiBlkSize = blkSize - alignmentBlkSize;
alignedLclHi = untrLclLo + alignmentBlkSize;
alignedLclLo = untrLclLo;
blkSize = alignmentBlkSize;
assert((blkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
}
else
{
// We are going to align
// Aligning high we want to move down to previous boundary
alignedLclHi = untrLclHi & -XMM_REGSIZE_BYTES;
// Zero out the unaligned portions
alignmentHiBlkSize = untrLclHi - alignedLclHi;
int alignmentLoBlkSize = alignedLclLo - untrLclLo;
blkSize = alignedLclHi - alignedLclLo;
assert((blkSize + alignmentLoBlkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
assert(alignmentLoBlkSize > 0);
assert(alignmentLoBlkSize < XMM_REGSIZE_BYTES);
assert((alignedLclLo - alignmentLoBlkSize) == untrLclLo);
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
int i = 0;
for (; i + REGSIZE_BYTES <= alignmentLoBlkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, untrLclLo + i);
}
assert((i == alignmentLoBlkSize) || (i + (int)sizeof(int) == alignmentLoBlkSize));
if (i != alignmentLoBlkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, untrLclLo + i);
i += sizeof(int);
}
assert(i == alignmentLoBlkSize);
}
#else // !defined(TARGET_AMD64)
// While we aren't aligning the start, we still want to
// zero anything that is not in a 16 byte chunk at end
int alignmentBlkSize = blkSize & -XMM_REGSIZE_BYTES;
int alignmentHiBlkSize = blkSize - alignmentBlkSize;
int alignedLclHi = untrLclLo + alignmentBlkSize;
blkSize = alignmentBlkSize;
assert((blkSize + alignmentHiBlkSize) == (untrLclHi - untrLclLo));
#endif // !defined(TARGET_AMD64)
// The loop is unrolled 3 times so we do not move to the loop block until it
// will loop at least once so the threshold is 6.
if (blkSize < (6 * XMM_REGSIZE_BYTES))
{
// Generate the following code:
//
// xorps xmm4, xmm4
// movups xmmword ptr [ebp/esp-OFFS], xmm4
// ...
// movups xmmword ptr [ebp/esp-OFFS], xmm4
// mov qword ptr [ebp/esp-OFFS], rax
emit->emitIns_R_R(INS_xorps, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, zeroSIMDReg);
int i = 0;
for (; i < blkSize; i += XMM_REGSIZE_BYTES)
{
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, alignedLclLo + i);
}
assert(i == blkSize);
}
else
{
// Generate the following code:
//
// xorps xmm4, xmm4
// ;movaps xmmword ptr[ebp/esp-loOFFS], xmm4 ; alignment to 3x
// ;movaps xmmword ptr[ebp/esp-loOFFS + 10H], xmm4 ;
// mov rax, - <size> ; start offset from hi
// movaps xmmword ptr[rbp + rax + hiOFFS ], xmm4 ; <--+
// movaps xmmword ptr[rbp + rax + hiOFFS + 10H], xmm4 ; |
// movaps xmmword ptr[rbp + rax + hiOFFS + 20H], xmm4 ; | Loop
// add rax, 48 ; |
// jne SHORT -5 instr ; ---+
emit->emitIns_R_R(INS_xorps, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, zeroSIMDReg);
// How many extra don't fit into the 3x unroll
int extraSimd = (blkSize % (XMM_REGSIZE_BYTES * 3)) / XMM_REGSIZE_BYTES;
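            // Illustrative example: blkSize = 112 (seven 16-byte chunks) gives extraSimd = (112 % 48) / 16 = 1,
            // so one store is emitted at the low end here and the 3x-unrolled loop clears the remaining 96 bytes.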
if (extraSimd != 0)
{
blkSize -= XMM_REGSIZE_BYTES;
// Not a multiple of 3 so add stores at low end of block
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, alignedLclLo);
if (extraSimd == 2)
{
blkSize -= XMM_REGSIZE_BYTES;
// one more store needed
emit->emitIns_AR_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg,
alignedLclLo + XMM_REGSIZE_BYTES);
}
}
// Exact multiple of 3 simd lengths (or loop end condition will not be met)
noway_assert((blkSize % (3 * XMM_REGSIZE_BYTES)) == 0);
// At least 3 simd lengths remain (as loop is 3x unrolled and we want it to loop at least once)
assert(blkSize >= (3 * XMM_REGSIZE_BYTES));
// In range at start of loop
assert((alignedLclHi - blkSize) >= untrLclLo);
assert(((alignedLclHi - blkSize) + (XMM_REGSIZE_BYTES * 2)) < (untrLclHi - XMM_REGSIZE_BYTES));
// In range at end of loop
assert((alignedLclHi - (3 * XMM_REGSIZE_BYTES) + (2 * XMM_REGSIZE_BYTES)) <=
(untrLclHi - XMM_REGSIZE_BYTES));
assert((alignedLclHi - (blkSize + extraSimd * XMM_REGSIZE_BYTES)) == alignedLclLo);
// Set loop counter
emit->emitIns_R_I(INS_mov, EA_PTRSIZE, initReg, -(ssize_t)blkSize);
// Loop start
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1, alignedLclHi);
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1,
alignedLclHi + XMM_REGSIZE_BYTES);
emit->emitIns_ARX_R(simdMov, EA_ATTR(XMM_REGSIZE_BYTES), zeroSIMDReg, frameReg, initReg, 1,
alignedLclHi + 2 * XMM_REGSIZE_BYTES);
emit->emitIns_R_I(INS_add, EA_PTRSIZE, initReg, XMM_REGSIZE_BYTES * 3);
// Loop until counter is 0
emit->emitIns_J(INS_jne, nullptr, -5);
// initReg will be zero at end of the loop
*pInitRegZeroed = true;
}
if (untrLclHi != alignedLclHi)
{
assert(alignmentHiBlkSize > 0);
assert(alignmentHiBlkSize < XMM_REGSIZE_BYTES);
assert((alignedLclHi + alignmentHiBlkSize) == untrLclHi);
zeroReg = genGetZeroReg(initReg, pInitRegZeroed);
int i = 0;
for (; i + REGSIZE_BYTES <= alignmentHiBlkSize; i += REGSIZE_BYTES)
{
emit->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, frameReg, alignedLclHi + i);
}
#if defined(TARGET_AMD64)
assert((i == alignmentHiBlkSize) || (i + (int)sizeof(int) == alignmentHiBlkSize));
if (i != alignmentHiBlkSize)
{
emit->emitIns_AR_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, frameReg, alignedLclHi + i);
i += sizeof(int);
}
#endif // defined(TARGET_AMD64)
assert(i == alignmentHiBlkSize);
}
}
}
// Save compCalleeFPRegsPushed with the smallest register number saved at [RSP+offset], working
// down the stack to the largest register number stored at [RSP+offset-(genCountBits(regMask)-1)*XMM_REG_SIZE]
// Here offset = 16-byte aligned offset after pushing integer registers.
//
// Params
// lclFrameSize - Fixed frame size excluding callee pushed int regs.
// non-funclet: this will be compLclFrameSize.
// funclet frames: this will be FuncletInfo.fiSpDelta.
void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize)
{
genVzeroupperIfNeeded(false);
regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
// Only callee saved floating point registers should be in regMask
assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
// fast path return
if (regMask == RBM_NONE)
{
return;
}
#ifdef TARGET_AMD64
unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
unsigned offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
// Offset is 16-byte aligned since we use movaps for preserving xmm regs.
assert((offset % 16) == 0);
instruction copyIns = ins_Copy(TYP_FLOAT);
#else // !TARGET_AMD64
unsigned offset = lclFrameSize - XMM_REGSIZE_BYTES;
instruction copyIns = INS_movupd;
#endif // !TARGET_AMD64
for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & regMask) != 0)
{
// ABI requires us to preserve lower 128-bits of YMM register.
GetEmitter()->emitIns_AR_R(copyIns,
EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
// EA_16BYTE
reg, REG_SPBASE, offset);
compiler->unwindSaveReg(reg, offset);
regMask &= ~regBit;
offset -= XMM_REGSIZE_BYTES;
}
}
}
// Save/Restore compCalleeFPRegsPushed with the smallest register number saved at [RSP+offset], working
// down the stack to the largest register number stored at [RSP+offset-(genCountBits(regMask)-1)*XMM_REG_SIZE]
// Here offset = 16-byte aligned offset after pushing integer registers.
//
// Params
// lclFrameSize - Fixed frame size excluding callee pushed int regs.
// non-funclet: this will be compLclFrameSize.
// funclet frames: this will be FuncletInfo.fiSpDelta.
void CodeGen::genRestoreCalleeSavedFltRegs(unsigned lclFrameSize)
{
regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
// Only callee saved floating point registers should be in regMask
assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
// fast path return
if (regMask == RBM_NONE)
{
genVzeroupperIfNeeded();
return;
}
#ifdef TARGET_AMD64
unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
instruction copyIns = ins_Copy(TYP_FLOAT);
#else // !TARGET_AMD64
unsigned firstFPRegPadding = 0;
instruction copyIns = INS_movupd;
#endif // !TARGET_AMD64
unsigned offset;
regNumber regBase;
if (compiler->compLocallocUsed)
{
// localloc frame: use frame pointer relative offset
assert(isFramePointerUsed());
regBase = REG_FPBASE;
offset = lclFrameSize - genSPtoFPdelta() - firstFPRegPadding - XMM_REGSIZE_BYTES;
}
else
{
regBase = REG_SPBASE;
offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
}
#ifdef TARGET_AMD64
// Offset is 16-byte aligned since we use movaps for restoring xmm regs
assert((offset % 16) == 0);
#endif // TARGET_AMD64
for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg))
{
regMaskTP regBit = genRegMask(reg);
if ((regBit & regMask) != 0)
{
// ABI requires us to restore lower 128-bits of YMM register.
GetEmitter()->emitIns_R_AR(copyIns,
EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
// EA_16BYTE
reg, regBase, offset);
regMask &= ~regBit;
offset -= XMM_REGSIZE_BYTES;
}
}
genVzeroupperIfNeeded();
}
// Generate Vzeroupper instruction as needed to zero out the upper 128 bits of all YMM registers so that the
// AVX/Legacy SSE transition penalties can be avoided. This function is used in genPreserveCalleeSavedFltRegs
// (prolog) and genRestoreCalleeSavedFltRegs (epilog). Issue VZEROUPPER in Prolog if the method contains
// 128-bit or 256-bit AVX code, to avoid legacy SSE to AVX transition penalty, which could happen when native
// code contains legacy SSE code calling into JIT AVX code (e.g. reverse pinvoke). Issue VZEROUPPER in Epilog
// if the method contains 256-bit AVX code, to avoid AVX to legacy SSE transition penalty.
//
// Params
//    check256bitOnly - true to check if the function contains 256-bit AVX instructions and generate Vzeroupper
//                      instruction, false to check if the function contains AVX instructions (either 128-bit or 256-bit).
//
void CodeGen::genVzeroupperIfNeeded(bool check256bitOnly /* = true*/)
{
bool emitVzeroUpper = false;
if (check256bitOnly)
{
emitVzeroUpper = GetEmitter()->Contains256bitAVX();
}
else
{
emitVzeroUpper = GetEmitter()->ContainsAVX();
}
if (emitVzeroUpper)
{
assert(compiler->canUseVexEncoding());
instGen(INS_vzeroupper);
}
}
//-----------------------------------------------------------------------------------
// instGen_MemoryBarrier: Emit a MemoryBarrier instruction
//
// Arguments:
// barrierKind - kind of barrier to emit (Load-only is no-op on xarch)
//
// Notes:
// All MemoryBarrier instructions can be removed by DOTNET_JitNoMemoryBarriers=1
//
void CodeGen::instGen_MemoryBarrier(BarrierKind barrierKind)
{
#ifdef DEBUG
if (JitConfig.JitNoMemoryBarriers() == 1)
{
return;
}
#endif // DEBUG
// only full barrier needs to be emitted on Xarch
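    // The sequence emitted below is "lock or dword ptr [esp/rsp], 0": a locked read-modify-write of the top
    // of the stack serves as a full two-way barrier and is typically cheaper than an explicit mfence here.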
if (barrierKind == BARRIER_FULL)
{
instGen(INS_lock);
GetEmitter()->emitIns_I_AR(INS_or, EA_4BYTE, 0, REG_SPBASE, 0);
}
}
#ifdef TARGET_AMD64
// Returns relocation type hint for an addr.
// Note that there are no reloc hints on x86.
//
// Arguments
// addr - data address
//
// Returns
// relocation type hint
//
unsigned short CodeGenInterface::genAddrRelocTypeHint(size_t addr)
{
return compiler->eeGetRelocTypeHint((void*)addr);
}
#endif // TARGET_AMD64
// Return true if an absolute indirect data address can be encoded as an IP-relative
// offset. Note that this method should be used only when the caller knows that
// the address is an icon value that VM has given and there is no GenTree node
// representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - an absolute indirect data address
//
// Returns
// true if indir data addr could be encoded as IP-relative offset.
//
bool CodeGenInterface::genDataIndirAddrCanBeEncodedAsPCRelOffset(size_t addr)
{
#ifdef TARGET_AMD64
return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32;
#else
// x86: PC-relative addressing is available only for control flow instructions (jmp and call)
return false;
#endif
}
// Return true if an indirect code address can be encoded as IP-relative offset.
// Note that this method should be used only when the caller knows that the
// address is an icon value that VM has given and there is no GenTree node
// representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - an absolute indirect code address
//
// Returns
// true if indir code addr could be encoded as IP-relative offset.
//
bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsPCRelOffset(size_t addr)
{
#ifdef TARGET_AMD64
return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32;
#else
// x86: PC-relative addressing is available only for control flow instructions (jmp and call)
return true;
#endif
}
// Return true if an indirect code address can be encoded as 32-bit displacement
// relative to zero. Note that this method should be used only when the caller
// knows that the address is an icon value that VM has given and there is no
// GenTree node representing it. Otherwise, one should always use FitsInAddrBase().
//
// Arguments
// addr - absolute indirect code address
//
// Returns
// true if absolute indir code addr could be encoded as 32-bit displacement relative to zero.
//
bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsZeroRelOffset(size_t addr)
{
return GenTreeIntConCommon::FitsInI32((ssize_t)addr);
}
// Return true if an absolute indirect code address needs a relocation recorded with VM.
//
// Arguments
// addr - an absolute indirect code address
//
// Returns
// true if indir code addr needs a relocation recorded with VM
//
bool CodeGenInterface::genCodeIndirAddrNeedsReloc(size_t addr)
{
// If generating relocatable ngen code, then all code addr should go through relocation
if (compiler->opts.compReloc)
{
return true;
}
#ifdef TARGET_AMD64
// See if the code indir addr can be encoded as 32-bit displacement relative to zero.
// We don't need a relocation in that case.
if (genCodeIndirAddrCanBeEncodedAsZeroRelOffset(addr))
{
return false;
}
// Else we need a relocation.
return true;
#else // TARGET_X86
// On x86 there is no need to record or ask for relocations during jitting,
// because all addrs fit within 32-bits.
return false;
#endif // TARGET_X86
}
// Return true if a direct code address needs to be marked as relocatable.
//
// Arguments
// addr - absolute direct code address
//
// Returns
// true if direct code addr needs a relocation recorded with VM
//
bool CodeGenInterface::genCodeAddrNeedsReloc(size_t addr)
{
// If generating relocatable ngen code, then all code addr should go through relocation
if (compiler->opts.compReloc)
{
return true;
}
#ifdef TARGET_AMD64
// By default all direct code addresses go through relocation so that VM will setup
// a jump stub if addr cannot be encoded as pc-relative offset.
return true;
#else // TARGET_X86
// On x86 there is no need for recording relocations during jitting,
// because all addrs fit within 32-bits.
return false;
#endif // TARGET_X86
}
#endif // TARGET_XARCH
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/compiler.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Compiler XX
XX XX
XX Represents the method data we are currently JIT-compiling. XX
XX An instance of this class is created for every method we JIT. XX
XX This contains all the info needed for the method. So allocating a XX
XX a new instance per method makes it thread-safe. XX
XX It should be used to do all the memory management for the compiler run. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _COMPILER_H_
#define _COMPILER_H_
/*****************************************************************************/
#include "jit.h"
#include "opcode.h"
#include "varset.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "gentree.h"
#include "debuginfo.h"
#include "lir.h"
#include "block.h"
#include "inline.h"
#include "jiteh.h"
#include "instr.h"
#include "regalloc.h"
#include "sm.h"
#include "cycletimer.h"
#include "blockset.h"
#include "arraystack.h"
#include "hashbv.h"
#include "jitexpandarray.h"
#include "tinyarray.h"
#include "valuenum.h"
#include "jittelemetry.h"
#include "namedintrinsiclist.h"
#ifdef LATE_DISASM
#include "disasm.h"
#endif
#include "codegeninterface.h"
#include "regset.h"
#include "jitgcinfo.h"
#if DUMP_GC_TABLES && defined(JIT32_GCENCODER)
#include "gcdump.h"
#endif
#include "emit.h"
#include "hwintrinsic.h"
#include "simd.h"
#include "simdashwintrinsic.h"
// This is only used locally in the JIT to indicate that
// a verification block should be inserted
#define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER
/*****************************************************************************
* Forward declarations
*/
struct InfoHdr; // defined in GCInfo.h
struct escapeMapping_t; // defined in fgdiagnostic.cpp
class emitter; // defined in emit.h
struct ShadowParamVarInfo; // defined in GSChecks.cpp
struct InitVarDscInfo; // defined in register_arg_convention.h
class FgStack; // defined in fgbasic.cpp
class Instrumentor; // defined in fgprofile.cpp
class SpanningTreeVisitor; // defined in fgprofile.cpp
class CSE_DataFlow; // defined in OptCSE.cpp
class OptBoolsDsc; // defined in optimizer.cpp
#ifdef DEBUG
struct IndentStack;
#endif
class Lowering; // defined in lower.h
// The following are defined in this file, Compiler.h
class Compiler;
/*****************************************************************************
* Unwind info
*/
#include "unwind.h"
/*****************************************************************************/
//
// Declare global operator new overloads that use the compiler's arena allocator
//
// I wanted to make the second argument optional, with default = CMK_Unknown, but that
// caused these to be ambiguous with the global placement new operators.
void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk);
void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk);
void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference);
// Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions.
#include "loopcloning.h"
/*****************************************************************************/
/* This is included here and not earlier as it needs the definition of "CSE"
* which is defined in the section above */
/*****************************************************************************/
unsigned genLog2(unsigned value);
unsigned genLog2(unsigned __int64 value);
unsigned ReinterpretHexAsDecimal(unsigned in);
/*****************************************************************************/
const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC);
#ifdef DEBUG
const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs
#endif
//------------------------------------------------------------------------
// HFA info shared by LclVarDsc and fgArgTabEntry
//------------------------------------------------------------------------
inline bool IsHfa(CorInfoHFAElemType kind)
{
return kind != CORINFO_HFA_ELEM_NONE;
}
inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind)
{
switch (kind)
{
case CORINFO_HFA_ELEM_FLOAT:
return TYP_FLOAT;
case CORINFO_HFA_ELEM_DOUBLE:
return TYP_DOUBLE;
#ifdef FEATURE_SIMD
case CORINFO_HFA_ELEM_VECTOR64:
return TYP_SIMD8;
case CORINFO_HFA_ELEM_VECTOR128:
return TYP_SIMD16;
#endif
case CORINFO_HFA_ELEM_NONE:
return TYP_UNDEF;
default:
assert(!"Invalid HfaElemKind");
return TYP_UNDEF;
}
}
inline CorInfoHFAElemType HfaElemKindFromType(var_types type)
{
switch (type)
{
case TYP_FLOAT:
return CORINFO_HFA_ELEM_FLOAT;
case TYP_DOUBLE:
return CORINFO_HFA_ELEM_DOUBLE;
#ifdef FEATURE_SIMD
case TYP_SIMD8:
return CORINFO_HFA_ELEM_VECTOR64;
case TYP_SIMD16:
return CORINFO_HFA_ELEM_VECTOR128;
#endif
case TYP_UNDEF:
return CORINFO_HFA_ELEM_NONE;
default:
assert(!"Invalid HFA Type");
return CORINFO_HFA_ELEM_NONE;
}
}
// The following holds the Local var info (scope information)
typedef const char* VarName; // Actual ASCII string
struct VarScopeDsc
{
unsigned vsdVarNum; // (remapped) LclVarDsc number
unsigned vsdLVnum; // 'which' in eeGetLVinfo().
// Also, it is the index of this entry in the info.compVarScopes array,
// which is useful since the array is also accessed via the
// compEnterScopeList and compExitScopeList sorted arrays.
IL_OFFSET vsdLifeBeg; // instr offset of beg of life
IL_OFFSET vsdLifeEnd; // instr offset of end of life
#ifdef DEBUG
VarName vsdName; // name of the var
#endif
};
// This class stores information associated with a LclVar SSA definition.
class LclSsaVarDsc
{
// The basic block where the definition occurs. Definitions of uninitialized variables
// are considered to occur at the start of the first basic block (fgFirstBB).
//
// TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by
// SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to
// investigate and perhaps eliminate this rather unexpected behavior.
BasicBlock* m_block;
// The GT_ASG node that generates the definition, or nullptr for definitions
// of uninitialized variables.
GenTreeOp* m_asg;
public:
LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr)
{
}
LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg)
{
assert((asg == nullptr) || asg->OperIs(GT_ASG));
}
BasicBlock* GetBlock() const
{
return m_block;
}
void SetBlock(BasicBlock* block)
{
m_block = block;
}
GenTreeOp* GetAssignment() const
{
return m_asg;
}
void SetAssignment(GenTreeOp* asg)
{
assert((asg == nullptr) || asg->OperIs(GT_ASG));
m_asg = asg;
}
ValueNumPair m_vnPair;
};
// This class stores information associated with a memory SSA definition.
class SsaMemDef
{
public:
ValueNumPair m_vnPair;
};
//------------------------------------------------------------------------
// SsaDefArray: A resizable array of SSA definitions.
//
// Unlike an ordinary resizable array implementation, this allows only element
// addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM
// (basically it's a 1-based array). The array doesn't impose any particular
// requirements on the elements it stores and AllocSsaNum forwards its arguments
// to the array element constructor, this way the array supports both LclSsaVarDsc
// and SsaMemDef elements.
//
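// Illustrative usage sketch (not taken from any particular caller):
//
//   SsaDefArray<LclSsaVarDsc> defs;
//   unsigned ssaNum = defs.AllocSsaNum(alloc);  // the first allocation returns SsaConfig::FIRST_SSA_NUM (1)
//   LclSsaVarDsc* def = defs.GetSsaDef(ssaNum); // 1-based lookup; RESERVED_SSA_NUM (0) is never a valid input
//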
template <typename T>
class SsaDefArray
{
T* m_array;
unsigned m_arraySize;
unsigned m_count;
static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0);
static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1);
// Get the minimum valid SSA number.
unsigned GetMinSsaNum() const
{
return SsaConfig::FIRST_SSA_NUM;
}
// Increase (double) the size of the array.
void GrowArray(CompAllocator alloc)
{
unsigned oldSize = m_arraySize;
unsigned newSize = max(2, oldSize * 2);
T* newArray = alloc.allocate<T>(newSize);
for (unsigned i = 0; i < oldSize; i++)
{
newArray[i] = m_array[i];
}
m_array = newArray;
m_arraySize = newSize;
}
public:
// Construct an empty SsaDefArray.
SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0)
{
}
// Reset the array (used only if the SSA form is reconstructed).
void Reset()
{
m_count = 0;
}
// Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM).
template <class... Args>
unsigned AllocSsaNum(CompAllocator alloc, Args&&... args)
{
if (m_count == m_arraySize)
{
GrowArray(alloc);
}
unsigned ssaNum = GetMinSsaNum() + m_count;
m_array[m_count++] = T(std::forward<Args>(args)...);
// Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM
assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1));
return ssaNum;
}
// Get the number of SSA definitions in the array.
unsigned GetCount() const
{
return m_count;
}
// Get a pointer to the SSA definition at the specified index.
T* GetSsaDefByIndex(unsigned index)
{
assert(index < m_count);
return &m_array[index];
}
// Check if the specified SSA number is valid.
bool IsValidSsaNum(unsigned ssaNum) const
{
return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count));
}
// Get a pointer to the SSA definition associated with the specified SSA number.
T* GetSsaDef(unsigned ssaNum)
{
assert(ssaNum != SsaConfig::RESERVED_SSA_NUM);
return GetSsaDefByIndex(ssaNum - GetMinSsaNum());
}
// Get an SSA number associated with the specified SSA def (that must be in this array).
unsigned GetSsaNum(T* ssaDef)
{
assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count]));
return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]);
}
};
enum RefCountState
{
RCS_INVALID, // not valid to get/set ref counts
RCS_EARLY, // early counts for struct promotion and struct passing
RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward)
};
#ifdef DEBUG
// Reasons why we can't enregister a local.
enum class DoNotEnregisterReason
{
None,
AddrExposed, // the address of this local is exposed.
DontEnregStructs, // struct enregistration is disabled.
    NotRegSizeStruct, // the struct size does not match any register size, usually the struct size is too big.
LocalField, // the local is accessed with LCL_FLD, note we can do it not only for struct locals.
VMNeedsStackAddr,
    LiveInOutOfHandler, // the local is alive in and out of an exception handler and is not a single def.
BlockOp, // Is read or written via a block operation.
IsStructArg, // Is a struct passed as an argument in a way that requires a stack location.
DepField, // It is a field of a dependently promoted struct
NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set
MinOptsGC, // It is a GC Ref and we are compiling MinOpts
#if !defined(TARGET_64BIT)
LongParamField, // It is a decomposed field of a long parameter.
#endif
#ifdef JIT32_GCENCODER
PinningRef,
#endif
LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD.
CastTakesAddr,
StoreBlkSrc, // the local is used as STORE_BLK source.
    OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregistered.
SwizzleArg, // the local is passed using LCL_FLD as another type.
    BlockOpRet, // the struct is returned and it is promoted or there is a cast.
ReturnSpCheck, // the local is used to do SP check
SimdUserForcesDep // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted
};
enum class AddressExposedReason
{
NONE,
PARENT_EXPOSED, // This is a promoted field but the parent is exposed.
TOO_CONSERVATIVE, // Were marked as exposed to be conservative, fix these places.
ESCAPE_ADDRESS, // The address is escaping, for example, passed as call argument.
WIDE_INDIR, // We access via indirection with wider type.
OSR_EXPOSED, // It was exposed in the original method, osr has to repeat it.
STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed.
COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed.
DISPATCH_RET_BUF // Caller return buffer dispatch.
};
#endif // DEBUG
class LclVarDsc
{
public:
// The constructor. Most things can just be zero'ed.
//
// Initialize the ArgRegs to REG_STK.
// Morph will update if this local is passed in a register.
LclVarDsc()
: _lvArgReg(REG_STK)
,
#if FEATURE_MULTIREG_ARGS
_lvOtherArgReg(REG_STK)
,
#endif // FEATURE_MULTIREG_ARGS
lvClassHnd(NO_CLASS_HANDLE)
, lvRefBlks(BlockSetOps::UninitVal())
, lvPerSsaData()
{
}
// note this only packs because var_types is a typedef of unsigned char
var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF
unsigned char lvIsParam : 1; // is this a parameter?
unsigned char lvIsRegArg : 1; // is this an argument that was passed by register?
unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP)
unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame
unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the
// variable is in the same register for the entire function.
unsigned char lvTracked : 1; // is this a tracked variable?
bool lvTrackedNonStruct()
{
return lvTracked && lvType != TYP_STRUCT;
}
unsigned char lvPinned : 1; // is this a pinned variable?
unsigned char lvMustInit : 1; // must be initialized
private:
bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a
// global location, etc.
// We cannot reason reliably about the value of the variable.
public:
unsigned char lvDoNotEnregister : 1; // Do not enregister this variable.
unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects
// struct promotion.
unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must
// be on the stack (at least at those boundaries.)
unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder)
unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable.
unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local.
unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local
// stack frame.
unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local
unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local
unsigned char lvIsTemp : 1; // Short-lifetime compiler temp
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref.
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
unsigned char lvIsBoolean : 1; // set if variable is boolean
unsigned char lvSingleDef : 1; // variable has a single def
// before lvaMarkLocalVars: identifies ref type locals that can get type updates
// after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies
unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate
// Currently, this is only used to decide if an EH variable can be
// a register candidate or not.
unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variables that are disqualified from register
// candidacy
unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan)
// and is spilled making it candidate to spill right after the
// first (and only) definition.
// Note: We cannot reuse lvSingleDefRegCandidate because it is set
// in earlier phase and the information might not be appropriate
// in LSRA.
unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization
unsigned char lvVolatileHint : 1; // hint for AssertionProp
#ifndef TARGET_64BIT
unsigned char lvStructDoubleAlign : 1; // Must we double align this struct?
#endif // !TARGET_64BIT
#ifdef TARGET_64BIT
unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long
#endif
#ifdef DEBUG
unsigned char lvKeepType : 1; // Don't change the type of this variable
unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one
#endif
unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security
// checks)
unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks?
unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a
// 32-bit target. For implicit byref parameters, this gets hijacked between
// fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether
// references to the arg are being rewritten as references to a promoted shadow local.
unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local?
unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields
unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes
unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout"
unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context
unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call
#ifdef FEATURE_HFA_FIELDS_PRESENT
CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif // FEATURE_HFA_FIELDS_PRESENT
#ifdef DEBUG
// TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct
// types, and is needed because of cases where TYP_STRUCT is bashed to an integral type.
// Consider cleaning this up so this workaround is not required.
unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals.
// I.e. there is no longer any reference to the struct directly.
// In this case we can simply remove this struct local.
unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no
// reference to the fields of this struct.
#endif
unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes
#ifdef FEATURE_SIMD
// Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the
// type of an arg node is TYP_BYREF and a local node is TYP_SIMD*.
unsigned char lvSIMDType : 1; // This is a SIMD struct
unsigned char lvUsedInSIMDIntrinsic : 1; // Set if this lclVar is used in a SIMD intrinsic
unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has fewer than 32 entries
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)lvSimdBaseJitType;
}
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
assert(simdBaseJitType < (1 << 5));
lvSimdBaseJitType = (unsigned char)simdBaseJitType;
}
var_types GetSimdBaseType() const;
#endif // FEATURE_SIMD
unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct.
unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type
#ifdef DEBUG
unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness
#endif
unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc,
// eh)
unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop
unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in
// the prolog. If the local has gc pointers, there are no gc-safe points
// between the prolog and the explicit initialization.
union {
unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct
// local. For implicit byref parameters, this gets hijacked between
// fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the
// struct local created to model the parameter's struct promotion, if any.
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local).
// Valid on promoted struct local fields.
};
unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc.
unsigned char lvFldOffset;
unsigned char lvFldOrdinal;
#ifdef DEBUG
unsigned char lvSingleDefDisqualifyReason = 'H';
#endif
#if FEATURE_MULTIREG_ARGS
regNumber lvRegNumForSlot(unsigned slotNum)
{
if (slotNum == 0)
{
return (regNumber)_lvArgReg;
}
else if (slotNum == 1)
{
return GetOtherArgReg();
}
else
{
assert(false && "Invalid slotNum!");
}
unreached();
}
#endif // FEATURE_MULTIREG_ARGS
CorInfoHFAElemType GetLvHfaElemKind() const
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
return _lvHfaElemKind;
#else
NOWAY_MSG("GetLvHfaElemKind");
return CORINFO_HFA_ELEM_NONE;
#endif // FEATURE_HFA_FIELDS_PRESENT
}
void SetLvHfaElemKind(CorInfoHFAElemType elemKind)
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
_lvHfaElemKind = elemKind;
#else
NOWAY_MSG("SetLvHfaElemKind");
#endif // FEATURE_HFA_FIELDS_PRESENT
}
bool lvIsHfa() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetLvHfaElemKind());
}
else
{
return false;
}
}
bool lvIsHfaRegArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return lvIsRegArg && lvIsHfa();
}
else
{
return false;
}
}
//------------------------------------------------------------------------------
// lvHfaSlots: Get the number of slots used by an HFA local
//
// Return Value:
// On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA
// On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8
//
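// For example, an HFA of three doubles (lvExactSize == 24) yields 3 slots on Arm64 and
// 6 single-precision (float) register slots on Arm32.
//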
unsigned lvHfaSlots() const
{
assert(lvIsHfa());
assert(varTypeIsStruct(lvType));
unsigned slots = 0;
#ifdef TARGET_ARM
slots = lvExactSize / sizeof(float);
assert(slots <= 8);
#elif defined(TARGET_ARM64)
switch (GetLvHfaElemKind())
{
case CORINFO_HFA_ELEM_NONE:
assert(!"lvHfaSlots called for non-HFA");
break;
case CORINFO_HFA_ELEM_FLOAT:
assert((lvExactSize % 4) == 0);
slots = lvExactSize >> 2;
break;
case CORINFO_HFA_ELEM_DOUBLE:
case CORINFO_HFA_ELEM_VECTOR64:
assert((lvExactSize % 8) == 0);
slots = lvExactSize >> 3;
break;
case CORINFO_HFA_ELEM_VECTOR128:
assert((lvExactSize % 16) == 0);
slots = lvExactSize >> 4;
break;
default:
unreached();
}
assert(slots <= 4);
#endif // TARGET_ARM64
return slots;
}
// lvIsMultiRegArgOrRet()
// returns true if this is a multireg LclVar struct used in an argument context
// or if this is a multireg LclVar struct assigned from a multireg call
bool lvIsMultiRegArgOrRet()
{
return lvIsMultiRegArg || lvIsMultiRegRet;
}
#if defined(DEBUG)
private:
DoNotEnregisterReason m_doNotEnregReason;
AddressExposedReason m_addrExposedReason;
public:
void SetDoNotEnregReason(DoNotEnregisterReason reason)
{
m_doNotEnregReason = reason;
}
DoNotEnregisterReason GetDoNotEnregReason() const
{
return m_doNotEnregReason;
}
AddressExposedReason GetAddrExposedReason() const
{
return m_addrExposedReason;
}
#endif // DEBUG
public:
void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason))
{
m_addrExposed = value;
INDEBUG(m_addrExposedReason = reason);
}
void CleanAddressExposed()
{
m_addrExposed = false;
}
bool IsAddressExposed() const
{
return m_addrExposed;
}
private:
regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a
// register pair). It is set during codegen any time the
// variable is enregistered (lvRegister is only set
// to non-zero if the variable gets the same register assignment for its entire
// lifetime).
#if !defined(TARGET_64BIT)
regNumberSmall _lvOtherReg; // Used for "upper half" of long var.
#endif // !defined(TARGET_64BIT)
regNumberSmall _lvArgReg; // The (first) register in which this argument is passed.
#if FEATURE_MULTIREG_ARGS
regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register.
// Note this is defined but not used by ARM32
#endif // FEATURE_MULTIREG_ARGS
regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
/////////////////////
regNumber GetRegNum() const
{
return (regNumber)_lvRegNum;
}
void SetRegNum(regNumber reg)
{
_lvRegNum = (regNumberSmall)reg;
assert(_lvRegNum == reg);
}
/////////////////////
#if defined(TARGET_64BIT)
regNumber GetOtherReg() const
{
assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
// "unreachable code" warnings
return REG_NA;
}
void SetOtherReg(regNumber reg)
{
assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
// "unreachable code" warnings
}
#else // !TARGET_64BIT
regNumber GetOtherReg() const
{
return (regNumber)_lvOtherReg;
}
void SetOtherReg(regNumber reg)
{
_lvOtherReg = (regNumberSmall)reg;
assert(_lvOtherReg == reg);
}
#endif // !TARGET_64BIT
/////////////////////
regNumber GetArgReg() const
{
return (regNumber)_lvArgReg;
}
void SetArgReg(regNumber reg)
{
_lvArgReg = (regNumberSmall)reg;
assert(_lvArgReg == reg);
}
#if FEATURE_MULTIREG_ARGS
regNumber GetOtherArgReg() const
{
return (regNumber)_lvOtherArgReg;
}
void SetOtherArgReg(regNumber reg)
{
_lvOtherArgReg = (regNumberSmall)reg;
assert(_lvOtherArgReg == reg);
}
#endif // FEATURE_MULTIREG_ARGS
#ifdef FEATURE_SIMD
// Is this a SIMD struct?
bool lvIsSIMDType() const
{
return lvSIMDType;
}
// Is this a SIMD struct that is used in a SIMD intrinsic?
bool lvIsUsedInSIMDIntrinsic() const
{
return lvUsedInSIMDIntrinsic;
}
#else
// If FEATURE_SIMD is not enabled, return false
bool lvIsSIMDType() const
{
return false;
}
bool lvIsUsedInSIMDIntrinsic() const
{
return false;
}
#endif
/////////////////////
regNumber GetArgInitReg() const
{
return (regNumber)_lvArgInitReg;
}
void SetArgInitReg(regNumber reg)
{
_lvArgInitReg = (regNumberSmall)reg;
assert(_lvArgInitReg == reg);
}
/////////////////////
bool lvIsRegCandidate() const
{
return lvLRACandidate != 0;
}
bool lvIsInReg() const
{
return lvIsRegCandidate() && (GetRegNum() != REG_STK);
}
regMaskTP lvRegMask() const
{
regMaskTP regMask = RBM_NONE;
if (varTypeUsesFloatReg(TypeGet()))
{
if (GetRegNum() != REG_STK)
{
regMask = genRegMaskFloat(GetRegNum(), TypeGet());
}
}
else
{
if (GetRegNum() != REG_STK)
{
regMask = genRegMask(GetRegNum());
}
}
return regMask;
}
unsigned short lvVarIndex; // variable tracking index
private:
unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference
// parameters, this gets hijacked from fgResetImplicitByRefRefCount
// through fgMarkDemotedImplicitByRefArgs, to provide a static
// appearance count (computed during address-exposed analysis)
// that fgMakeOutgoingStructArgCopy consults during global morph
// to determine if eliding its copy is legal.
weight_t m_lvRefCntWtd; // weighted reference count
public:
unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const;
void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL);
void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL);
weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const;
void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL);
void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL);
private:
int lvStkOffs; // stack offset of home in bytes.
public:
int GetStackOffset() const
{
return lvStkOffs;
}
void SetStackOffset(int offset)
{
lvStkOffs = offset;
}
unsigned lvExactSize; // (exact) size of the type in bytes
// Is this a promoted struct?
// This method returns true only for structs (including SIMD structs), not for
// locals that are split on a 32-bit target.
// It is only necessary to use this:
// 1) if only structs are wanted, and
// 2) if Lowering has already been done.
// Otherwise lvPromoted is valid.
bool lvPromotedStruct()
{
#if !defined(TARGET_64BIT)
return (lvPromoted && !varTypeIsLong(lvType));
#else // defined(TARGET_64BIT)
return lvPromoted;
#endif // defined(TARGET_64BIT)
}
unsigned lvSize() const;
size_t lvArgStackSize() const;
unsigned lvSlotNum; // original slot # (if remapped)
typeInfo lvVerTypeInfo; // type info needed for verification
// class handle for the local or null if not known or not a class,
// for a struct handle use `GetStructHnd()`.
CORINFO_CLASS_HANDLE lvClassHnd;
// Get class handle for a struct local or implicitByRef struct local.
CORINFO_CLASS_HANDLE GetStructHnd() const
{
#ifdef FEATURE_SIMD
if (lvSIMDType && (m_layout == nullptr))
{
return NO_CLASS_HANDLE;
}
#endif
assert(m_layout != nullptr);
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF)));
#else
assert(varTypeIsStruct(TypeGet()));
#endif
CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle();
assert(structHnd != NO_CLASS_HANDLE);
return structHnd;
}
CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields
private:
ClassLayout* m_layout; // layout info for structs
public:
BlockSet lvRefBlks; // Set of blocks that contain refs
Statement* lvDefStmt; // Pointer to the statement with the single definition
void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies
var_types TypeGet() const
{
return (var_types)lvType;
}
bool lvStackAligned() const
{
assert(lvIsStructField);
return ((lvFldOffset % TARGET_POINTER_SIZE) == 0);
}
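// Normalization of small-typed (sub-int) locals: for example, a TYP_SHORT parameter, address-exposed
// local, or promoted struct field is normalized (widened) on load, while any other TYP_SHORT local is
// normalized on store.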
bool lvNormalizeOnLoad() const
{
return varTypeIsSmall(TypeGet()) &&
// lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
(lvIsParam || m_addrExposed || lvIsStructField);
}
bool lvNormalizeOnStore() const
{
return varTypeIsSmall(TypeGet()) &&
// lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
!(lvIsParam || m_addrExposed || lvIsStructField);
}
void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true);
var_types GetHfaType() const
{
if (GlobalJitOptions::compFeatureHfa)
{
assert(lvIsHfa());
return HfaTypeFromElemKind(GetLvHfaElemKind());
}
else
{
return TYP_UNDEF;
}
}
void SetHfaType(var_types type)
{
if (GlobalJitOptions::compFeatureHfa)
{
CorInfoHFAElemType elemKind = HfaElemKindFromType(type);
SetLvHfaElemKind(elemKind);
// Ensure we've allocated enough bits.
assert(GetLvHfaElemKind() == elemKind);
}
}
// Returns true if this variable contains GC pointers (including being a GC pointer itself).
bool HasGCPtr() const
{
return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr());
}
// Returns the layout of a struct variable.
ClassLayout* GetLayout() const
{
assert(varTypeIsStruct(lvType));
return m_layout;
}
// Sets the layout of a struct variable.
void SetLayout(ClassLayout* layout)
{
assert(varTypeIsStruct(lvType));
assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout));
m_layout = layout;
}
SsaDefArray<LclSsaVarDsc> lvPerSsaData;
// Returns the address of the per-Ssa data for the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
LclSsaVarDsc* GetPerSsaData(unsigned ssaNum)
{
return lvPerSsaData.GetSsaDef(ssaNum);
}
// Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition
// of this variable.
unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef)
{
return lvPerSsaData.GetSsaNum(ssaDef);
}
var_types GetRegisterType(const GenTreeLclVarCommon* tree) const;
var_types GetRegisterType() const;
var_types GetActualRegisterType() const;
bool IsEnregisterableType() const
{
return GetRegisterType() != TYP_UNDEF;
}
bool IsEnregisterableLcl() const
{
if (lvDoNotEnregister)
{
return false;
}
return IsEnregisterableType();
}
//-----------------------------------------------------------------------------
// IsAlwaysAliveInMemory: Determines if this variable's value is always
// up to date on the stack. This is possible if this is an EH-var or
// we decided to spill after single-def.
//
bool IsAlwaysAliveInMemory() const
{
return lvLiveInOutOfHndlr || lvSpillAtSingleDef;
}
bool CanBeReplacedWithItsField(Compiler* comp) const;
#ifdef DEBUG
public:
const char* lvReason;
void PrintVarReg() const
{
printf("%s", getRegName(GetRegNum()));
}
#endif // DEBUG
}; // class LclVarDsc
enum class SymbolicIntegerValue : int32_t
{
LongMin,
IntMin,
ShortMin,
ByteMin,
Zero,
One,
ByteMax,
UByteMax,
ShortMax,
UShortMax,
IntMax,
UIntMax,
LongMax,
};
inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) > static_cast<int32_t>(right);
}
inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) >= static_cast<int32_t>(right);
}
inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) < static_cast<int32_t>(right);
}
inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) <= static_cast<int32_t>(right);
}
// Represents an integral range useful for reasoning about integral casts.
// It uses a symbolic representation for lower and upper bounds so
// that it can efficiently handle integers of all sizes on all hosts.
//
// Note that the ranges represented by this class are **always** in the
// "signed" domain. This is so that if we know the range a node produces, it
// can be trivially used to determine if a cast above the node does or does not
// overflow, which requires that the interpretation of integers be the same both
// for the "input" and "output". We choose signed interpretation here because it
// produces nice continuous ranges and because IR uses sign-extension for constants.
//
// Some examples of how ranges are computed for casts:
// 1. CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the
// same range - all casts that do not change the representation, i. e. have the same
// "actual" input and output type, have the same "input" and "output" range.
// 2. CAST_OVF(ulong <- uint): never overflows => the "input" range is [INT_MIN..INT_MAX]
// (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32
// bit integers zero-extended to 64 bits).
// 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0
// when interpreting as signed => the "input" range is [0..INT_MAX], the same range
// being the produced one as the node does not change the width of the integer.
//
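// As an illustration of the Contains check below: the range {Zero, ByteMax} is contained in
// {ShortMin, ShortMax}, since ShortMin <= Zero and ByteMax <= ShortMax per the ordering of
// SymbolicIntegerValue above.
//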
class IntegralRange
{
private:
SymbolicIntegerValue m_lowerBound;
SymbolicIntegerValue m_upperBound;
public:
IntegralRange() = default;
IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound)
: m_lowerBound(lowerBound), m_upperBound(upperBound)
{
assert(lowerBound <= upperBound);
}
bool Contains(int64_t value) const;
bool Contains(IntegralRange other) const
{
return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound);
}
bool IsPositive()
{
return m_lowerBound >= SymbolicIntegerValue::Zero;
}
bool Equals(IntegralRange other) const
{
return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound);
}
static int64_t SymbolicToRealValue(SymbolicIntegerValue value);
static SymbolicIntegerValue LowerBoundForType(var_types type);
static SymbolicIntegerValue UpperBoundForType(var_types type);
static IntegralRange ForType(var_types type)
{
return {LowerBoundForType(type), UpperBoundForType(type)};
}
static IntegralRange ForNode(GenTree* node, Compiler* compiler);
static IntegralRange ForCastInput(GenTreeCast* cast);
static IntegralRange ForCastOutput(GenTreeCast* cast);
#ifdef DEBUG
static void Print(IntegralRange range);
#endif // DEBUG
};
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX TempsInfo XX
XX XX
XX The temporary lclVars allocated by the compiler for code generation XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************
*
* The following keeps track of temporaries allocated in the stack frame
* during code-generation (after register allocation). These spill-temps are
* only used if we run out of registers while evaluating a tree.
*
* These are different from the more common temps allocated by lvaGrabTemp().
*/
class TempDsc
{
public:
TempDsc* tdNext;
private:
int tdOffs;
#ifdef DEBUG
static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG
#endif // DEBUG
int tdNum;
BYTE tdSize;
var_types tdType;
public:
TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType)
{
#ifdef DEBUG
// temps must have a negative number (so they have a different number from all local variables)
assert(tdNum < 0);
tdOffs = BAD_TEMP_OFFSET;
#endif // DEBUG
if (tdNum != _tdNum)
{
IMPL_LIMITATION("too many spill temps");
}
}
#ifdef DEBUG
bool tdLegalOffset() const
{
return tdOffs != BAD_TEMP_OFFSET;
}
#endif // DEBUG
int tdTempOffs() const
{
assert(tdLegalOffset());
return tdOffs;
}
void tdSetTempOffs(int offs)
{
tdOffs = offs;
assert(tdLegalOffset());
}
void tdAdjustTempOffs(int offs)
{
tdOffs += offs;
assert(tdLegalOffset());
}
int tdTempNum() const
{
assert(tdNum < 0);
return tdNum;
}
unsigned tdTempSize() const
{
return tdSize;
}
var_types tdTempType() const
{
return tdType;
}
};
// interface to hide linearscan implementation from rest of compiler
class LinearScanInterface
{
public:
virtual void doLinearScan() = 0;
virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0;
virtual bool willEnregisterLocalVars() const = 0;
#if TRACK_LSRA_STATS
virtual void dumpLsraStatsCsv(FILE* file) = 0;
virtual void dumpLsraStatsSummary(FILE* file) = 0;
#endif // TRACK_LSRA_STATS
};
LinearScanInterface* getLinearScanAllocator(Compiler* comp);
// Information about arrays: their element type and size, and the offset of the first element.
// We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes,
// associate an array info via the map retrieved by GetArrayInfoMap(). This information is used,
// for example, in value numbering of array index expressions.
struct ArrayInfo
{
var_types m_elemType;
CORINFO_CLASS_HANDLE m_elemStructType;
unsigned m_elemSize;
unsigned m_elemOffset;
ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0)
{
}
ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType)
: m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset)
{
}
};
// This enumeration names the phases into which we divide compilation. The phases should completely
// partition a compilation.
enum Phases
{
#define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm,
#include "compphases.h"
PHASE_NUMBER_OF
};
extern const char* PhaseNames[];
extern const char* PhaseEnums[];
extern const LPCWSTR PhaseShortNames[];
// Specify which checks should be run after each phase
//
enum class PhaseChecks
{
CHECK_NONE,
CHECK_ALL
};
// Specify compiler data that a phase might modify
enum class PhaseStatus : unsigned
{
MODIFIED_NOTHING,
MODIFIED_EVERYTHING
};
// The following enum provides a simple 1:1 mapping to CLR APIs
enum API_ICorJitInfo_Names
{
#define DEF_CLR_API(name) API_##name,
#include "ICorJitInfo_API_names.h"
API_COUNT
};
//---------------------------------------------------------------
// Compilation time.
//
// A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods.
// We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles
// of the compilation, as well as the cycles for each phase. We also track the number of bytecodes.
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated
// by "m_timerFailure" being true.
// If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile.
struct CompTimeInfo
{
#ifdef FEATURE_JIT_METHOD_PERF
// The string names of the phases.
static const char* PhaseNames[];
static bool PhaseHasChildren[];
static int PhaseParent[];
static bool PhaseReportsIRSize[];
unsigned m_byteCodeBytes;
unsigned __int64 m_totalCycles;
unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF];
unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF];
#if MEASURE_CLRAPI_CALLS
unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF];
unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF];
#endif
unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF];
// For better documentation, we call EndPhase on
// non-leaf phases. We should also call EndPhase on the
// last leaf subphase; obviously, the elapsed cycles between the EndPhase
// for the last leaf subphase and the EndPhase for an ancestor should be very small.
// We add all such "redundant end phase" intervals to this variable below; we print
// it out in a report, so we can verify that it is, indeed, very small. If it ever
// isn't, this means that we're doing something significant between the end of the last
// declared subphase and the end of its parent.
unsigned __int64 m_parentPhaseEndSlop;
bool m_timerFailure;
#if MEASURE_CLRAPI_CALLS
// The following measures the time spent inside each individual CLR API call.
unsigned m_allClrAPIcalls;
unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT];
unsigned __int64 m_allClrAPIcycles;
unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
#endif // MEASURE_CLRAPI_CALLS
CompTimeInfo(unsigned byteCodeBytes);
#endif
};
#ifdef FEATURE_JIT_METHOD_PERF
#if MEASURE_CLRAPI_CALLS
struct WrapICorJitInfo;
#endif
// This class summarizes the JIT time information over the course of a run: the number of methods compiled,
// and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above).
// The operation of adding a single method's timing to the summary may be performed concurrently by several
// threads, so it is protected by a lock.
// This class is intended to be used as a singleton type, with only a single instance.
class CompTimeSummaryInfo
{
// This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one).
static CritSecObject s_compTimeSummaryLock;
int m_numMethods;
int m_totMethods;
CompTimeInfo m_total;
CompTimeInfo m_maximum;
int m_numFilteredMethods;
CompTimeInfo m_filtered;
// This can use whatever data you want to determine if the value to be added
// belongs in the filtered section (it's always included in the unfiltered section)
bool IncludedInFilteredData(CompTimeInfo& info);
public:
// This is the unique CompTimeSummaryInfo object for this instance of the runtime.
static CompTimeSummaryInfo s_compTimeSummary;
CompTimeSummaryInfo()
: m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0)
{
}
// Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary.
// This is thread safe.
void AddInfo(CompTimeInfo& info, bool includePhases);
// Print the summary information to "f".
// This is not thread-safe; assumed to be called by only one thread.
void Print(FILE* f);
};
// A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation,
// and when the current phase started. This is intended to be part of a Compilation object.
//
class JitTimer
{
unsigned __int64 m_start; // Start of the compilation.
unsigned __int64 m_curPhaseStart; // Start of the current phase.
#if MEASURE_CLRAPI_CALLS
unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any).
unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far
unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far.
int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1).
static double s_cyclesPerSec; // Cached for speedier measurements
#endif
#ifdef DEBUG
Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start).
#endif
CompTimeInfo m_info; // The CompTimeInfo for this compilation.
static CritSecObject s_csvLock; // Lock to protect the time log file.
static FILE* s_csvFile; // The time log file handle.
void PrintCsvMethodStats(Compiler* comp);
private:
void* operator new(size_t);
void* operator new[](size_t);
void operator delete(void*);
void operator delete[](void*);
public:
// Initializes the timer instance
JitTimer(unsigned byteCodeSize);
static JitTimer* Create(Compiler* comp, unsigned byteCodeSize)
{
return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize);
}
static void PrintCsvHeader();
// Ends the current phase (argument is for a redundant check).
void EndPhase(Compiler* compiler, Phases phase);
#if MEASURE_CLRAPI_CALLS
// Start and end a timed CLR API call.
void CLRApiCallEnter(unsigned apix);
void CLRApiCallLeave(unsigned apix);
#endif // MEASURE_CLRAPI_CALLS
// Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode,
// and adds it to "sum".
void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases);
// Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets
// *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of
// "m_info" to true.
bool GetThreadCycles(unsigned __int64* cycles)
{
bool res = CycleTimer::GetThreadCyclesS(cycles);
if (!res)
{
m_info.m_timerFailure = true;
}
return res;
}
static void Shutdown();
};
#endif // FEATURE_JIT_METHOD_PERF
//------------------- Function/Funclet info -------------------------------
enum FuncKind : BYTE
{
FUNC_ROOT, // The main/root function (always id==0)
FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler)
FUNC_FILTER, // a funclet associated with an EH filter
FUNC_COUNT
};
class emitLocation;
struct FuncInfoDsc
{
FuncKind funKind;
BYTE funFlags; // Currently unused, just here for padding
unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this
// funclet. It is only valid if funKind field indicates this is a
// EH-related funclet: FUNC_HANDLER or FUNC_FILTER
#if defined(TARGET_AMD64)
// TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array.
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
UNWIND_INFO unwindHeader;
// Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd
// number of codes, the VM or Zapper will 4-byte align the whole thing.
BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))];
unsigned unwindCodeSlot;
#elif defined(TARGET_X86)
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
#elif defined(TARGET_ARMARCH)
UnwindInfo uwi; // Unwind information for this function/funclet's hot section
UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section
// Note: we only have a pointer here instead of the actual object,
// to save memory in the JIT case (compared to the NGEN case),
// where we don't have any cold section.
// Note 2: we currently don't support hot/cold splitting in functions
// with EH, so uwiCold will be NULL for all funclets.
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
#endif // TARGET_ARMARCH
#if defined(FEATURE_CFI_SUPPORT)
jitstd::vector<CFI_CODE>* cfiCodes;
#endif // FEATURE_CFI_SUPPORT
// Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else
// that isn't shared between the main function body and funclets.
};
struct fgArgTabEntry
{
GenTreeCall::Use* use; // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg.
GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any.
// Get the node that corresponds to this argument entry.
// This is the "real" node and not a placeholder or setup node.
GenTree* GetNode() const
{
return lateUse == nullptr ? use->GetNode() : lateUse->GetNode();
}
unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL
private:
regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for
// arguments passed on the stack
public:
unsigned numRegs; // Count of number of registers that this argument uses.
// Note that on ARM, if we have a double hfa, this reflects the number
// of DOUBLE registers.
#if defined(UNIX_AMD64_ABI)
// Unix amd64 will split floating point types and integer types in structs
// between floating point and general purpose registers. Keep track of that
// information so we do not need to recompute it later.
unsigned structIntRegs;
unsigned structFloatRegs;
#endif // UNIX_AMD64_ABI
#if defined(DEBUG_ARG_SLOTS)
// These fields were used to calculate stack size in stack slots for arguments
// but now they are replaced by precise `m_byteOffset/m_byteSize` because of
// arm64 apple abi requirements.
// A slot is a pointer sized region in the OutArg area.
unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area
unsigned numSlots; // Count of number of slots that this argument uses
#endif // DEBUG_ARG_SLOTS
// Return number of stack slots that this argument is taking.
// TODO-Cleanup: this function does not align with arm64 apple model,
// delete it. In most cases we just want to know whether the argument uses the stack or not,
// but in some cases we are checking if it is a multireg arg, like:
// `numRegs + GetStackSlotsNumber() > 1`, which is harder to replace.
//
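// For example, a 16-byte struct passed entirely on the stack takes 16 / TARGET_POINTER_SIZE = 2 slots
// on a 64-bit target.
//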
unsigned GetStackSlotsNumber() const
{
return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
}
private:
unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg.
public:
unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg
var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a
// struct is passed as a scalar type, this is that type.
// Note that if a struct is passed by reference, this will still be the struct type.
bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar
bool needPlace : 1; // True when we must replace this argument with a placeholder node
bool isTmp : 1; // True when we setup a temp LclVar for this argument due to size issues with the struct
bool processed : 1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs
bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of
// previous arguments.
NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced
// to be in certain registers or on the stack, regardless of where they
// appear in the arg list.
bool isStruct : 1; // True if this is a struct arg
bool _isVararg : 1; // True if the argument is in a vararg context.
bool passedByRef : 1; // True iff the argument is passed by reference.
#if FEATURE_ARG_SPLIT
bool _isSplit : 1; // True when this argument is split between the registers and OutArg area
#endif // FEATURE_ARG_SPLIT
#ifdef FEATURE_HFA_FIELDS_PRESENT
CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif
CorInfoHFAElemType GetHfaElemKind() const
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
return _hfaElemKind;
#else
NOWAY_MSG("GetHfaElemKind");
return CORINFO_HFA_ELEM_NONE;
#endif
}
void SetHfaElemKind(CorInfoHFAElemType elemKind)
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
_hfaElemKind = elemKind;
#else
NOWAY_MSG("SetHfaElemKind");
#endif
}
bool isNonStandard() const
{
return nonStandardArgKind != NonStandardArgKind::None;
}
// Returns true if the IR node for this non-standard arg is added by fgInitArgInfo.
// In this case, it must be removed by GenTreeCall::ResetArgInfo.
bool isNonStandardArgAddedLate() const
{
switch (static_cast<NonStandardArgKind>(nonStandardArgKind))
{
case NonStandardArgKind::None:
case NonStandardArgKind::PInvokeFrame:
case NonStandardArgKind::ShiftLow:
case NonStandardArgKind::ShiftHigh:
case NonStandardArgKind::FixedRetBuffer:
case NonStandardArgKind::ValidateIndirectCallTarget:
return false;
case NonStandardArgKind::WrapperDelegateCell:
case NonStandardArgKind::VirtualStubCell:
case NonStandardArgKind::PInvokeCookie:
case NonStandardArgKind::PInvokeTarget:
case NonStandardArgKind::R2RIndirectionCell:
return true;
default:
unreached();
}
}
bool isLateArg() const
{
bool isLate = (_lateArgInx != UINT_MAX);
return isLate;
}
unsigned GetLateArgInx() const
{
assert(isLateArg());
return _lateArgInx;
}
void SetLateArgInx(unsigned inx)
{
_lateArgInx = inx;
}
regNumber GetRegNum() const
{
return (regNumber)regNums[0];
}
regNumber GetOtherRegNum() const
{
return (regNumber)regNums[1];
}
#if defined(UNIX_AMD64_ABI)
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif
void setRegNum(unsigned int i, regNumber regNum)
{
assert(i < MAX_ARG_REG_COUNT);
regNums[i] = (regNumberSmall)regNum;
}
regNumber GetRegNum(unsigned int i)
{
assert(i < MAX_ARG_REG_COUNT);
return (regNumber)regNums[i];
}
bool IsSplit() const
{
#if FEATURE_ARG_SPLIT
return compFeatureArgSplit() && _isSplit;
#else // FEATURE_ARG_SPLIT
return false;
#endif
}
void SetSplit(bool value)
{
#if FEATURE_ARG_SPLIT
_isSplit = value;
#endif
}
bool IsVararg() const
{
return compFeatureVarArg() && _isVararg;
}
void SetIsVararg(bool value)
{
if (compFeatureVarArg())
{
_isVararg = value;
}
}
bool IsHfaArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetHfaElemKind());
}
else
{
return false;
}
}
bool IsHfaRegArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetHfaElemKind()) && isPassedInRegisters();
}
else
{
return false;
}
}
unsigned intRegCount() const
{
#if defined(UNIX_AMD64_ABI)
if (this->isStruct)
{
return this->structIntRegs;
}
#endif // defined(UNIX_AMD64_ABI)
if (!this->isPassedInFloatRegisters())
{
return this->numRegs;
}
return 0;
}
unsigned floatRegCount() const
{
#if defined(UNIX_AMD64_ABI)
if (this->isStruct)
{
return this->structFloatRegs;
}
#endif // defined(UNIX_AMD64_ABI)
if (this->isPassedInFloatRegisters())
{
return this->numRegs;
}
return 0;
}
// Get the number of bytes that this argument is occupying on the stack,
// including padding up to the target pointer size for platforms
// where a stack argument can't take less.
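// For example, a 1-byte argument passed on the stack has GetByteSize() == 8 on x64 (numRegs == 0),
// so its stack byte size is 8, while on Apple arm64 the same argument takes just 1 byte.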
unsigned GetStackByteSize() const
{
if (!IsSplit() && numRegs > 0)
{
return 0;
}
assert(!IsHfaArg() || !IsSplit());
assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs);
const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs;
return stackByteSize;
}
var_types GetHfaType() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return HfaTypeFromElemKind(GetHfaElemKind());
}
else
{
return TYP_UNDEF;
}
}
void SetHfaType(var_types type, unsigned hfaSlots)
{
if (GlobalJitOptions::compFeatureHfa)
{
if (type != TYP_UNDEF)
{
// We must already have set the passing mode.
assert(numRegs != 0 || GetStackByteSize() != 0);
// We originally set numRegs according to the size of the struct, but if the size of the
// hfaType is not the same as the pointer size, we need to correct it.
// Note that hfaSlots is the number of registers we will use. For ARM, that is twice
// the number of "double registers".
unsigned numHfaRegs = hfaSlots;
#ifdef TARGET_ARM
if (type == TYP_DOUBLE)
{
// Must be an even number of registers.
assert((numRegs & 1) == 0);
numHfaRegs = hfaSlots / 2;
}
#endif // TARGET_ARM
if (!IsHfaArg())
{
// We haven't previously set this; do so now.
CorInfoHFAElemType elemKind = HfaElemKindFromType(type);
SetHfaElemKind(elemKind);
// Ensure we've allocated enough bits.
assert(GetHfaElemKind() == elemKind);
if (isPassedInRegisters())
{
numRegs = numHfaRegs;
}
}
else
{
// We've already set this; ensure that it's consistent.
if (isPassedInRegisters())
{
assert(numRegs == numHfaRegs);
}
assert(type == HfaTypeFromElemKind(GetHfaElemKind()));
}
}
}
}
#ifdef TARGET_ARM
void SetIsBackFilled(bool backFilled)
{
isBackFilled = backFilled;
}
bool IsBackFilled() const
{
return isBackFilled;
}
#else // !TARGET_ARM
void SetIsBackFilled(bool backFilled)
{
}
bool IsBackFilled() const
{
return false;
}
#endif // !TARGET_ARM
bool isPassedInRegisters() const
{
return !IsSplit() && (numRegs != 0);
}
bool isPassedInFloatRegisters() const
{
#ifdef TARGET_X86
return false;
#else
return isValidFloatArgReg(GetRegNum());
#endif
}
// Can we replace the struct type of this node with a primitive type for argument passing?
bool TryPassAsPrimitive() const
{
return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE));
}
#if defined(DEBUG_ARG_SLOTS)
// Returns the number of "slots" used, where for this purpose a
// register counts as a slot.
unsigned getSlotCount() const
{
if (isBackFilled)
{
assert(isPassedInRegisters());
assert(numRegs == 1);
}
else if (GetRegNum() == REG_STK)
{
assert(!isPassedInRegisters());
assert(numRegs == 0);
}
else
{
assert(numRegs > 0);
}
return numSlots + numRegs;
}
#endif
#if defined(DEBUG_ARG_SLOTS)
// Returns the size as a multiple of pointer-size.
// For targets without HFAs, this is the same as getSlotCount().
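// For example, on Arm64 an HFA of four floats uses four registers but only two pointer-sized
// slots (the size is halved), while an HFA of two SIMD16 vectors doubles the slot count.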
unsigned getSize() const
{
unsigned size = getSlotCount();
if (GlobalJitOptions::compFeatureHfa)
{
if (IsHfaRegArg())
{
#ifdef TARGET_ARM
// We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size.
if (GetHfaType() == TYP_DOUBLE)
{
assert(!IsSplit());
size <<= 1;
}
#elif defined(TARGET_ARM64)
// We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size,
// or if they are SIMD16 vector hfa regs we have to double the size.
if (GetHfaType() == TYP_FLOAT)
{
// Round up in case of odd HFA count.
size = (size + 1) >> 1;
}
#ifdef FEATURE_SIMD
else if (GetHfaType() == TYP_SIMD16)
{
size <<= 1;
}
#endif // FEATURE_SIMD
#endif // TARGET_ARM64
}
}
return size;
}
#endif // DEBUG_ARG_SLOTS
private:
unsigned m_byteOffset;
// byte size that this argument takes including the padding after.
// For example, 1-byte arg on x64 with 8-byte alignment
// will have `m_byteSize == 8`, the same arg on apple arm64 will have `m_byteSize == 1`.
unsigned m_byteSize;
unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers).
public:
void SetByteOffset(unsigned byteOffset)
{
DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum);
m_byteOffset = byteOffset;
}
unsigned GetByteOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum);
return m_byteOffset;
}
void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa)
{
unsigned roundedByteSize;
if (compMacOsArm64Abi())
{
// Only struct types need extension or rounding to pointer size, except for HFA<float>.
if (isStruct && !isFloatHfa)
{
roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
}
else
{
roundedByteSize = byteSize;
}
}
else
{
roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
}
#if !defined(TARGET_ARM)
// Arm32 can have a struct with 8-byte alignment
// whose rounded size is not a multiple of 8.
assert(m_byteAlignment != 0);
assert(roundedByteSize % m_byteAlignment == 0);
#endif // TARGET_ARM
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi() && !isStruct)
{
assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE);
}
#endif
m_byteSize = roundedByteSize;
}
unsigned GetByteSize() const
{
return m_byteSize;
}
void SetByteAlignment(unsigned byteAlignment)
{
m_byteAlignment = byteAlignment;
}
unsigned GetByteAlignment() const
{
return m_byteAlignment;
}
// Set the register numbers for a multireg argument.
// There's nothing to do on x64/Ux because the structDesc has already been used to set the
// register numbers.
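// For example, a struct passed in three consecutive registers starting at register R is given
// regNums[0..2] = {R, R+1, R+2}; on Arm32 a double HFA advances by two float register numbers
// per DOUBLE register.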
void SetMultiRegNums()
{
#if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI)
if (numRegs == 1)
{
return;
}
regNumber argReg = GetRegNum(0);
#ifdef TARGET_ARM
unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1;
#else
unsigned int regSize = 1;
#endif
if (numRegs > MAX_ARG_REG_COUNT)
NO_WAY("Multireg argument exceeds the maximum length");
for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++)
{
argReg = (regNumber)(argReg + regSize);
setRegNum(regIndex, argReg);
}
#endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI)
}
#ifdef DEBUG
// Check that the value of 'isStruct' is consistent.
// A struct arg must be one of the following:
// - A node of struct type,
// - A GT_FIELD_LIST, or
// - A node of a scalar type, passed in a single register or slot
// (or two slots in the case of a struct passed on the stack as TYP_DOUBLE).
//
void checkIsStruct() const
{
GenTree* node = GetNode();
if (isStruct)
{
if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST))
{
// This is the case where we are passing a struct as a primitive type.
// On most targets, this is always a single register or slot.
// However, on ARM this could be two slots if it is TYP_DOUBLE.
bool isPassedAsPrimitiveType =
((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE)));
#ifdef TARGET_ARM
if (!isPassedAsPrimitiveType)
{
if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2))
{
isPassedAsPrimitiveType = true;
}
}
#endif // TARGET_ARM
assert(isPassedAsPrimitiveType);
}
}
else
{
assert(!varTypeIsStruct(node));
}
}
void Dump() const;
#endif
};
//-------------------------------------------------------------------------
//
// The class fgArgInfo is used to handle the arguments
// when morphing a GT_CALL node.
//
class fgArgInfo
{
Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory
GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo
unsigned argCount; // Updatable arg count value
#if defined(DEBUG_ARG_SLOTS)
unsigned nextSlotNum; // Updatable slot count value
#endif
unsigned nextStackByteOffset;
unsigned stkLevel; // Stack depth when we make this call (for x86)
#if defined(UNIX_X86_ABI)
bool alignmentDone; // Updatable flag, set to 'true' after we've done any required alignment.
unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs().
unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call.
// Computed dynamically during codegen, based on stkSizeBytes and the current
// stack level (genStackLevel) when the first stack adjustment is made for
// this call.
#endif
#if FEATURE_FIXED_OUT_ARGS
unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL
#endif
unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
bool hasRegArgs; // true if we have one or more register arguments
bool hasStackArgs; // true if we have one or more stack arguments
bool argsComplete; // marker for state
bool argsSorted; // marker for state
bool needsTemps; // one or more arguments must be copied to a temp by EvalArgsToTemps
fgArgTabEntry** argTable; // variable sized array of per-argument descriptions (i.e. argTable[argTableSize])
private:
void AddArg(fgArgTabEntry* curArgTabEntry);
public:
fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount);
fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall);
fgArgTabEntry* AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg = false);
#ifdef UNIX_AMD64_ABI
fgArgTabEntry* AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
const bool isStruct,
const bool isFloatHfa,
const bool isVararg,
const regNumber otherRegNum,
const unsigned structIntRegs,
const unsigned structFloatRegs,
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
#endif // UNIX_AMD64_ABI
fgArgTabEntry* AddStkArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
unsigned numSlots,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg = false);
void RemorphReset();
void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots);
void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode);
void ArgsComplete();
void SortArgs();
void EvalArgsToTemps();
unsigned ArgCount() const
{
return argCount;
}
fgArgTabEntry** ArgTable() const
{
return argTable;
}
#if defined(DEBUG_ARG_SLOTS)
unsigned GetNextSlotNum() const
{
return nextSlotNum;
}
#endif
unsigned GetNextSlotByteOffset() const
{
return nextStackByteOffset;
}
bool HasRegArgs() const
{
return hasRegArgs;
}
bool NeedsTemps() const
{
return needsTemps;
}
bool HasStackArgs() const
{
return hasStackArgs;
}
bool AreArgsComplete() const
{
return argsComplete;
}
#if FEATURE_FIXED_OUT_ARGS
unsigned GetOutArgSize() const
{
return outArgSize;
}
void SetOutArgSize(unsigned newVal)
{
outArgSize = newVal;
}
#endif // FEATURE_FIXED_OUT_ARGS
#if defined(UNIX_X86_ABI)
void ComputeStackAlignment(unsigned curStackLevelInBytes)
{
padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN);
}
unsigned GetStkAlign() const
{
return padStkAlign;
}
void SetStkSizeBytes(unsigned newStkSizeBytes)
{
stkSizeBytes = newStkSizeBytes;
}
unsigned GetStkSizeBytes() const
{
return stkSizeBytes;
}
bool IsStkAlignmentDone() const
{
return alignmentDone;
}
void SetStkAlignmentDone()
{
alignmentDone = true;
}
#endif // defined(UNIX_X86_ABI)
// Get the fgArgTabEntry for the arg at position argNum.
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const
{
fgArgTabEntry* curArgTabEntry = nullptr;
if (!reMorphing)
{
// The arg table has not yet been sorted.
curArgTabEntry = argTable[argNum];
assert(curArgTabEntry->argNum == argNum);
return curArgTabEntry;
}
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->argNum == argNum)
{
return curArgTabEntry;
}
}
noway_assert(!"GetArgEntry: argNum not found");
return nullptr;
}
void SetNeedsTemps()
{
needsTemps = true;
}
// Get the node for the arg at position argIndex.
// Caller must ensure that this index is a valid arg index.
GenTree* GetArgNode(unsigned argIndex) const
{
return GetArgEntry(argIndex)->GetNode();
}
void Dump(Compiler* compiler) const;
};
#ifdef DEBUG
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// We have the ability to mark source expressions with "Test Labels."
// These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions
// that should be CSE defs, and other expressions that should be uses of those defs, with a shared label.
enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel.
{
TL_SsaName,
TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown).
TL_VNNorm, // Like above, but uses the non-exceptional value of the expression.
TL_CSE_Def, // This must be identified in the JIT as a CSE def
TL_CSE_Use, // This must be identified in the JIT as a CSE use
TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop.
};
struct TestLabelAndNum
{
TestLabel m_tl;
ssize_t m_num;
TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0)
{
}
};
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap;
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif // DEBUG
//-------------------------------------------------------------------------
// LoopFlags: flags for the loop table.
//
enum LoopFlags : unsigned short
{
LPFLG_EMPTY = 0,
// LPFLG_UNUSED = 0x0001,
// LPFLG_UNUSED = 0x0002,
LPFLG_ITER = 0x0004, // loop of form: for (i = icon or lclVar; test_condition(); i++)
// LPFLG_UNUSED = 0x0008,
LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call
LPFLG_VAR_INIT = 0x0020, // iterator is initialized with a local var (var # found in lpVarInit)
LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit)
LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit)
LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit)
LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit)
LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit)
LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop
LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away)
LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop
LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed
LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet
// type are assigned to.
};
inline constexpr LoopFlags operator~(LoopFlags a)
{
return (LoopFlags)(~(unsigned short)a);
}
inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b)
{
return (LoopFlags)((unsigned short)a | (unsigned short)b);
}
inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b)
{
return (LoopFlags)((unsigned short)a & (unsigned short)b);
}
inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b)
{
return a = (LoopFlags)((unsigned short)a | (unsigned short)b);
}
inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b)
{
return a = (LoopFlags)((unsigned short)a & (unsigned short)b);
}
// The following holds information about instr offsets in terms of generated code.
enum class IPmappingDscKind
{
Prolog, // The mapping represents the start of a prolog.
Epilog, // The mapping represents the start of an epilog.
NoMapping, // This does not map to any IL offset.
Normal, // The mapping maps to an IL offset.
};
struct IPmappingDsc
{
emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset
IPmappingDscKind ipmdKind; // The kind of mapping
ILLocation ipmdLoc; // The location for normal mappings
bool ipmdIsLabel; // Can this code be a branch label?
};
struct PreciseIPMapping
{
emitLocation nativeLoc;
DebugInfo debugInfo;
};
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX The big guy. The sections are currently organized as : XX
XX XX
XX o GenTree and BasicBlock XX
XX o LclVarsInfo XX
XX o Importer XX
XX o FlowGraph XX
XX o Optimizer XX
XX o RegAlloc XX
XX o EEInterface XX
XX o TempsInfo XX
XX o RegSet XX
XX o GCInfo XX
XX o Instruction XX
XX o ScopeInfo XX
XX o PrologScopeInfo XX
XX o CodeGenerator XX
XX o UnwindInfo XX
XX o Compiler XX
XX o typeInfo XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
struct HWIntrinsicInfo;
class Compiler
{
friend class emitter;
friend class UnwindInfo;
friend class UnwindFragmentInfo;
friend class UnwindEpilogInfo;
friend class JitTimer;
friend class LinearScan;
friend class fgArgInfo;
friend class Rationalizer;
friend class Phase;
friend class Lowering;
friend class CSE_DataFlow;
friend class CSE_Heuristic;
friend class CodeGenInterface;
friend class CodeGen;
friend class LclVarDsc;
friend class TempDsc;
friend class LIR;
friend class ObjectAllocator;
friend class LocalAddressVisitor;
friend struct GenTree;
friend class MorphInitBlockHelper;
friend class MorphCopyBlockHelper;
#ifdef FEATURE_HW_INTRINSICS
friend struct HWIntrinsicInfo;
#endif // FEATURE_HW_INTRINSICS
#ifndef TARGET_64BIT
friend class DecomposeLongs;
#endif // !TARGET_64BIT
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Misc structs definitions XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package.
#ifdef DEBUG
bool verbose;
bool verboseTrees;
bool shouldUseVerboseTrees();
bool asciiTrees; // If true, dump trees using only ASCII characters
bool shouldDumpASCIITrees();
bool verboseSsa; // If true, produce especially verbose dump output in SSA construction.
bool shouldUseVerboseSsa();
bool treesBeforeAfterMorph; // If true, print trees before/after morphing, paired by an intra-compilation id (morphNum below)
int morphNum; // This counts the trees that have been morphed, allowing us to label each uniquely.
bool doExtraSuperPmiQueries;
void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.
const char* VarNameToStr(VarName name)
{
return name;
}
DWORD expensiveDebugCheckLevel;
#endif
#if FEATURE_MULTIREG_RET
GenTree* impAssignMultiRegTypeToVar(GenTree* op,
CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET
#ifdef TARGET_X86
bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86
//-------------------------------------------------------------------------
// Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
// HFAs are one to four element structs where each element is the same
// type, either all float or all double. We handle HVAs (one to four elements of
// vector types) uniformly with HFAs. HFAs are treated specially
// in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in
// floating-point registers instead of the general purpose registers.
//
bool IsHfa(CORINFO_CLASS_HANDLE hClass);
bool IsHfa(GenTree* tree);
var_types GetHfaType(GenTree* tree);
unsigned GetHfaCount(GenTree* tree);
var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass);
bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);
//-------------------------------------------------------------------------
// The following is used for validating format of EH table
//
struct EHNodeDsc;
typedef struct EHNodeDsc* pEHNodeDsc;
EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
EHNodeDsc* ehnNext; // next unused node in the preallocated array of EHnodes.
struct EHNodeDsc
{
enum EHBlockType
{
TryNode,
FilterNode,
HandlerNode,
FinallyNode,
FaultNode
};
EHBlockType ehnBlockType; // kind of EH block
IL_OFFSET ehnStartOffset; // IL offset of start of the EH block
IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
// the last IL offset, not "one past the last one", i.e., the range Start to End is
// inclusive).
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order
pEHNodeDsc ehnChild; // leftmost nested block
union {
pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node
pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node
};
pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0
pEHNodeDsc ehnEquivalent; // for a try node: set when another try node has the same start and end offset
void ehnSetTryNodeType()
{
ehnBlockType = TryNode;
}
void ehnSetFilterNodeType()
{
ehnBlockType = FilterNode;
}
void ehnSetHandlerNodeType()
{
ehnBlockType = HandlerNode;
}
void ehnSetFinallyNodeType()
{
ehnBlockType = FinallyNode;
}
void ehnSetFaultNodeType()
{
ehnBlockType = FaultNode;
}
bool ehnIsTryBlock()
{
return ehnBlockType == TryNode;
}
bool ehnIsFilterBlock()
{
return ehnBlockType == FilterNode;
}
bool ehnIsHandlerBlock()
{
return ehnBlockType == HandlerNode;
}
bool ehnIsFinallyBlock()
{
return ehnBlockType == FinallyNode;
}
bool ehnIsFaultBlock()
{
return ehnBlockType == FaultNode;
}
// returns true if there is any overlap between the two nodes
static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2)
{
if (node1->ehnStartOffset < node2->ehnStartOffset)
{
return (node1->ehnEndOffset >= node2->ehnStartOffset);
}
else
{
return (node1->ehnStartOffset <= node2->ehnEndOffset);
}
}
// returns true if "inner" is completely nested inside "outer"
static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer)
{
return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset));
}
};
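// Illustrative sketch (with hypothetical node pointers) of how the two predicates above are typically
// combined when validating a pair of EH clauses:
//
//   if (EHNodeDsc::ehnIsNested(innerNode, outerNode))
//   {
//       // inner clause is completely contained in the outer clause - legal nesting
//   }
//   else if (EHNodeDsc::ehnIsOverlap(innerNode, outerNode))
//   {
//       // clauses overlap without proper nesting - malformed EH table
//   }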
//-------------------------------------------------------------------------
// Exception handling functions
//
#if !defined(FEATURE_EH_FUNCLETS)
bool ehNeedsShadowSPslots()
{
return (info.compXcptnsCount || opts.compDbgEnC);
}
// 0 for methods with no EH
// 1 for methods with non-nested EH, or where only the try blocks are nested
// 2 for a method with a catch within a catch
// etc.
unsigned ehMaxHndNestingCount;
#endif // !FEATURE_EH_FUNCLETS
static bool jitIsBetween(unsigned value, unsigned start, unsigned end);
static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end);
bool bbInCatchHandlerILRange(BasicBlock* blk);
bool bbInFilterILRange(BasicBlock* blk);
bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk);
unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo);
unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex);
unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex);
// Returns true if "block" is the start of a try region.
bool bbIsTryBeg(BasicBlock* block);
// Returns true if "block" is the start of a handler or filter region.
bool bbIsHandlerBeg(BasicBlock* block);
// Returns true iff "block" is where control flows if an exception is raised in the
// try region, and sets "*regionIndex" to the index of the try for the handler.
// Differs from "IsHandlerBeg" in the case of filters, where this is true for the first
// block of the filter, but not for the filter's handler.
bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex);
bool ehHasCallableHandlers();
// Return the EH descriptor for the given region index.
EHblkDsc* ehGetDsc(unsigned regionIndex);
// Return the EH index given a region descriptor.
unsigned ehGetIndex(EHblkDsc* ehDsc);
// Return the EH descriptor index of the enclosing try, for the given region index.
unsigned ehGetEnclosingTryIndex(unsigned regionIndex);
// Return the EH descriptor index of the enclosing handler, for the given region index.
unsigned ehGetEnclosingHndIndex(unsigned regionIndex);
// Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this
// block is not in a 'try' region).
EHblkDsc* ehGetBlockTryDsc(BasicBlock* block);
// Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr
// if this block is not in a filter or handler region).
EHblkDsc* ehGetBlockHndDsc(BasicBlock* block);
// Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or
// nullptr if this block's exceptions propagate to caller).
EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block);
EHblkDsc* ehIsBlockTryLast(BasicBlock* block);
EHblkDsc* ehIsBlockHndLast(BasicBlock* block);
bool ehIsBlockEHLast(BasicBlock* block);
bool ehBlockHasExnFlowDsc(BasicBlock* block);
// Return the region index of the most nested EH region this block is in.
unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion);
// Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check.
unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex);
// Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX
// if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion'
// is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler.
// (It can never be a filter.)
unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion);
// A block has been deleted. Update the EH table appropriately.
void ehUpdateForDeletedBlock(BasicBlock* block);
// Determine whether a block can be deleted while preserving the EH normalization rules.
bool ehCanDeleteEmptyBlock(BasicBlock* block);
// Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region.
void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast);
// For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler,
// or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index
// is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the
// BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function
// body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the
// BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if lives in the handler region. (It never
// lives in a filter.)
unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion);
// Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex' region's
// handler. Set begBlk to the first block, and endBlk to the block after the last block of the range
// (nullptr if the last block is the last block in the program).
// Precondition: 'finallyIndex' is the EH region of a try/finally clause.
void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk);
#ifdef DEBUG
// Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return
// 'true' if the BBJ_CALLFINALLY is in the correct EH region.
bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex);
#endif // DEBUG
#if defined(FEATURE_EH_FUNCLETS)
// Do we need a PSPSym in the main function? For codegen purposes, we only need one
// if there is a filter that protects a region with a nested EH clause (such as a
// try/catch nested in the 'try' body of a try/filter/filter-handler). See
// genFuncletProlog() for more details. However, the VM seems to use it for more
// purposes, maybe including debugging. Until we are sure otherwise, always create
// a PSPSym for functions with any EH.
bool ehNeedsPSPSym() const
{
#ifdef TARGET_X86
return false;
#else // TARGET_X86
return compHndBBtabCount > 0;
#endif // TARGET_X86
}
bool ehAnyFunclets(); // Are there any funclets in this function?
unsigned ehFuncletCount(); // Return the count of funclets in the function
unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks
#else // !FEATURE_EH_FUNCLETS
bool ehAnyFunclets()
{
return false;
}
unsigned ehFuncletCount()
{
return 0;
}
unsigned bbThrowIndex(BasicBlock* blk)
{
return blk->bbTryIndex;
} // Get the index to use as the cache key for sharing throw blocks
#endif // !FEATURE_EH_FUNCLETS
// Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of
// "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the first
// first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor,
// for example, we want to consider that the immediate dominator of the catch clause start block, so it's
// convenient to also consider it a predecessor.)
flowList* BlockPredsWithEH(BasicBlock* blk);
// This table is useful for memoization of the method above.
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap;
BlockToFlowListMap* m_blockToEHPreds;
BlockToFlowListMap* GetBlockToEHPreds()
{
if (m_blockToEHPreds == nullptr)
{
m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator());
}
return m_blockToEHPreds;
}
void* ehEmitCookie(BasicBlock* block);
UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);
EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter);
EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd);
EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter);
EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast);
void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg);
void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast);
void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast);
void fgSkipRmvdBlocks(EHblkDsc* handlerTab);
void fgAllocEHTable();
void fgRemoveEHTableEntry(unsigned XTnum);
#if defined(FEATURE_EH_FUNCLETS)
EHblkDsc* fgAddEHTableEntry(unsigned XTnum);
#endif // FEATURE_EH_FUNCLETS
#if !FEATURE_EH
void fgRemoveEH();
#endif // !FEATURE_EH
void fgSortEHTable();
// Causes the EH table to obey some well-formedness conditions, by inserting
// empty BB's when necessary:
// * No block is both the first block of a handler and the first block of a try.
// * No block is the first block of multiple 'try' regions.
// * No block is the last block of multiple EH regions.
void fgNormalizeEH();
bool fgNormalizeEHCase1();
bool fgNormalizeEHCase2();
bool fgNormalizeEHCase3();
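// The three fgNormalizeEHCaseN helpers above address the well-formedness conditions listed before
// fgNormalizeEH(), broadly one case per condition, in the order the conditions are listed.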
void fgCheckForLoopsInHandlers();
#ifdef DEBUG
void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
void fgVerifyHandlerTab();
void fgDispHandlerTab();
#endif // DEBUG
bool fgNeedToSortEHTable;
void verInitEHTree(unsigned numEHClauses);
void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab);
void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node);
void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node);
void verCheckNestingLevel(EHNodeDsc* initRoot);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree and BasicBlock XX
XX XX
XX Functions to allocate and display the GenTrees and BasicBlocks XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// Functions to create nodes
Statement* gtNewStmt(GenTree* expr = nullptr);
Statement* gtNewStmt(GenTree* expr, const DebugInfo& di);
// For unary opers.
GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE);
// For binary opers.
GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2);
GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon);
GenTree* gtNewLargeOperNode(genTreeOps oper,
var_types type = TYP_I_IMPL,
GenTree* op1 = nullptr,
GenTree* op2 = nullptr);
GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT);
GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq);
GenTree* gtNewPhysRegNode(regNumber reg, var_types type);
GenTree* gtNewJmpTableNode();
GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant);
GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr);
GenTreeFlags gtTokenToIconFlags(unsigned token);
GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle);
GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd);
GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd);
GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd);
GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd);
GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue);
GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node);
GenTree* gtNewLconNode(__int64 value);
GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE);
GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle);
GenTree* gtNewZeroConNode(var_types type);
GenTree* gtNewOneConNode(var_types type);
GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src);
#ifdef FEATURE_SIMD
GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize);
#endif
GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock);
GenTree* gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg);
GenTree* gtNewBitCastNode(var_types type, GenTree* arg);
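// Illustrative sketch of composing a small tree and wrapping it in a statement with the factories above
// (the local number "lclNum" is hypothetical):
//
//   GenTree*   lcl  = gtNewLclvNode(lclNum, TYP_INT);
//   GenTree*   one  = gtNewIconNode(1);
//   GenTree*   add  = gtNewOperNode(GT_ADD, TYP_INT, lcl, one);
//   Statement* stmt = gtNewStmt(add);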
protected:
void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile);
public:
GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr);
void gtSetObjGcInfo(GenTreeObj* objNode);
GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr);
GenTree* gtNewBlockVal(GenTree* addr, unsigned size);
GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile);
GenTreeCall::Use* gtNewCallArgs(GenTree* node);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4);
GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args);
GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after);
GenTreeCall* gtNewCallNode(gtCallTypes callType,
CORINFO_METHOD_HANDLE handle,
var_types type,
GenTreeCall::Use* args,
const DebugInfo& di = DebugInfo());
GenTreeCall* gtNewIndCallNode(GenTree* addr,
var_types type,
GenTreeCall::Use* args,
const DebugInfo& di = DebugInfo());
GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr);
GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup,
GenTree* ctxTree,
void* compileTimeHandle);
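// Illustrative sketch of building a JIT helper call with the factories above ("op1", "op2" and
// "helperNum" are hypothetical placeholders for real argument trees and a CorInfoHelpFunc value):
//
//   GenTreeCall::Use* args = gtNewCallArgs(op1, op2);
//   GenTreeCall*      call = gtNewHelperCallNode(helperNum, TYP_REF, args);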
GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET));
GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET));
GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL);
GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum,
unsigned lclOffs,
FieldSeqNode* fieldSeq,
var_types type = TYP_I_IMPL);
#ifdef FEATURE_SIMD
GenTreeSIMD* gtNewSIMDNode(
var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize);
GenTreeSIMD* gtNewSIMDNode(var_types type,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize);
void SetOpLclRelatedToSIMDIntrinsic(GenTree* op);
#endif
#ifdef FEATURE_HW_INTRINSICS
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree** operands,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(
var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
GenTree* gtNewSimdAbsNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdBinOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCeilNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpAllNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCndSelNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCreateBroadcastNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdDotProdNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdFloorNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdGetElementNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdMaxNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdMinNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdNarrowNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdSqrtNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdSumNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdUnOpNode(genTreeOps op,
var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWidenLowerNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWidenUpperNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWithElementNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdZeroNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID);
CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType);
CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType);
#endif // FEATURE_HW_INTRINSICS
GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd);
GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset);
GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags);
GenTree* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0);
GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp);
GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block);
GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr);
GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock);
var_types gtTypeForNullCheck(GenTree* tree);
void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block);
static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum);
static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node);
fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx);
static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx);
GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src);
GenTree* gtNewTempAssign(unsigned tmp,
GenTree* val,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* gtNewRefCOMfield(GenTree* objPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp,
CORINFO_CLASS_HANDLE structType,
GenTree* assg);
GenTree* gtNewNothingNode();
GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd);
GenTree* gtUnusedValNode(GenTree* expr);
GenTree* gtNewKeepAliveNode(GenTree* op);
GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType);
GenTreeCast* gtNewCastNodeL(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType);
GenTreeAllocObj* gtNewAllocObjNode(
unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1);
GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent);
GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree);
GenTreeIndir* gtNewMethodTableLookup(GenTree* obj);
//------------------------------------------------------------------------
// Other GenTree functions
GenTree* gtClone(GenTree* tree, bool complexOK = false);
// If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise,
// create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with
// IntCnses with value `deepVarVal`.
GenTree* gtCloneExpr(
GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal);
// Create a copy of `tree`, optionally adding specified flags, and optionally mapping uses of local
// `varNum` to int constants with value `varVal`.
GenTree* gtCloneExpr(GenTree* tree,
GenTreeFlags addFlags = GTF_EMPTY,
unsigned varNum = BAD_VAR_NUM,
int varVal = 0)
{
return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal);
}
Statement* gtCloneStmt(Statement* stmt)
{
GenTree* exprClone = gtCloneExpr(stmt->GetRootNode());
return gtNewStmt(exprClone, stmt->GetDebugInfo());
}
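// Illustrative sketch: clone "tree" while rewriting every appearance of a local as an integer constant
// ("tree" and local number 3 are hypothetical):
//
//   GenTree* clone = gtCloneExpr(tree, GTF_EMPTY, 3, 0); // uses of V03 become the constant 0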
// Internal helper for cloning a call
GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call,
GenTreeFlags addFlags = GTF_EMPTY,
unsigned deepVarNum = BAD_VAR_NUM,
int deepVarVal = 0);
// Create copy of an inline or guarded devirtualization candidate tree.
GenTreeCall* gtCloneCandidateCall(GenTreeCall* call);
void gtUpdateSideEffects(Statement* stmt, GenTree* tree);
void gtUpdateTreeAncestorsSideEffects(GenTree* tree);
void gtUpdateStmtSideEffects(Statement* stmt);
void gtUpdateNodeSideEffects(GenTree* tree);
void gtUpdateNodeOperSideEffects(GenTree* tree);
void gtUpdateNodeOperSideEffectsPost(GenTree* tree);
// Returns "true" iff the complexity (not formally defined, but first interpretation
// is #of nodes in subtree) of "tree" is greater than "limit".
// (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used
// before they have been set.)
bool gtComplexityExceeds(GenTree** tree, unsigned limit);
GenTree* gtReverseCond(GenTree* tree);
static bool gtHasRef(GenTree* tree, ssize_t lclNum);
bool gtHasLocalsWithAddrOp(GenTree* tree);
unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz);
unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp);
void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly);
#ifdef DEBUG
unsigned gtHashValue(GenTree* tree);
GenTree* gtWalkOpEffectiveVal(GenTree* op);
#endif
void gtPrepareCost(GenTree* tree);
bool gtIsLikelyRegVar(GenTree* tree);
// Returns true iff the secondNode can be swapped with firstNode.
bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode);
// Given an address expression, compute its costs and addressing mode opportunities,
// and mark addressing mode candidates as GTF_DONT_CSE.
// TODO-Throughput - Consider actually instantiating these early, to avoid
// having to re-run the algorithm that looks for them (might also improve CQ).
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type);
unsigned gtSetEvalOrder(GenTree* tree);
void gtSetStmtInfo(Statement* stmt);
// Returns "true" iff "node" has any of the side effects in "flags".
bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags);
// Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags".
bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags);
// Appends 'expr' in front of 'list'
// 'list' will typically start off as 'nullptr'
// when 'list' is non-null a GT_COMMA node is used to insert 'expr'
GenTree* gtBuildCommaList(GenTree* list, GenTree* expr);
void gtExtractSideEffList(GenTree* expr,
GenTree** pList,
GenTreeFlags flags = GTF_SIDE_EFFECT,
bool ignoreRoot = false);
GenTree* gtGetThisArg(GenTreeCall* call);
// Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the
// static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but
// complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing
// the given "fldHnd", is such an object pointer.
bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd);
// Return true if call is a recursive call; return false otherwise.
// Note when inlining, this looks for calls back to the root method.
bool gtIsRecursiveCall(GenTreeCall* call)
{
return gtIsRecursiveCall(call->gtCallMethHnd);
}
bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle)
{
return (callMethodHandle == impInlineRoot()->info.compMethodHnd);
}
//-------------------------------------------------------------------------
GenTree* gtFoldExpr(GenTree* tree);
GenTree* gtFoldExprConst(GenTree* tree);
GenTree* gtFoldExprSpecial(GenTree* tree);
GenTree* gtFoldBoxNullable(GenTree* tree);
GenTree* gtFoldExprCompare(GenTree* tree);
GenTree* gtCreateHandleCompare(genTreeOps oper,
GenTree* op1,
GenTree* op2,
CorInfoInlineTypeCheck typeCheckInliningResult);
GenTree* gtFoldExprCall(GenTreeCall* call);
GenTree* gtFoldTypeCompare(GenTree* tree);
GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2);
// Options to control behavior of gtTryRemoveBoxUpstreamEffects
enum BoxRemovalOptions
{
BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree
BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree
BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree
BR_DONT_REMOVE, // check if removal is possible, return copy source tree
BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree
BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address
};
GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW);
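// Illustrative sketch (hypothetical "boxNode"): when folding a pattern such as "box(v) != null", the
// upstream box allocation can often be removed entirely:
//
//   GenTree* source = gtTryRemoveBoxUpstreamEffects(boxNode, BR_REMOVE_BUT_NOT_NARROW);
//   if (source != nullptr)
//   {
//       // the box was removed; "source" is the original (un-boxed) source tree
//   }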
GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp);
//-------------------------------------------------------------------------
// Get the handle, if any.
CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree);
// Get the handle, and assert if not found.
CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree);
// Get the handle for a ref type.
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull);
// Get the class handle for a helper call
CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull);
// Get the element handle for an array of ref type.
CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array);
// Get a class handle from a helper call argument
CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array);
// Get the class handle for a field
CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull);
// Check if this tree is a gc static base helper call
bool gtIsStaticGCBaseHelperCall(GenTree* tree);
//-------------------------------------------------------------------------
// Functions to display the trees
#ifdef DEBUG
void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR);
void gtDispConst(GenTree* tree);
void gtDispLeaf(GenTree* tree, IndentStack* indentStack);
void gtDispNodeName(GenTree* tree);
#if FEATURE_MULTIREG_RET
unsigned gtDispRegCount(GenTree* tree);
#endif
void gtDispRegVal(GenTree* tree);
void gtDispZeroFieldSeq(GenTree* tree);
void gtDispVN(GenTree* tree);
void gtDispCommonEndLine(GenTree* tree);
enum IndentInfo
{
IINone,
IIArc,
IIArcTop,
IIArcBottom,
IIEmbedded,
IIError,
IndentInfoCount
};
void gtDispChild(GenTree* child,
IndentStack* indentStack,
IndentInfo arcType,
_In_opt_ const char* msg = nullptr,
bool topOnly = false);
void gtDispTree(GenTree* tree,
IndentStack* indentStack = nullptr,
_In_opt_ const char* msg = nullptr,
bool topOnly = false,
bool isLIR = false);
void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut);
int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining);
char* gtGetLclVarName(unsigned lclNum);
void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true);
void gtDispLclVarStructType(unsigned lclNum);
void gtDispClassLayout(ClassLayout* layout, var_types type);
void gtDispILLocation(const ILLocation& loc);
void gtDispStmt(Statement* stmt, const char* msg = nullptr);
void gtDispBlockStmts(BasicBlock* block);
void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength);
void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength);
void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack);
void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq);
void gtDispFieldSeq(FieldSeqNode* pfsn);
void gtDispRange(LIR::ReadOnlyRange const& range);
void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree);
void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr);
#endif
// For tree walks
enum fgWalkResult
{
WALK_CONTINUE,
WALK_SKIP_SUBTREES,
WALK_ABORT
};
struct fgWalkData;
typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data);
typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data);
static fgWalkPreFn gtMarkColonCond;
static fgWalkPreFn gtClearColonCond;
struct FindLinkData
{
GenTree* nodeToFind;
GenTree** result;
GenTree* parent;
};
FindLinkData gtFindLink(Statement* stmt, GenTree* node);
bool gtHasCatchArg(GenTree* tree);
typedef ArrayStack<GenTree*> GenTreeStack;
static bool gtHasCallOnStack(GenTreeStack* parentStack);
//=========================================================================
// BasicBlock functions
#ifdef DEBUG
// This is a debug flag we will use to assert when creating a block during codegen,
// as this interferes with procedure splitting. If you know what you're doing, set
// it to true before creating the block. (DEBUG only)
bool fgSafeBasicBlockCreation;
#endif
BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind);
void placeLoopAlignInstructions();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX LclVarsInfo XX
XX XX
XX The variables to be used by the code generator. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
//
// For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will
// be placed in the stack frame and its fields must be laid out sequentially.
//
// For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by
// a local variable that can be enregistered or placed in the stack frame.
// The fields do not need to be laid out sequentially.
//
enum lvaPromotionType
{
PROMOTION_TYPE_NONE, // The struct local is not promoted
PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted,
// and its field locals are independent of its parent struct local.
PROMOTION_TYPE_DEPENDENT // The struct local is promoted,
// but its field locals depend on its parent struct local.
};
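// For example, a struct local with fields { int a; int b; } that is promoted PROMOTION_TYPE_INDEPENDENT is
// replaced by two int locals which may each live in a register, while under PROMOTION_TYPE_DEPENDENT the two
// field locals stay at their sequential offsets inside the parent's stack-allocated struct.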
/*****************************************************************************/
enum FrameLayoutState
{
NO_FRAME_LAYOUT,
INITIAL_FRAME_LAYOUT,
PRE_REGALLOC_FRAME_LAYOUT,
REGALLOC_FRAME_LAYOUT,
TENTATIVE_FRAME_LAYOUT,
FINAL_FRAME_LAYOUT
};
public:
RefCountState lvaRefCountState; // Current local ref count state
bool lvaLocalVarRefCounted() const
{
return lvaRefCountState == RCS_NORMAL;
}
bool lvaTrackedFixed; // true: We cannot add new 'tracked' variables
unsigned lvaCount; // total number of locals, which includes function arguments,
// special arguments, IL local variables, and JIT temporary variables
LclVarDsc* lvaTable; // variable descriptor table
unsigned lvaTableCnt; // lvaTable size (>= lvaCount)
unsigned lvaTrackedCount; // actual # of locals being tracked
unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked
#ifdef DEBUG
VARSET_TP lvaTrackedVars; // set of tracked variables
#endif
#ifndef TARGET_64BIT
VARSET_TP lvaLongVars; // set of long (64-bit) variables
#endif
VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables
unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices.
// If that changes, this changes. VarSets from different epochs
// cannot be meaningfully combined.
unsigned GetCurLVEpoch()
{
return lvaCurEpoch;
}
// reverse map of tracked number to var number
unsigned lvaTrackedToVarNumSize;
unsigned* lvaTrackedToVarNum;
#if DOUBLE_ALIGN
#ifdef DEBUG
// # of procs compiled with a double-aligned stack
static unsigned s_lvaDoubleAlignedProcsCount;
#endif
#endif
// Getters and setters for address-exposed and do-not-enregister local var properties.
bool lvaVarAddrExposed(unsigned varNum) const;
void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason));
void lvaSetVarLiveInOutOfHandler(unsigned varNum);
bool lvaVarDoNotEnregister(unsigned varNum);
void lvSetMinOptsDoNotEnreg();
bool lvaEnregEHVars;
bool lvaEnregMultiRegVars;
void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason));
unsigned lvaVarargsHandleArg;
#ifdef TARGET_X86
unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack
// arguments
#endif // TARGET_X86
unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame
unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame
#if FEATURE_FIXED_OUT_ARGS
unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining.
#endif
unsigned lvaMonAcquired; // boolean variable introduced in synchronized methods
// that tracks whether the lock has been taken
unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg.
// However, if there is a "ldarga 0" or "starg 0" in the IL,
// we will redirect all "ldarg(a) 0" and "starg 0" to this temp.
unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
// in case there are multiple BBJ_RETURN blocks in the inlinee
// or if the inlinee has GC ref locals.
#if FEATURE_FIXED_OUT_ARGS
unsigned lvaOutgoingArgSpaceVar; // dummy TYP_LCLBLK var for fixed outgoing argument space
PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space
#endif // FEATURE_FIXED_OUT_ARGS
static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding)
{
return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE);
}
// Variable representing the return address. The helper-based tailcall
// mechanism passes the address of the return address to a runtime helper
// where it is used to detect tail-call chains.
unsigned lvaRetAddrVar;
#if defined(DEBUG) && defined(TARGET_XARCH)
unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return.
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#if defined(DEBUG) && defined(TARGET_X86)
unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call.
#endif // defined(DEBUG) && defined(TARGET_X86)
bool lvaGenericsContextInUse;
bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or
// CORINFO_GENERICS_CTXT_FROM_THIS?
bool lvaReportParamTypeArg(); // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG?
//-------------------------------------------------------------------------
// All these frame offsets are inter-related and must be kept in sync
#if !defined(FEATURE_EH_FUNCLETS)
// This is used for the callable handlers
unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots
#endif // !FEATURE_EH_FUNCLETS
int lvaCachedGenericContextArgOffs;
int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as
// THIS pointer
#ifdef JIT32_GCENCODER
unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc
#endif // JIT32_GCENCODER
unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper
// TODO-Review: Prior to reg predict we reserve 24 bytes for spill temps.
// After reg predict we will use a computed maxTmpSize,
// which is based upon the number of spill temps predicted by reg predict.
// All this is necessary because if we under-estimate the size of the spill
// temps we could fail when encoding instructions that reference stack offsets for ARM.
//
// Pre codegen max spill temp size.
static const unsigned MAX_SPILL_TEMP_SIZE = 24;
//-------------------------------------------------------------------------
unsigned lvaGetMaxSpillTempSize();
#ifdef TARGET_ARM
bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask);
#endif // TARGET_ARM
void lvaAssignFrameOffsets(FrameLayoutState curState);
void lvaFixVirtualFrameOffsets();
void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc);
void lvaUpdateArgsWithInitialReg();
void lvaAssignVirtualFrameOffsetsToArgs();
#ifdef UNIX_AMD64_ABI
int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset);
#else // !UNIX_AMD64_ABI
int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs);
#endif // !UNIX_AMD64_ABI
void lvaAssignVirtualFrameOffsetsToLocals();
int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs);
#ifdef TARGET_AMD64
// Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even.
bool lvaIsCalleeSavedIntRegCountEven();
#endif
void lvaAlignFrame();
void lvaAssignFrameOffsetsToPromotedStructs();
int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign);
#ifdef DEBUG
void lvaDumpRegLocation(unsigned lclNum);
void lvaDumpFrameLocation(unsigned lclNum);
void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6);
void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame
// layout state defined by lvaDoneFrameLayout
#endif
// Limit frames size to 1GB. The maximum is 2GB in theory - make it intentionally smaller
// to avoid bugs from borderline cases.
#define MAX_FrameSize 0x3FFFFFFF
void lvaIncrementFrameSize(unsigned size);
unsigned lvaFrameSize(FrameLayoutState curState);
// Converts the given SP-relative or FP-relative offset (selected by "isFpBased") to a caller-SP-relative offset.
int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const;
// Returns the caller-SP-relative offset for the local variable "varNum."
int lvaGetCallerSPRelativeOffset(unsigned varNum);
// Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc.
int lvaGetSPRelativeOffset(unsigned varNum);
int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased);
int lvaGetInitialSPRelativeOffset(unsigned varNum);
// True if this is an OSR compilation and this local is potentially
// located on the original method stack frame.
bool lvaIsOSRLocal(unsigned varNum);
//------------------------ For splitting types ----------------------------
void lvaInitTypeRef();
void lvaInitArgs(InitVarDscInfo* varDscInfo);
void lvaInitThisPtr(InitVarDscInfo* varDscInfo);
void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg);
void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs);
void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo);
void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo);
void lvaInitVarDsc(LclVarDsc* varDsc,
unsigned varNum,
CorInfoType corInfoType,
CORINFO_CLASS_HANDLE typeHnd,
CORINFO_ARG_LIST_HANDLE varList,
CORINFO_SIG_INFO* varSig);
static unsigned lvaTypeRefMask(var_types type);
var_types lvaGetActualType(unsigned lclNum);
var_types lvaGetRealType(unsigned lclNum);
//-------------------------------------------------------------------------
void lvaInit();
LclVarDsc* lvaGetDesc(unsigned lclNum)
{
assert(lclNum < lvaCount);
return &lvaTable[lclNum];
}
LclVarDsc* lvaGetDesc(unsigned lclNum) const
{
assert(lclNum < lvaCount);
return &lvaTable[lclNum];
}
LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar)
{
return lvaGetDesc(lclVar->GetLclNum());
}
unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex)
{
assert(trackedIndex < lvaTrackedCount);
unsigned lclNum = lvaTrackedToVarNum[trackedIndex];
assert(lclNum < lvaCount);
return lclNum;
}
LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex)
{
return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex));
}
unsigned lvaGetLclNum(const LclVarDsc* varDsc)
{
assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table
assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) ==
0); // varDsc better not point in the middle of a variable
unsigned varNum = (unsigned)(varDsc - lvaTable);
assert(varDsc == &lvaTable[varNum]);
return varNum;
}
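// Illustrative round trip between local numbers and descriptors (hypothetical "lclNum"):
//
//   LclVarDsc* varDsc = lvaGetDesc(lclNum);
//   assert(lvaGetLclNum(varDsc) == lclNum);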
unsigned lvaLclSize(unsigned varNum);
unsigned lvaLclExactSize(unsigned varNum);
bool lvaHaveManyLocals() const;
unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason));
unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason));
unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason));
void lvaSortByRefCount();
void lvaMarkLocalVars(); // Local variable ref-counting
void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers);
void lvaMarkLocalVars(BasicBlock* block, bool isRecompute);
void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar
VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt);
#ifdef DEBUG
struct lvaStressLclFldArgs
{
Compiler* m_pCompiler;
bool m_bFirstPass;
};
static fgWalkPreFn lvaStressLclFldCB;
void lvaStressLclFld();
void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars);
void lvaDispVarSet(VARSET_VALARG_TP set);
#endif
#ifdef TARGET_ARM
int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage);
#else
int lvaFrameAddress(int varNum, bool* pFPbased);
#endif
bool lvaIsParameter(unsigned varNum);
bool lvaIsRegArgument(unsigned varNum);
bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument?
bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code
// that writes to arg0
// For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference.
// For ARM64, this is structs larger than 16 bytes that are passed by reference.
bool lvaIsImplicitByRefLocal(unsigned varNum)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
LclVarDsc* varDsc = lvaGetDesc(varNum);
if (varDsc->lvIsImplicitByRef)
{
assert(varDsc->lvIsParam);
assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF));
return true;
}
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
return false;
}
// Returns true if this local var is a multireg struct
bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg);
// If the local is a TYP_STRUCT, get/set a class handle describing it
CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum);
void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true);
void lvaSetStructUsedAsVarArg(unsigned varNum);
// If the local is TYP_REF, set or update the associated class information.
void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
#define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct
// Info about struct type fields.
struct lvaStructFieldInfo
{
CORINFO_FIELD_HANDLE fldHnd;
unsigned char fldOffset;
unsigned char fldOrdinal;
var_types fldType;
unsigned fldSize;
CORINFO_CLASS_HANDLE fldTypeHnd;
lvaStructFieldInfo()
: fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr)
{
}
};
// Info about a struct type, instances of which may be candidates for promotion.
struct lvaStructPromotionInfo
{
CORINFO_CLASS_HANDLE typeHnd;
bool canPromote;
bool containsHoles;
bool customLayout;
bool fieldsSorted;
unsigned char fieldCnt;
lvaStructFieldInfo fields[MAX_NumOfFieldsInPromotableStruct];
lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr)
: typeHnd(typeHnd)
, canPromote(false)
, containsHoles(false)
, customLayout(false)
, fieldsSorted(false)
, fieldCnt(0)
{
}
};
struct lvaFieldOffsetCmp
{
bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2);
};
// This class is responsible for checking validity and profitability of struct promotion.
// If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes
// necessary information for fgMorphStructField to use.
class StructPromotionHelper
{
public:
StructPromotionHelper(Compiler* compiler);
bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd);
bool TryPromoteStructVar(unsigned lclNum);
void Clear()
{
structPromotionInfo.typeHnd = NO_CLASS_HANDLE;
}
#ifdef DEBUG
void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType);
#endif // DEBUG
private:
bool CanPromoteStructVar(unsigned lclNum);
bool ShouldPromoteStructVar(unsigned lclNum);
void PromoteStructVar(unsigned lclNum);
void SortStructFields();
lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal);
bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo);
private:
Compiler* compiler;
lvaStructPromotionInfo structPromotionInfo;
#ifdef DEBUG
typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types>
RetypedAsScalarFieldsMap;
RetypedAsScalarFieldsMap retypedFieldsMap;
#endif // DEBUG
};
StructPromotionHelper* structPromotionHelper;
unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset);
lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetPromotionType(unsigned varNum);
lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetParentPromotionType(unsigned varNum);
bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc);
bool lvaIsGCTracked(const LclVarDsc* varDsc);
#if defined(FEATURE_SIMD)
bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc)
{
assert(varDsc->lvType == TYP_SIMD12);
assert(varDsc->lvExactSize == 12);
#if defined(TARGET_64BIT)
assert(compMacOsArm64Abi() || varDsc->lvSize() == 16);
#endif // defined(TARGET_64BIT)
// We make local variable SIMD12 types 16 bytes instead of just 12.
// lvSize() will return 16 bytes for SIMD12, even for fields.
// However, we can't do that mapping if the var is a dependently promoted struct field.
// Such a field must remain its exact size within its parent struct unless it is a single
// field *and* it is the only field in a struct of 16 bytes.
if (varDsc->lvSize() != 16)
{
return false;
}
if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl);
return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16);
}
return true;
}
#endif // defined(FEATURE_SIMD)
unsigned lvaGSSecurityCookie; // LclVar number
bool lvaTempsHaveLargerOffsetThanVars();
// Returns "true" iff local variable "lclNum" is in SSA form.
bool lvaInSsa(unsigned lclNum)
{
assert(lclNum < lvaCount);
return lvaTable[lclNum].lvInSsa;
}
unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX
#if defined(FEATURE_EH_FUNCLETS)
unsigned lvaPSPSym; // variable representing the PSPSym
#endif
InlineInfo* impInlineInfo; // Only present for inlinees
InlineStrategy* m_inlineStrategy;
InlineContext* compInlineContext; // Always present
// The Compiler* that is the root of the inlining tree of which "this" is a member.
Compiler* impInlineRoot();
#if defined(DEBUG) || defined(INLINE_DATA)
unsigned __int64 getInlineCycleCount()
{
return m_compCycles;
}
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method.
bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters of this method.
//=========================================================================
// PROTECTED
//=========================================================================
protected:
//---------------- Local variable ref-counting ----------------------------
void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute);
bool IsDominatedByExceptionalEntry(BasicBlock* block);
void SetVolatileHint(LclVarDsc* varDsc);
// Keeps the mapping from SSA #'s to VN's for the implicit memory variables.
SsaDefArray<SsaMemDef> lvMemoryPerSsaData;
public:
// Returns the address of the per-Ssa data for memory at the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum)
{
return lvMemoryPerSsaData.GetSsaDef(ssaNum);
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Importer XX
XX XX
XX Imports the given method and converts it to semantic trees XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
private:
// For prefixFlags
enum
{
PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
PREFIX_TAILCALL_IMPLICIT =
0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
PREFIX_TAILCALL_STRESS =
0x00000100, // call doesn't have the "tail" IL prefix but is treated as explicit because of tail call stress
PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS),
PREFIX_VOLATILE = 0x00001000,
PREFIX_UNALIGNED = 0x00010000,
PREFIX_CONSTRAINED = 0x00100000,
PREFIX_READONLY = 0x01000000
};
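// Illustrative sketch (not part of the JIT source): prefixFlags is a plain bit mask, so the
// values above are tested with bitwise AND; 'prefixFlags' here stands for the importer's local.
//
//   bool anyTailPrefix    = (prefixFlags & PREFIX_TAILCALL) != 0;
//   bool isVolatileAccess = (prefixFlags & PREFIX_VOLATILE) != 0;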
static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix);
static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp);
static bool impOpcodeIsCallOpcode(OPCODE opcode);
public:
void impInit();
void impImport();
CORINFO_CLASS_HANDLE impGetRefAnyClass();
CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle();
CORINFO_CLASS_HANDLE impGetTypeHandleClass();
CORINFO_CLASS_HANDLE impGetStringClass();
CORINFO_CLASS_HANDLE impGetObjectClass();
// Returns underlying type of handles returned by ldtoken instruction
var_types GetRuntimeHandleUnderlyingType()
{
// RuntimeTypeHandle is backed by a raw pointer on CoreRT and by an object reference on other runtimes
return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF;
}
void impDevirtualizeCall(GenTreeCall* call,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_METHOD_HANDLE* method,
unsigned* methodFlags,
CORINFO_CONTEXT_HANDLE* contextHandle,
CORINFO_CONTEXT_HANDLE* exactContextHandle,
bool isLateDevirtualization,
bool isExplicitTailCall,
IL_OFFSET ilOffset = BAD_IL_OFFSET);
//=========================================================================
// PROTECTED
//=========================================================================
protected:
//-------------------- Stack manipulation ---------------------------------
unsigned impStkSize; // Size of the full stack
#define SMALL_STACK_SIZE 16 // number of elements in impSmallStack
struct SavedStack // used to save/restore stack contents.
{
unsigned ssDepth; // number of values on stack
StackEntry* ssTrees; // saved tree values
};
bool impIsPrimitive(CorInfoType type);
bool impILConsumesAddr(const BYTE* codeAddr);
void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind);
void impPushOnStack(GenTree* tree, typeInfo ti);
void impPushNullObjRefOnStack();
StackEntry impPopStack();
StackEntry& impStackTop(unsigned n = 0);
unsigned impStackHeight();
void impSaveStackState(SavedStack* savePtr, bool copy);
void impRestoreStackState(SavedStack* savePtr);
GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const BYTE* codeAddr,
const BYTE* codeEndp,
bool makeInlineObservation = false);
void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken);
void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
bool impCanPInvokeInline();
bool impCanPInvokeInlineCallSite(BasicBlock* block);
void impCheckForPInvokeCall(
GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block);
GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo());
void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig);
void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall);
void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall);
void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall);
var_types impImportCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a
// type parameter?
GenTree* newobjThis,
int prefixFlags,
CORINFO_CALL_INFO* callInfo,
IL_OFFSET rawILOffset);
CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle);
bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv);
GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd);
GenTree* impFixupStructReturnType(GenTree* op,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension unmgdCallConv);
#ifdef DEBUG
var_types impImportJitTestLabelMark(int numArgs);
#endif // DEBUG
GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken);
GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp);
GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp);
static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr);
GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp);
GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp);
void impImportLeave(BasicBlock* block);
void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr);
GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom);
GenTree* impIntrinsic(GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef,
bool readonlyCall,
bool tailCall,
CORINFO_RESOLVED_TOKEN* pContstrainedResolvedToken,
CORINFO_THIS_TRANSFORM constraintCallThisTransform,
NamedIntrinsic* pIntrinsicName,
bool* isSpecialIntrinsic = nullptr);
GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
var_types callType,
NamedIntrinsic intrinsicName,
bool tailCall);
NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method);
GenTree* impUnsupportedNamedIntrinsic(unsigned helper,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand);
#ifdef FEATURE_HW_INTRINSICS
GenTree* impHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand);
GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
GenTree* newobjThis);
protected:
bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa);
GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
var_types retType,
CorInfoType simdBaseJitType,
unsigned simdSize,
GenTree* newobjThis);
GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize);
GenTree* getArgForHWIntrinsic(var_types argType,
CORINFO_CLASS_HANDLE argClass,
bool expectAddr = false,
GenTree* newobjThis = nullptr);
GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType);
GenTree* addRangeCheckIfNeeded(
NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound);
GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound);
#ifdef TARGET_XARCH
GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize);
GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
#endif // TARGET_XARCH
#endif // FEATURE_HW_INTRINSICS
GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
int memberRef,
bool readonlyCall,
NamedIntrinsic intrinsicName);
GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig);
GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig);
GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive);
GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
GenTree* impTransformThis(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
CORINFO_THIS_TRANSFORM transform);
//----------------- Manipulating the trees and stmts ----------------------
Statement* impStmtList; // Statements for the BB being imported.
Statement* impLastStmt; // The last statement for the current BB.
public:
enum
{
CHECK_SPILL_ALL = -1,
CHECK_SPILL_NONE = -2
};
void impBeginTreeList();
void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt);
void impEndTreeList(BasicBlock* block);
void impAppendStmtCheck(Statement* stmt, unsigned chkLevel);
void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true);
void impAppendStmt(Statement* stmt);
void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore);
Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true);
void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore);
void impAssignTempGen(unsigned tmp,
GenTree* val,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
void impAssignTempGen(unsigned tmpNum,
GenTree* val,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
Statement* impExtractLastStmt();
GenTree* impCloneExpr(GenTree* tree,
GenTree** clone,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt DEBUGARG(const char* reason));
GenTree* impAssignStruct(GenTree* dest,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* impAssignStructPtr(GenTree* dest,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref);
var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr);
GenTree* impNormStructVal(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
bool forceNormalization = false);
GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
bool* pRuntimeLookup = nullptr,
bool mustRestoreHandle = false,
bool importParent = false);
GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
bool* pRuntimeLookup = nullptr,
bool mustRestoreHandle = false)
{
return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true);
}
GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags flags,
void* compileTimeHandle);
GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind);
GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle);
GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle);
GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CorInfoHelpFunc helper,
var_types type,
GenTreeCall::Use* args = nullptr,
CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr);
bool impIsCastHelperEligibleForClassProbe(GenTree* tree);
bool impIsCastHelperMayHaveProfileData(GenTree* tree);
GenTree* impCastClassOrIsInstToTree(
GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset);
GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass);
bool VarTypeIsMultiByteAndCanEnreg(var_types type,
CORINFO_CLASS_HANDLE typeClass,
unsigned* typeSize,
bool forReturn,
bool isVarArg,
CorInfoCallConvExtension callConv);
bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName);
bool IsTargetIntrinsic(NamedIntrinsic intrinsicName);
bool IsMathIntrinsic(NamedIntrinsic intrinsicName);
bool IsMathIntrinsic(GenTree* tree);
private:
//----------------- Importing the method ----------------------------------
CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens.
#ifdef DEBUG
unsigned impCurOpcOffs;
const char* impCurOpcName;
bool impNestedStackSpill;
// For displaying instrs with generated native code (-n:B)
Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset().
void impNoteLastILoffs();
#endif
// Debug info of current statement being imported. It gets set to contain
// no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been
// set in the appended trees. Then it gets updated at IL instructions for
// which we have to report mapping info.
// It will always contain the current inline context.
DebugInfo impCurStmtDI;
DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall);
void impCurStmtOffsSet(IL_OFFSET offs);
void impNoteBranchOffs();
unsigned impInitBlockLineInfo();
bool impIsThis(GenTree* obj);
bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
bool impIsAnySTLOC(OPCODE opcode)
{
return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) ||
((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3)));
}
GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr);
bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const;
GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0);
//---------------- Spilling the importer stack ----------------------------
// The maximum number of bytes of IL processed without clean stack state.
// It allows us to limit the maximum tree size and depth.
static const unsigned MAX_TREE_SIZE = 200;
bool impCanSpillNow(OPCODE prevOpcode);
struct PendingDsc
{
PendingDsc* pdNext;
BasicBlock* pdBB;
SavedStack pdSavedStack;
ThisInitState pdThisPtrInit;
};
PendingDsc* impPendingList; // list of BBs currently waiting to be imported.
PendingDsc* impPendingFree; // Freed up dscs that can be reused
// We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation.
JitExpandArray<BYTE> impPendingBlockMembers;
// Return the byte for "blk" (allocating/extending impPendingBlockMembers if necessary).
// Operates on the map in the top-level ancestor.
BYTE impGetPendingBlockMember(BasicBlock* blk)
{
return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd());
}
// Set the byte for "blk" to "val" (allocating/extending impPendingBlockMembers if necessary).
// Operates on the map in the top-level ancestor.
void impSetPendingBlockMember(BasicBlock* blk, BYTE val)
{
impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val);
}
bool impCanReimport;
bool impSpillStackEntry(unsigned level,
unsigned varNum
#ifdef DEBUG
,
bool bAssertOnRecursion,
const char* reason
#endif
);
void impSpillStackEnsure(bool spillLeaves = false);
void impEvalSideEffects();
void impSpillSpecialSideEff();
void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason));
void impSpillValueClasses();
void impSpillEvalStack();
static fgWalkPreFn impFindValueClasses;
void impSpillLclRefs(ssize_t lclNum);
BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter);
bool impBlockIsInALoop(BasicBlock* block);
void impImportBlockCode(BasicBlock* block);
void impReimportMarkBlock(BasicBlock* block);
void impReimportMarkSuccessors(BasicBlock* block);
void impVerifyEHBlock(BasicBlock* block, bool isTryStart);
void impImportBlockPending(BasicBlock* block);
// Similar to impImportBlockPending, but assumes that block has already been imported once and is being
// reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState
// for the block, but instead, just re-uses the block's existing EntryState.
void impReimportBlockPending(BasicBlock* block);
var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2);
void impImportBlock(BasicBlock* block);
// Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values
// on the stack to local variables (the "spill temp" variables). The successor blocks will assume that
// their incoming stack contents are in those locals. This requires "block" and its successors to agree on
// the variables that will be used -- and for all the predecessors of those successors, and the
// successors of those predecessors, etc. Call such a set of blocks closed under alternating
// successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the
// clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill
// temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series
// of local variable numbers, so we represent them with the base local variable number), returns that.
// Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of
// which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps
// chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending
// on which kind of member of the clique the block is).
unsigned impGetSpillTmpBase(BasicBlock* block);
// Assumes that "block" is a basic block that completes with a non-empty stack. We have previously
// assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks
// will assume that their incoming stack contents are in those locals. This requires "block" and its
// successors to agree on the variables and their types that will be used. The CLI spec allows implicit
// conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can
// push an int and another can push a native int. For 64-bit we have chosen to implement this by typing
// the "spill temp" as native int, and then importing (or re-importing as needed) so that all the
// predecessors in the "spill clique" push a native int (sign-extending if needed), and all the
// successors receive a native int. Similarly float and double are unified to double.
// This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark
// blocks for re-importation as appropriate (both successors, so they get the right incoming type, and
// predecessors, so they insert an upcast if needed).
void impReimportSpillClique(BasicBlock* block);
// When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic
// block, and represent the predecessor and successor members of the clique currently being computed.
// *** Access to these will need to be locked in a parallel compiler.
JitExpandArray<BYTE> impSpillCliquePredMembers;
JitExpandArray<BYTE> impSpillCliqueSuccMembers;
enum SpillCliqueDir
{
SpillCliquePred,
SpillCliqueSucc
};
// Abstract class for receiving a callback while walking a spill clique
class SpillCliqueWalker
{
public:
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0;
};
// This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique
class SetSpillTempsBase : public SpillCliqueWalker
{
unsigned m_baseTmp;
public:
SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp)
{
}
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};
// This class is used for implementing impReimportSpillClique part on each block within the spill clique
class ReimportSpillClique : public SpillCliqueWalker
{
Compiler* m_pComp;
public:
ReimportSpillClique(Compiler* pComp) : m_pComp(pComp)
{
}
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};
// This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each
// predecessor or successor within the spill clique
void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback);
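// Illustrative sketch (not part of the JIT source): a minimal SpillCliqueWalker that only
// counts the clique members it visits; 'predBlock' is assumed to be a predecessor member.
//
//   class CountSpillCliqueMembers : public SpillCliqueWalker
//   {
//   public:
//       unsigned m_count = 0;
//       virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
//       {
//           m_count++; // invoked once per predecessor/successor member of the clique
//       }
//   };
//
//   CountSpillCliqueMembers counter;
//   impWalkSpillCliqueFromPred(predBlock, &counter);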
// For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the
// incoming locals. This walks that list and resets the types of the GenTrees to match the types of
// the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique).
void impRetypeEntryStateTemps(BasicBlock* blk);
BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk);
void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val);
void impPushVar(GenTree* op, typeInfo tiRetVal);
GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset));
void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal);
void impLoadVar(unsigned lclNum, IL_OFFSET offset)
{
impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo);
}
void impLoadArg(unsigned ilArgNum, IL_OFFSET offset);
void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset);
bool impReturnInstruction(int prefixFlags, OPCODE& opcode);
#ifdef TARGET_ARM
void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass);
#endif
// A free list of linked list nodes used to represent to-do stacks of basic blocks.
struct BlockListNode
{
BasicBlock* m_blk;
BlockListNode* m_next;
BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next)
{
}
void* operator new(size_t sz, Compiler* comp);
};
BlockListNode* impBlockListNodeFreeList;
void FreeBlockListNode(BlockListNode* node);
bool impIsValueType(typeInfo* pTypeInfo);
var_types mangleVarArgsType(var_types type);
regNumber getCallArgIntRegister(regNumber floatReg);
regNumber getCallArgFloatRegister(regNumber intReg);
#if defined(DEBUG)
static unsigned jitTotalMethodCompiled;
#endif
#ifdef DEBUG
static LONG jitNestingLevel;
#endif // DEBUG
static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr);
void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult);
// STATIC inlining decision based on the IL code.
void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
CORINFO_METHOD_INFO* methInfo,
bool forceInline,
InlineResult* inlineResult);
void impCheckCanInline(GenTreeCall* call,
CORINFO_METHOD_HANDLE fncHandle,
unsigned methAttr,
CORINFO_CONTEXT_HANDLE exactContextHnd,
InlineCandidateInfo** ppInlineCandidateInfo,
InlineResult* inlineResult);
void impInlineRecordArgInfo(InlineInfo* pInlineInfo,
GenTree* curArgVal,
unsigned argNum,
InlineResult* inlineResult);
void impInlineInitVars(InlineInfo* pInlineInfo);
unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason));
GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo);
bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo);
bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree,
GenTreeCall::Use* additionalCallArgs,
GenTree* dereferencedAddress,
InlArgInfo* inlArgInfo);
void impMarkInlineCandidate(GenTree* call,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo);
void impMarkInlineCandidateHelper(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo);
bool impTailCallRetTypeCompatible(bool allowWidening,
var_types callerRetType,
CORINFO_CLASS_HANDLE callerRetTypeClass,
CorInfoCallConvExtension callerCallConv,
var_types calleeRetType,
CORINFO_CLASS_HANDLE calleeRetTypeClass,
CorInfoCallConvExtension calleeCallConv);
bool impIsTailCallILPattern(
bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive);
bool impIsImplicitTailCallCandidate(
OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive);
bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd);
bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array);
CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX FlowGraph XX
XX XX
XX Info about the basic-blocks, their contents and the flow analysis XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
BasicBlock* fgFirstBB; // Beginning of the basic block list
BasicBlock* fgLastBB; // End of the basic block list
BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section
BasicBlock* fgEntryBB; // For OSR, the original method's entry point
BasicBlock* fgOSREntryBB; // For OSR, the logical entry point (~ patchpoint)
#if defined(FEATURE_EH_FUNCLETS)
BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets)
#endif
BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been
// created.
BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks
unsigned fgEdgeCount; // # of control flow edges between the BBs
unsigned fgBBcount; // # of BBs in the method
#ifdef DEBUG
unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen
#endif
unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks
unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information
BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute
// dominance. Indexed by block number. Size: fgBBNumMax + 1.
// After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute
// dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and
// postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered
// starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely
// to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array
// index). The arrays are of size fgBBNumMax + 1.
unsigned* fgDomTreePreOrder;
unsigned* fgDomTreePostOrder;
// Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree
// in order to avoid the need for SSA reconstruction and an "out of SSA" phase).
DomTreeNode* fgSsaDomTree;
bool fgBBVarSetsInited;
// Allocate array like T* a = new T[fgBBNumMax + 1];
// Using helper so we don't keep forgetting +1.
template <typename T>
T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown)
{
return getAllocator(cmk).allocate<T>(fgBBNumMax + 1);
}
// BlockSets are relative to a specific set of BasicBlock numbers. If that changes
// (if the blocks are renumbered), this changes. BlockSets from different epochs
// cannot be meaningfully combined. Note that new blocks can be created with higher
// block numbers without changing the basic block epoch. These blocks *cannot*
// participate in a block set until the blocks are all renumbered, causing the epoch
// to change. This is useful if continuing to use previous block sets is valuable.
// If the epoch is zero, then it is uninitialized, and block sets can't be used.
unsigned fgCurBBEpoch;
unsigned GetCurBasicBlockEpoch()
{
return fgCurBBEpoch;
}
// The number of basic blocks in the current epoch. When the blocks are renumbered,
// this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains
// the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered.
unsigned fgCurBBEpochSize;
// The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize
// bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called.
unsigned fgBBSetCountInSizeTUnits;
void NewBasicBlockEpoch()
{
INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits);
// We have a new epoch. Compute and cache the size needed for new BlockSets.
fgCurBBEpoch++;
fgCurBBEpochSize = fgBBNumMax + 1;
fgBBSetCountInSizeTUnits =
roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);
#ifdef DEBUG
// All BlockSet objects are now invalid!
fgReachabilitySetsValid = false; // the bbReach sets are now invalid!
fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid!
if (verbose)
{
unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t));
printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)",
fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long");
if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1)))
{
// If we're not just establishing the first epoch, and the epoch array size has changed such that we're
// going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an
// array of size_t bitsets), then print that out.
printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long");
}
printf("\n");
}
#endif // DEBUG
}
void EnsureBasicBlockEpoch()
{
if (fgCurBBEpochSize != fgBBNumMax + 1)
{
NewBasicBlockEpoch();
}
}
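// Illustrative sketch (not part of the JIT source): typical pattern after renumbering blocks,
// assuming the usual fgRenumberBlocks/BlockSetOps helpers: refresh the epoch before allocating
// a new BlockSet so it is sized for the current block numbering.
//
//   fgRenumberBlocks();      // block numbers (and possibly fgBBNumMax) change
//   EnsureBasicBlockEpoch(); // start a new epoch so BlockSets are sized correctly
//   BlockSet newSet(BlockSetOps::MakeEmpty(this));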
BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind);
void fgEnsureFirstBBisScratch();
bool fgFirstBBisScratch();
bool fgBBisScratch(BasicBlock* block);
void fgExtendEHRegionBefore(BasicBlock* block);
void fgExtendEHRegionAfter(BasicBlock* block);
BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion);
BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind,
unsigned tryIndex,
unsigned hndIndex,
BasicBlock* nearBlk,
bool putInFilter = false,
bool runRarely = false,
bool insertAtEnd = false);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind,
BasicBlock* srcBlk,
bool runRarely = false,
bool insertAtEnd = false);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind);
BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind,
BasicBlock* afterBlk,
unsigned xcptnIndex,
bool putInTryRegion);
void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk);
void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk);
void fgUnlinkBlock(BasicBlock* block);
#ifdef FEATURE_JIT_METHOD_PERF
unsigned fgMeasureIR();
#endif // FEATURE_JIT_METHOD_PERF
bool fgModified; // True if the flow graph has been modified recently
bool fgComputePredsDone; // Have we computed the bbPreds list
bool fgCheapPredsValid; // Is the bbCheapPreds list valid?
bool fgDomsComputed; // Have we computed the dominator sets?
bool fgReturnBlocksComputed; // Have we computed the return blocks list?
bool fgOptimizedFinally; // Did we optimize any try-finallys?
bool fgHasSwitch; // any BBJ_SWITCH jumps?
BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler
// begin blocks.
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
BlockSet fgAlwaysBlks; // Set of blocks which are the BBJ_ALWAYS part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair that should
// never be removed due to a requirement to use the BBJ_ALWAYS for generating code and
// not have "retless" blocks.
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
#ifdef DEBUG
bool fgReachabilitySetsValid; // Are the bbReach sets valid?
bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid?
#endif // DEBUG
bool fgRemoveRestOfBlock; // true if we know that we will throw
bool fgStmtRemoved; // true if we remove statements -> need new DFA
// There are two modes for ordering of the trees.
// - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in
// each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order)
// by traversing the tree according to the order of the operands.
// - In FGOrderLinear, the dominant ordering is the linear order.
enum FlowGraphOrder
{
FGOrderTree,
FGOrderLinear
};
FlowGraphOrder fgOrder;
// The following are boolean flags that keep track of the state of internal data structures
bool fgStmtListThreaded; // true if the node list is now threaded
bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions
bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights
bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights
bool fgSlopUsedInEdgeWeights; // true if there was some slop used when computing the edge weights
bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form
bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph
weight_t fgCalledCount; // count of the number of times this method was called
// This is derived from the profile data
// or is BB_UNITY_WEIGHT when we don't have profile data
#if defined(FEATURE_EH_FUNCLETS)
bool fgFuncletsCreated; // true if the funclet creation phase has been run
#endif // FEATURE_EH_FUNCLETS
bool fgGlobalMorph; // indicates if we are in the global morphing phase
// since fgMorphTree can be called from several places
bool impBoxTempInUse; // the temp below is valid and available
unsigned impBoxTemp; // a temporary that is used for boxing
#ifdef DEBUG
bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert,
// and we are trying to compile again in a "safer", minopts mode?
#endif
#if defined(DEBUG)
unsigned impInlinedCodeSize;
bool fgPrintInlinedMethods;
#endif
jitstd::vector<flowList*>* fgPredListSortVector;
//-------------------------------------------------------------------------
void fgInit();
PhaseStatus fgImport();
PhaseStatus fgTransformIndirectCalls();
PhaseStatus fgTransformPatchpoints();
PhaseStatus fgInline();
PhaseStatus fgRemoveEmptyTry();
PhaseStatus fgRemoveEmptyFinally();
PhaseStatus fgMergeFinallyChains();
PhaseStatus fgCloneFinally();
void fgCleanupContinuation(BasicBlock* continuation);
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
PhaseStatus fgUpdateFinallyTargetFlags();
void fgClearAllFinallyTargetBits();
void fgAddFinallyTargetFlags();
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
PhaseStatus fgTailMergeThrows();
void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
BasicBlock* nonCanonicalBlock,
BasicBlock* canonicalBlock,
flowList* predEdge);
void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock,
BasicBlock* nonCanonicalBlock,
BasicBlock* canonicalBlock,
flowList* predEdge);
GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType);
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
// Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals
// when this is necessary.
bool fgNeedToAddFinallyTargetBits;
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block,
BasicBlock* handler,
BlockToBlockMap& continuationMap);
GenTree* fgGetCritSectOfStaticMethod();
#if defined(FEATURE_EH_FUNCLETS)
void fgAddSyncMethodEnterExit();
GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter);
void fgConvertSyncReturnToLeave(BasicBlock* block);
#endif // FEATURE_EH_FUNCLETS
void fgAddReversePInvokeEnterExit();
bool fgMoreThanOneReturnBlock();
// The number of separate return points in the method.
unsigned fgReturnCount;
void fgAddInternal();
enum class FoldResult
{
FOLD_DID_NOTHING,
FOLD_CHANGED_CONTROL_FLOW,
FOLD_REMOVED_LAST_STMT,
FOLD_ALTERED_LAST_STMT,
};
FoldResult fgFoldConditional(BasicBlock* block);
void fgMorphStmts(BasicBlock* block);
void fgMorphBlocks();
void fgMergeBlockReturn(BasicBlock* block);
bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg));
void fgSetOptions();
#ifdef DEBUG
static fgWalkPreFn fgAssertNoQmark;
void fgPreExpandQmarkChecks(GenTree* expr);
void fgPostExpandQmarkChecks();
static void fgCheckQmarkAllowedForm(GenTree* tree);
#endif
IL_OFFSET fgFindBlockILOffset(BasicBlock* block);
void fgFixEntryFlowForOSR();
BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr);
BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr);
BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt);
BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR
BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ);
Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di);
Statement* fgNewStmtFromTree(GenTree* tree);
Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block);
Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di);
GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr);
void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt);
void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt);
void fgExpandQmarkNodes();
// Do "simple lowering." This functionality is (conceptually) part of "general"
// lowering that is distributed between fgMorph and the lowering phase of LSRA.
void fgSimpleLowering();
GenTree* fgInitThisClass();
GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper);
GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls);
bool backendRequiresLocalVarLifetimes()
{
return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars();
}
void fgLocalVarLiveness();
void fgLocalVarLivenessInit();
void fgPerNodeLocalVarLiveness(GenTree* node);
void fgPerBlockLocalVarLiveness();
VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block);
void fgLiveVarAnalysis(bool updateInternalOnly = false);
void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call);
void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node);
bool fgComputeLifeTrackedLocalDef(VARSET_TP& life,
VARSET_VALARG_TP keepAliveVars,
LclVarDsc& varDsc,
GenTreeLclVarCommon* node);
bool fgComputeLifeUntrackedLocal(VARSET_TP& life,
VARSET_VALARG_TP keepAliveVars,
LclVarDsc& varDsc,
GenTreeLclVarCommon* lclVarNode);
bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode);
void fgComputeLife(VARSET_TP& life,
GenTree* startNode,
GenTree* endNode,
VARSET_VALARG_TP volatileVars,
bool* pStmtInfoDirty DEBUGARG(bool* treeModf));
void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars);
bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange);
void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block);
bool fgRemoveDeadStore(GenTree** pTree,
LclVarDsc* varDsc,
VARSET_VALARG_TP life,
bool* doAgain,
bool* pStmtInfoDirty,
bool* pStoreRemoved DEBUGARG(bool* treeModf));
void fgInterBlockLocalVarLiveness();
// Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.:
// 1. for (BasicBlock* const block : compiler->Blocks()) ...
// 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ...
// 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ...
// In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3),
// both `startBlock` and `endBlock` must be non-null.
//
BasicBlockSimpleList Blocks() const
{
return BasicBlockSimpleList(fgFirstBB);
}
BasicBlockSimpleList Blocks(BasicBlock* startBlock) const
{
return BasicBlockSimpleList(startBlock);
}
BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const
{
return BasicBlockRangeList(startBlock, endBlock);
}
// The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name
// of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose
// whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us
// to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree.
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap;
NodeToUnsignedMap* m_opAsgnVarDefSsaNums;
NodeToUnsignedMap* GetOpAsgnVarDefSsaNums()
{
if (m_opAsgnVarDefSsaNums == nullptr)
{
m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator());
}
return m_opAsgnVarDefSsaNums;
}
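// Illustrative sketch (not part of the JIT source): recovering the "def" SSA number for a
// GTF_VAR_USEASG local node via JitHashTable::Lookup; GetSsaNumForLocalVarDef (declared
// further below) wraps this pattern.
//
//   unsigned defSsaNum;
//   if (GetOpAsgnVarDefSsaNums()->Lookup(lclNode, &defSsaNum))
//   {
//       // defSsaNum is the SSA name being defined by the partial definition
//   }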
// This map tracks nodes whose value numbers explicitly or implicitly depend on memory states.
// The map provides the entry block of the most closely enclosing loop that
// defines the memory region accessed when defining the nodes's VN.
//
// This information should be consulted when considering hoisting node out of a loop, as the VN
// for the node will only be valid within the indicated loop.
//
// It is not fine-grained enough to track memory dependence within loops, so it cannot be used
// for more general code motion.
//
// If a node does not have an entry in the map we currently assume the VN is not memory dependent
// and so memory does not constrain hoisting.
//
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap()
{
if (m_nodeToLoopMemoryBlockMap == nullptr)
{
m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator());
}
return m_nodeToLoopMemoryBlockMap;
}
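// Illustrative sketch (not part of the JIT source): a hoisting check against the map above,
// assuming 'tree' is the candidate node being considered for hoisting.
//
//   BasicBlock* loopEntry = nullptr;
//   if (GetNodeToLoopMemoryBlockMap()->Lookup(tree, &loopEntry))
//   {
//       // tree's VN is only valid within the loop whose entry block is 'loopEntry'
//   }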
void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN);
void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree);
// Requires value numbering phase to have completed. Returns the value number ("gtVN") of the
// "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the
// "use" VN. Performs a lookup into the map of (use asg tree -> def VN.) to return the "def's"
// VN.
inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree);
// Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl".
// Except: assumes that lcl is a def, and if it is
// a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def",
// rather than the "use" SSA number recorded in the tree "lcl".
inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl);
inline bool PreciseRefCountsRequired();
// Performs SSA conversion.
void fgSsaBuild();
// Reset any data structures to the state expected by "fgSsaBuild", so it can be run again.
void fgResetForSsa();
unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run.
// Returns "true" if this is a special variable that is never zero initialized in the prolog.
inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum);
// Returns "true" if the variable needs explicit zero initialization.
inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn);
// The value numbers for this compilation.
ValueNumStore* vnStore;
public:
ValueNumStore* GetValueNumStore()
{
return vnStore;
}
// Do value numbering (assign a value number to each
// tree node).
void fgValueNumber();
// Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN.
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// The 'indType' is the indirection type of the lhs of the assignment and will typically
// match the element type of the array or fldSeq. When this type doesn't match
// or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN]
//
ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
ValueNum arrVN,
ValueNum inxVN,
FieldSeqNode* fldSeq,
ValueNum rhsVN,
var_types indType);
// Requires that "tree" is a GT_IND marked as an array index, and that its address argument
// has been parsed to yield the other input arguments. If evaluation of the address
// can raise exceptions, those should be captured in the exception set "addrXvnp".
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique
// VN for the conservative VN.) Also marks the tree's argument as the address of an array element.
// The type tree->TypeGet() will typically match the element type of the array or fldSeq.
// When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN
//
ValueNum fgValueNumberArrIndexVal(GenTree* tree,
CORINFO_CLASS_HANDLE elemTypeEq,
ValueNum arrVN,
ValueNum inxVN,
ValueNumPair addrXvnp,
FieldSeqNode* fldSeq);
// Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown
// by evaluating the array index expression "tree". Returns the value number resulting from
// dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the
// "GT_IND" that does the dereference, and it is given the returned value number.
ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp);
// Compute the value number for a byref-exposed load of the given type via the given pointerVN.
ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN);
unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run.
// Utility functions for fgValueNumber.
// Perform value-numbering for the trees in "blk".
void fgValueNumberBlock(BasicBlock* blk);
// Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the
// innermost loop of which "entryBlock" is the entry. Returns the value number that should be
// assumed for the memoryKind at the start "entryBlk".
ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum);
// Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated.
// As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation.
void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg));
// Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be
// mutated.
void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg));
// For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap.
// As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store.
void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg));
// For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap.
void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg));
void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN);
// Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that
// value in that SSA #.
void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree);
// The input 'tree' is a leaf node that is a constant
// Assign the proper value number to the tree
void fgValueNumberTreeConst(GenTree* tree);
// If the VN store has been initialized, reassign the
// proper value number to the constant tree.
void fgUpdateConstTreeValueNumber(GenTree* tree);
// Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree.
// (With some exceptions: the VN of the lhs of an assignment is assigned as part of the
// assignment.)
void fgValueNumberTree(GenTree* tree);
void fgValueNumberAssignment(GenTreeOp* tree);
// Does value-numbering for a block assignment.
void fgValueNumberBlockAssignment(GenTree* tree);
bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src);
// Does value-numbering for a cast tree.
void fgValueNumberCastTree(GenTree* tree);
// Does value-numbering for an intrinsic tree.
void fgValueNumberIntrinsic(GenTree* tree);
#ifdef FEATURE_SIMD
// Does value-numbering for a GT_SIMD tree
void fgValueNumberSimd(GenTreeSIMD* tree);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
// Does value-numbering for a GT_HWINTRINSIC tree
void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree);
#endif // FEATURE_HW_INTRINSICS
// Does value-numbering for a call. We interpret some helper calls.
void fgValueNumberCall(GenTreeCall* call);
// Does value-numbering for a helper representing a cast operation.
void fgValueNumberCastHelper(GenTreeCall* call);
// Does value-numbering for a helper "call" that has a VN function symbol "vnf".
void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc);
// Requires "helpCall" to be a helper call. Assigns it a value number;
// we understand the semantics of some of the calls. Returns "true" if
// the call may modify the heap (we assume arbitrary memory side effects if so).
bool fgValueNumberHelperCall(GenTreeCall* helpCall);
// Requires that "helpFunc" is one of the pure Jit Helper methods.
// Returns the corresponding VNFunc to use for value numbering
VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc);
// Adds the exception set for the current tree node which has a memory indirection operation
void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr);
// Adds the exception sets for the current tree node which is performing a division or modulus operation
void fgValueNumberAddExceptionSetForDivision(GenTree* tree);
// Adds the exception set for the current tree node which is performing an overflow checking operation
void fgValueNumberAddExceptionSetForOverflow(GenTree* tree);
// Adds the exception set for the current tree node which is performing a bounds check operation
void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree);
// Adds the exception set for the current tree node which is performing a ckfinite operation
void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree);
// Adds the exception sets for the current tree node
void fgValueNumberAddExceptionSet(GenTree* tree);
#ifdef DEBUG
void fgDebugCheckExceptionSets();
void fgDebugCheckValueNumberedTree(GenTree* tree);
#endif
// These are the current value number for the memory implicit variables while
// doing value numbering. These are the value numbers under the "liberal" interpretation
// of memory values; the "conservative" interpretation needs no VN, since every access of
// memory yields an unknown value.
ValueNum fgCurMemoryVN[MemoryKindCount];
// Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT,
// requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit
// is 1, and the rest is an encoding of "elemTyp".
static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType)
{
if (elemStructType != nullptr)
{
assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF ||
varTypeIsIntegral(elemTyp));
assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid.
return elemStructType;
}
else
{
assert(elemTyp != TYP_STRUCT);
elemTyp = varTypeToSigned(elemTyp);
return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1);
}
}
// If "clsHnd" is the result of an "EncodePrim" call, returns true and sets "*pPrimType" to the
// var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is
// the struct type of the element).
static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd)
{
size_t clsHndVal = size_t(clsHnd);
if (clsHndVal & 0x1)
{
return var_types(clsHndVal >> 1);
}
else
{
return TYP_STRUCT;
}
}
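// Illustrative sketch (not part of the JIT source): round-tripping a primitive element type
// through the encoding above. A real struct handle (low-order bit zero) decodes to TYP_STRUCT.
//
//   CORINFO_CLASS_HANDLE h = EncodeElemType(TYP_INT, nullptr); // ((size_t)TYP_INT << 1) | 0x1
//   assert(DecodeElemType(h) == TYP_INT);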
// Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types
var_types getJitGCType(BYTE gcType);
// Returns true if the provided type should be treated as a primitive type
// for the unmanaged calling conventions.
bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd);
enum structPassingKind
{
SPK_Unknown, // Invalid value, never returned
SPK_PrimitiveType, // The struct is passed/returned using a primitive type.
SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that
// require a primitive type temp that is larger than the struct size.
// Currently used for structs of size 3, 5, 6, or 7 bytes.
SPK_ByValue, // The struct is passed/returned by value (using the ABI rules)
// for ARM64 and UNIX_X64 in multiple registers. (when all of the
// parameter registers are used, then the stack will be used)
// for X86 passed on the stack, for ARM32 passed in registers
// or the stack or split between registers and the stack.
SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers.
SPK_ByReference // The struct is passed/returned by reference to a copy/buffer.
};
// Get the "primitive" type that is is used when we are given a struct of size 'structSize'.
// For pointer sized structs the 'clsHnd' is used to determine if the struct contains GC ref.
// A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double
// If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned.
//
// isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
// hfa types.
//
var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg);
// Get the type that is used to pass values of the given struct type.
// isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
// hfa types.
//
var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
structPassingKind* wbPassStruct,
bool isVarArg,
unsigned structSize);
// Get the type that is used to return values of the given struct type.
// If the size is unknown, pass 0 and it will be determined from 'clsHnd'.
var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
CorInfoCallConvExtension callConv,
structPassingKind* wbPassStruct = nullptr,
unsigned structSize = 0);
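// Illustrative sketch (not part of the JIT source): querying how a struct is returned,
// assuming 'clsHnd' and 'callConv' describe the struct and calling convention in question.
//
//   structPassingKind howReturned = SPK_Unknown;
//   var_types retKind = getReturnTypeForStruct(clsHnd, callConv, &howReturned);
//   if (howReturned == SPK_ByReference)
//   {
//       // the struct is returned via a hidden return buffer rather than in registers
//   }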
#ifdef DEBUG
// Print a representation of "vnp" or "vn" on standard output.
// If "level" is non-zero, we also print out a partial expansion of the value.
void vnpPrint(ValueNumPair vnp, unsigned level);
void vnPrint(ValueNum vn, unsigned level);
#endif
bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2
// Dominator computation member functions
// Not exposed outside Compiler
protected:
bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2
// Compute immediate dominators, the dominator tree and its pre/post-order traversal numbers.
void fgComputeDoms();
void fgCompDominatedByExceptionalEntryBlocks();
BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block.
// Note: this is relatively slow compared to calling fgDominate(),
// especially when all that is needed is a single block-versus-block dominance check.
void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.)
void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks.
void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'.
bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets.
void fgComputeReachability(); // Perform flow graph node reachability analysis.
BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets.
void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be
// processed in topological sort order; this function takes care of that.
void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count);
BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph.
// Returns this as a set.
INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds.
DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph
// (performed by fgComputeDoms), this procedure builds the dominance tree represented
// using adjacency lists.
// In order to speed up queries of the form 'Does A dominate B?', we can perform a DFS preorder and postorder
// traversal of the dominance tree; the dominance query then becomes: A dominates B iff preOrder(A) <= preOrder(B)
// && postOrder(A) >= postOrder(B), making the computation O(1).
void fgNumberDomTree(DomTreeNode* domTree);
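// Illustrative sketch (not the actual implementation; the array names below are assumptions): once
// fgNumberDomTree has assigned the pre/post-order numbers, a dominance query is just two comparisons:
//
//   bool aDominatesB = (preOrderNum[A] <= preOrderNum[B]) && (postOrderNum[A] >= postOrderNum[B]);
//
// which is how such queries can be answered in O(1) after the numbering pass.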
// When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets,
// dominators, and possibly loops.
void fgUpdateChangedFlowGraph(const bool computePreds = true,
const bool computeDoms = true,
const bool computeReturnBlocks = false,
const bool computeLoops = false);
public:
// Compute the predecessors of the blocks in the control flow graph.
void fgComputePreds();
// Remove all predecessor information.
void fgRemovePreds();
// Compute the cheap flow graph predecessors lists. This is used in some early phases
// before the full predecessors lists are computed.
void fgComputeCheapPreds();
private:
void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred);
void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred);
public:
enum GCPollType
{
GCPOLL_NONE,
GCPOLL_CALL,
GCPOLL_INLINE
};
// Initialize the per-block variable sets (used for liveness analysis).
void fgInitBlockVarSets();
PhaseStatus fgInsertGCPolls();
BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block);
// Requires that "block" is a block that returns from
// a finally. Returns the number of successors (jump targets of
// blocks in the covered "try" that did a "LEAVE".)
unsigned fgNSuccsOfFinallyRet(BasicBlock* block);
// Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from
// a finally. Returns its "i"th successor (jump targets of
// blocks in the covered "try" that did a "LEAVE".)
// Requires that "i" < fgNSuccsOfFinallyRet(block).
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i);
private:
// Factor out common portions of the impls of the methods above.
void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres);
public:
// For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement,
// skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.)
// SwitchUniqueSuccSet contains the non-duplicated switch targets.
// (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget,
// which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already
// been computed for the switch block. If a switch block is deleted or is transformed into a non-switch,
// we leave the entry associated with the block, but it will no longer be accessed.)
struct SwitchUniqueSuccSet
{
unsigned numDistinctSuccs; // Number of distinct targets of the switch.
BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target
// successors.
// The switch block "switchBlk" just had an entry with value "from" modified to the value "to".
// Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk",
// remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation.
void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
};
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap;
private:
// Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow
// iteration over only the distinct successors.
BlockToSwitchDescMap* m_switchDescMap;
public:
BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true)
{
if ((m_switchDescMap == nullptr) && createIfNull)
{
m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator());
}
return m_switchDescMap;
}
// Invalidate the map of unique switch block successors. For example, since the hash key of the map
// depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that
// we don't accidentally look up and return the wrong switch data.
void InvalidateUniqueSwitchSuccMap()
{
m_switchDescMap = nullptr;
}
// Requires "switchBlock" to be a block that ends in a switch. Returns
// the corresponding SwitchUniqueSuccSet.
SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk);
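// Illustrative use (hypothetical call site): enumerate the distinct successors of a switch block,
// skipping duplicate jump targets:
//
//   SwitchUniqueSuccSet sd = GetDescriptorForSwitch(switchBlk);
//   for (unsigned i = 0; i < sd.numDistinctSuccs; i++)
//   {
//       BasicBlock* succ = sd.nonDuplicates[i];
//       // ... process the distinct successor 'succ' ...
//   }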
// The switch block "switchBlk" just had an entry with value "from" modified to the value "to".
// Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk",
// remove it from "this", and ensure that "to" is a member.
void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
// Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap.
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk);
BasicBlock* fgFirstBlockOfHandler(BasicBlock* block);
bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block);
flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred);
flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred);
flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred);
flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred);
void fgRemoveBlockAsPred(BasicBlock* block);
void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock);
void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget);
void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget);
void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred);
flowList* fgAddRefPred(BasicBlock* block,
BasicBlock* blockPred,
flowList* oldEdge = nullptr,
bool initializingPreds = false); // Only set to 'true' when we are computing preds in
// fgComputePreds()
void fgFindBasicBlocks();
bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt);
bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion);
BasicBlock* fgFindInsertPoint(unsigned regionIndex,
bool putInTryRegion,
BasicBlock* startBlk,
BasicBlock* endBlk,
BasicBlock* nearBlk,
BasicBlock* jumpBlk,
bool runRarely);
unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr);
void fgPostImportationCleanup();
void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false));
void fgUnlinkStmt(BasicBlock* block, Statement* stmt);
bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt);
void fgCreateLoopPreHeader(unsigned lnum);
void fgUnreachableBlock(BasicBlock* block);
void fgRemoveConditionalJump(BasicBlock* block);
BasicBlock* fgLastBBInMainFunction();
BasicBlock* fgEndBBAfterMainFunction();
void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd);
void fgRemoveBlock(BasicBlock* block, bool unreachable);
bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext);
void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext);
void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext);
BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst);
bool fgRenumberBlocks();
bool fgExpandRarelyRunBlocks();
bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter);
void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk);
enum FG_RELOCATE_TYPE
{
FG_RELOCATE_TRY, // relocate the 'try' region
FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary)
};
BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType);
#if defined(FEATURE_EH_FUNCLETS)
#if defined(TARGET_ARM)
void fgClearFinallyTargetBit(BasicBlock* block);
#endif // defined(TARGET_ARM)
bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block);
bool fgAnyIntraHandlerPreds(BasicBlock* block);
void fgInsertFuncletPrologBlock(BasicBlock* block);
void fgCreateFuncletPrologBlocks();
void fgCreateFunclets();
#else // !FEATURE_EH_FUNCLETS
bool fgRelocateEHRegions();
#endif // !FEATURE_EH_FUNCLETS
bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target);
bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum);
bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum);
bool fgOptimizeEmptyBlock(BasicBlock* block);
bool fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest);
bool fgOptimizeBranch(BasicBlock* bJump);
bool fgOptimizeSwitchBranches(BasicBlock* block);
bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev);
bool fgOptimizeSwitchJumps();
#ifdef DEBUG
void fgPrintEdgeWeights();
#endif
void fgComputeBlockAndEdgeWeights();
weight_t fgComputeMissingBlockWeights();
void fgComputeCalledCount(weight_t returnWeight);
void fgComputeEdgeWeights();
bool fgReorderBlocks();
PhaseStatus fgDetermineFirstColdBlock();
bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr);
bool fgUpdateFlowGraph(bool doTailDup = false);
void fgFindOperOrder();
// predicate that returns whether you should split here
typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data);
void fgSetBlockOrder();
void fgRemoveReturnBlock(BasicBlock* block);
/* Helper code that has been factored out */
inline void fgConvertBBToThrowBB(BasicBlock* block);
bool fgCastNeeded(GenTree* tree, var_types toType);
GenTree* fgDoNormalizeOnStore(GenTree* tree);
GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry);
// The following members check for loops that don't execute calls
bool fgLoopCallMarked;
void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB);
void fgLoopCallMark();
void fgMarkLoopHead(BasicBlock* block);
unsigned fgGetCodeEstimate(BasicBlock* block);
#if DUMP_FLOWGRAPHS
enum class PhasePosition
{
PrePhase,
PostPhase
};
const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map);
static void fgDumpTree(FILE* fgxFile, GenTree* const tree);
FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type);
bool fgDumpFlowGraph(Phases phase, PhasePosition pos);
#endif // DUMP_FLOWGRAPHS
#ifdef DEBUG
void fgDispDoms();
void fgDispReach();
void fgDispBBLiveness(BasicBlock* block);
void fgDispBBLiveness();
void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0);
void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees);
void fgDispBasicBlocks(bool dumpTrees = false);
void fgDumpStmtTree(Statement* stmt, unsigned bbNum);
void fgDumpBlock(BasicBlock* block);
void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock);
static fgWalkPreFn fgStress64RsltMulCB;
void fgStress64RsltMul();
void fgDebugCheckUpdate();
void fgDebugCheckBBNumIncreasing();
void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true);
void fgDebugCheckBlockLinks();
void fgDebugCheckLinks(bool morphTrees = false);
void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees);
void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt);
void fgDebugCheckNodesUniqueness();
void fgDebugCheckLoopTable();
void fgDebugCheckFlags(GenTree* tree);
void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags);
void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags);
void fgDebugCheckTryFinallyExits();
void fgDebugCheckProfileData();
bool fgDebugCheckIncomingProfileData(BasicBlock* block);
bool fgDebugCheckOutgoingProfileData(BasicBlock* block);
#endif // DEBUG
static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2);
static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2);
static GenTree* fgGetFirstNode(GenTree* tree);
//--------------------- Walking the trees in the IR -----------------------
struct fgWalkData
{
Compiler* compiler;
fgWalkPreFn* wtprVisitorFn;
fgWalkPostFn* wtpoVisitorFn;
void* pCallbackData; // user-provided data
GenTree* parent; // parent of current node, provided to callback
GenTreeStack* parentStack; // stack of parent nodes, if asked for
bool wtprLclsOnly; // whether to only visit lclvar nodes
#ifdef DEBUG
bool printModified; // callback can use this
#endif
};
fgWalkResult fgWalkTreePre(GenTree** pTree,
fgWalkPreFn* visitor,
void* pCallBackData = nullptr,
bool lclVarsOnly = false,
bool computeStack = false);
fgWalkResult fgWalkTree(GenTree** pTree,
fgWalkPreFn* preVisitor,
fgWalkPostFn* postVisitor,
void* pCallBackData = nullptr);
void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData);
//----- Postorder
fgWalkResult fgWalkTreePost(GenTree** pTree,
fgWalkPostFn* visitor,
void* pCallBackData = nullptr,
bool computeStack = false);
// An fgWalkPreFn that looks for expressions that have inline throws in
// minopts mode. Basically it looks for trees with gtOverflowEx() or
// GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It
// returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assuming flags are
// properly propagated to parent trees). It returns WALK_CONTINUE
// otherwise.
static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data);
static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data);
static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data);
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
friend class SsaBuilder;
friend struct ValueNumberState;
//--------------------- Detect the basic blocks ---------------------------
BasicBlock** fgBBs; // Table of pointers to the BBs
void fgInitBBLookup();
BasicBlock* fgLookupBB(unsigned addr);
bool fgCanSwitchToOptimized();
void fgSwitchToOptimized(const char* reason);
bool fgMayExplicitTailCall();
void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget);
void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock);
void fgLinkBasicBlocks();
unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget);
void fgCheckBasicBlockControlFlow();
void fgControlFlowPermitted(BasicBlock* blkSrc,
BasicBlock* blkDest,
bool IsLeave = false /* is the src a leave block */);
bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling);
void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining);
void fgAdjustForAddressExposedOrWrittenThis();
unsigned fgStressBBProf()
{
#ifdef DEBUG
unsigned result = JitConfig.JitStressBBProf();
if (result == 0)
{
if (compStressCompile(STRESS_BB_PROFILE, 15))
{
result = 1;
}
}
return result;
#else
return 0;
#endif
}
bool fgHaveProfileData();
bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight);
Instrumentor* fgCountInstrumentor;
Instrumentor* fgClassInstrumentor;
PhaseStatus fgPrepareToInstrumentMethod();
PhaseStatus fgInstrumentMethod();
PhaseStatus fgIncorporateProfileData();
void fgIncorporateBlockCounts();
void fgIncorporateEdgeCounts();
CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema,
UINT32 countSchemaItems,
BYTE* pInstrumentationData,
int32_t ilOffset,
CLRRandom* random);
public:
const char* fgPgoFailReason;
bool fgPgoDisabled;
ICorJitInfo::PgoSource fgPgoSource;
ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema;
BYTE* fgPgoData;
UINT32 fgPgoSchemaCount;
HRESULT fgPgoQueryResult;
UINT32 fgNumProfileRuns;
UINT32 fgPgoBlockCounts;
UINT32 fgPgoEdgeCounts;
UINT32 fgPgoClassProfiles;
unsigned fgPgoInlineePgo;
unsigned fgPgoInlineeNoPgo;
unsigned fgPgoInlineeNoPgoSingleBlock;
void WalkSpanningTree(SpanningTreeVisitor* visitor);
void fgSetProfileWeight(BasicBlock* block, weight_t weight);
void fgApplyProfileScale();
bool fgHaveSufficientProfileData();
bool fgHaveTrustedProfileData();
// fgIsUsingProfileWeights - returns true if we have real profile data for this method
// or if we have some fake profile data for the stress mode
bool fgIsUsingProfileWeights()
{
return (fgHaveProfileData() || fgStressBBProf());
}
// fgProfileRunsCount - returns total number of scenario runs for the profile data
// or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data.
unsigned fgProfileRunsCount()
{
return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED;
}
//-------- Insert a statement at the start or end of a basic block --------
#ifdef DEBUG
public:
static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true);
#endif
public:
Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt);
Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
private:
void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt);
void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt);
void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt);
public:
void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt);
private:
Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList);
// Create a new temporary variable to hold the result of *ppTree,
// and transform the graph accordingly.
GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr);
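// Rough sketch of the transformation (illustrative only; the exact node shapes and the precise form of
// the returned tree are assumptions): the tree at *ppTree is captured in a fresh temp so its value can
// be referenced more than once:
//
//   before:  *ppTree == T
//   after:   *ppTree == COMMA(ASG(tmpN, T), tmpN)
//
// with the function returning a tree that reads tmpN for use elsewhere by the caller.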
GenTree* fgMakeMultiUse(GenTree** ppTree);
private:
// Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node.
GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree);
bool fgOperIsBitwiseRotationRoot(genTreeOps oper);
#if !defined(TARGET_64BIT)
// Recognize and morph a long multiplication with 32 bit operands.
GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul);
GenTreeOp* fgMorphLongMul(GenTreeOp* mul);
#endif
//-------- Determine the order in which the trees will be evaluated -------
unsigned fgTreeSeqNum;
GenTree* fgTreeSeqLst;
GenTree* fgTreeSeqBeg;
GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false);
void fgSetTreeSeqHelper(GenTree* tree, bool isLIR);
void fgSetTreeSeqFinish(GenTree* tree, bool isLIR);
void fgSetStmtSeq(Statement* stmt);
void fgSetBlockOrder(BasicBlock* block);
//------------------------- Morphing --------------------------------------
unsigned fgPtrArgCntMax;
public:
//------------------------------------------------------------------------
// fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method
// can push on the stack. This value is calculated during morph.
//
// Return Value:
// Returns fgPtrArgCntMax, which is a private field.
//
unsigned fgGetPtrArgCntMax() const
{
return fgPtrArgCntMax;
}
//------------------------------------------------------------------------
// fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method
// can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations.
//
void fgSetPtrArgCntMax(unsigned argCntMax)
{
fgPtrArgCntMax = argCntMax;
}
bool compCanEncodePtrArgCntMax();
private:
hashBv* fgOutgoingArgTemps;
hashBv* fgCurrentlyInUseArgTemps;
void fgSetRngChkTarget(GenTree* tree, bool delay = true);
BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay);
#if REARRANGE_ADDS
void fgMoveOpsLeft(GenTree* tree);
#endif
bool fgIsCommaThrow(GenTree* tree, bool forFolding = false);
bool fgIsThrow(GenTree* tree);
bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2);
bool fgIsBlockCold(BasicBlock* block);
GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper);
GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true);
GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs);
// A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address,
// it is useful to know whether the address will be immediately dereferenced, or whether the address value will
// be used, perhaps by passing it as an argument to a called method. This affects how null checking is done:
// for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we
// know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that
// all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently
// small; hence the other fields of MorphAddrContext.
enum MorphAddrContextKind
{
MACK_Ind,
MACK_Addr,
};
struct MorphAddrContext
{
MorphAddrContextKind m_kind;
bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between
// top-level indirection and here have been constants.
size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true.
// In that case, is the sum of those constant offsets.
MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0)
{
}
};
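// Illustrative use (hypothetical; actual call sites vary): when morphing the address operand of an
// indirection, the caller can pass a MACK_Ind context so that sufficiently small constant offsets
// may rely on implicit (page-protection based) null checks:
//
//   MorphAddrContext indMac(MACK_Ind);
//   addr = fgMorphTree(addr, &indMac);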
// A MACK_CopyBlock context is immutable, so we can just make one of these and share it.
static MorphAddrContext s_CopyBlockMAC;
#ifdef FEATURE_SIMD
GenTree* getSIMDStructFromField(GenTree* tree,
CorInfoType* simdBaseJitTypeOut,
unsigned* indexOut,
unsigned* simdSizeOut,
bool ignoreUsedInSIMDIntrinsic = false);
GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree);
GenTree* fgMorphFieldToSimdGetElement(GenTree* tree);
bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt);
void impMarkContiguousSIMDFieldAssignments(Statement* stmt);
// fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking previous simd field assignment
// in function: Compiler::impMarkContiguousSIMDFieldAssignments.
Statement* fgPreviousCandidateSIMDFieldAsgStmt;
#endif // FEATURE_SIMD
GenTree* fgMorphArrayIndex(GenTree* tree);
GenTree* fgMorphExpandCast(GenTreeCast* tree);
GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl);
void fgInitArgInfo(GenTreeCall* call);
GenTreeCall* fgMorphArgs(GenTreeCall* call);
void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass);
GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph);
public:
bool fgAddrCouldBeNull(GenTree* addr);
private:
GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac);
bool fgCanFastTailCall(GenTreeCall* call, const char** failReason);
#if FEATURE_FASTTAILCALL
bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee);
#endif
bool fgCheckStmtAfterTailCall();
GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help);
bool fgCanTailCallViaJitHelper();
void fgMorphTailCallViaJitHelper(GenTreeCall* call);
GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall,
CORINFO_METHOD_HANDLE callTargetStubHnd,
CORINFO_METHOD_HANDLE dispatcherHnd);
GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle);
GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle);
GenTree* getVirtMethodPointerTree(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_CALL_INFO* pCallInfo);
GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent);
GenTree* fgMorphPotentialTailCall(GenTreeCall* call);
GenTree* fgGetStubAddrArg(GenTreeCall* call);
unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry);
void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall);
Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg,
fgArgTabEntry* argTabEntry,
unsigned lclParamNum,
BasicBlock* block,
const DebugInfo& callDI,
Statement* tmpAssignmentInsertionPoint,
Statement* paramAssignmentInsertionPoint);
GenTree* fgMorphCall(GenTreeCall* call);
GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call);
void fgMorphCallInline(GenTreeCall* call, InlineResult* result);
void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext);
#if DEBUG
void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call);
static fgWalkPreFn fgFindNonInlineCandidate;
#endif
GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE* ExactContextHnd,
CORINFO_RESOLVED_TOKEN* ldftnToken);
GenTree* fgMorphLeaf(GenTree* tree);
void fgAssignSetVarDef(GenTree* tree);
GenTree* fgMorphOneAsgBlockOp(GenTree* tree);
GenTree* fgMorphInitBlock(GenTree* tree);
GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize);
GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false);
GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd);
GenTree* fgMorphCopyBlock(GenTree* tree);
GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree);
GenTree* fgMorphForRegisterFP(GenTree* tree);
GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr);
GenTree* fgOptimizeCast(GenTreeCast* cast);
GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp);
GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp);
#ifdef FEATURE_HW_INTRINSICS
GenTree* fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node);
#endif
GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree);
GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp);
GenTree* fgOptimizeAddition(GenTreeOp* add);
GenTree* fgOptimizeMultiply(GenTreeOp* mul);
GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp);
GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects);
GenTree* fgMorphRetInd(GenTreeUnOp* tree);
GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree);
GenTree* fgMorphSmpOpOptional(GenTreeOp* tree);
GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp);
GenTree* fgMorphConst(GenTree* tree);
bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2);
GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true);
GenTreeOp* fgMorphCommutative(GenTreeOp* tree);
GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree);
GenTree* fgMorphReduceAddOps(GenTree* tree);
public:
GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr);
private:
void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree));
void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree));
void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0));
Statement* fgMorphStmt;
unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be
// used when morphing big offsets.
//----------------------- Liveness analysis -------------------------------
VARSET_TP fgCurUseSet; // vars used by block (before an assignment)
VARSET_TP fgCurDefSet; // vars assigned by block (before a use)
MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory.
MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory.
MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value.
bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points.
void fgMarkUseDef(GenTreeLclVarCommon* tree);
void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope);
void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope);
void fgExtendDbgScopes();
void fgExtendDbgLifetimes();
#ifdef DEBUG
void fgDispDebugScopes();
#endif // DEBUG
//-------------------------------------------------------------------------
//
// The following keeps track of any code we've added for things like array
// range checking or explicit calls to enable GC, and so on.
//
public:
struct AddCodeDsc
{
AddCodeDsc* acdNext;
BasicBlock* acdDstBlk; // block to which we jump
unsigned acdData;
SpecialCodeKind acdKind; // what kind of a special block is this?
#if !FEATURE_FIXED_OUT_ARGS
bool acdStkLvlInit; // has acdStkLvl value been already set?
unsigned acdStkLvl; // stack level in stack slots.
#endif // !FEATURE_FIXED_OUT_ARGS
};
private:
static unsigned acdHelper(SpecialCodeKind codeKind);
AddCodeDsc* fgAddCodeList;
bool fgAddCodeModf;
bool fgRngChkThrowAdded;
AddCodeDsc* fgExcptnTargetCache[SCK_COUNT];
BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind);
BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind);
public:
AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData);
bool fgUseThrowHelperBlocks();
AddCodeDsc* fgGetAdditionalCodeDescriptors()
{
return fgAddCodeList;
}
private:
bool fgIsCodeAdded();
bool fgIsThrowHlpBlk(BasicBlock* block);
#if !FEATURE_FIXED_OUT_ARGS
unsigned fgThrowHlpBlkStkLevel(BasicBlock* block);
#endif // !FEATURE_FIXED_OUT_ARGS
unsigned fgBigOffsetMorphingTemps[TYP_COUNT];
unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo);
void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext);
void fgInsertInlineeBlocks(InlineInfo* pInlineInfo);
Statement* fgInlinePrependStatements(InlineInfo* inlineInfo);
void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt);
#if FEATURE_MULTIREG_RET
GenTree* fgGetStructAsStructPtr(GenTree* tree);
GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd);
void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd);
#endif // FEATURE_MULTIREG_RET
static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder;
static fgWalkPostFn fgLateDevirtualization;
#ifdef DEBUG
static fgWalkPreFn fgDebugCheckInlineCandidates;
void CheckNoTransformableIndirectCallsRemain();
static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls;
#endif
void fgPromoteStructs();
void fgMorphStructField(GenTree* tree, GenTree* parent);
void fgMorphLocalField(GenTree* tree, GenTree* parent);
// Reset the refCount for implicit byrefs.
void fgResetImplicitByRefRefCount();
// Change implicit byrefs' types from struct to pointer, and for any that were
// promoted, create new promoted struct temps.
void fgRetypeImplicitByRefArgs();
// Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection).
bool fgMorphImplicitByRefArgs(GenTree* tree);
GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr);
// Clear up annotations for any struct promotion temps created for implicit byrefs.
void fgMarkDemotedImplicitByRefArgs();
void fgMarkAddressExposedLocals();
void fgMarkAddressExposedLocals(Statement* stmt);
PhaseStatus fgForwardSub();
bool fgForwardSubBlock(BasicBlock* block);
bool fgForwardSubStatement(Statement* statement);
static fgWalkPreFn fgUpdateSideEffectsPre;
static fgWalkPostFn fgUpdateSideEffectsPost;
// The given local variable, required to be a struct variable, is being assigned via
// a "lclField", to make it masquerade as an integral type in the ABI. Make sure that
// the variable is not enregistered, and is therefore not promoted independently.
void fgLclFldAssign(unsigned lclNum);
static fgWalkPreFn gtHasLocalsWithAddrOpCB;
enum TypeProducerKind
{
TPK_Unknown = 0, // May not be a RuntimeType
TPK_Handle = 1, // RuntimeType via handle
TPK_GetType = 2, // RuntimeType via Object.get_Type()
TPK_Null = 3, // Tree value is null
TPK_Other = 4 // RuntimeType via other means
};
TypeProducerKind gtGetTypeProducerKind(GenTree* tree);
bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call);
bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr);
bool gtIsActiveCSE_Candidate(GenTree* tree);
bool fgIsBigOffset(size_t offset);
bool fgNeedReturnSpillTemp();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Optimizer XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
void optInit();
GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt);
GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt);
void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt);
protected:
// Do hoisting for all loops.
void optHoistLoopCode();
// To represent sets of VN's that have already been hoisted in outer loops.
typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet;
struct LoopHoistContext
{
private:
// The set of variables hoisted in the current loop (or nullptr if there are none).
VNSet* m_pHoistedInCurLoop;
public:
// Value numbers of expressions that have been hoisted in parent loops in the loop nest.
VNSet m_hoistedInParentLoops;
// Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest.
// Previous decisions on loop-invariance of value numbers in the current loop.
VNSet m_curLoopVnInvariantCache;
VNSet* GetHoistedInCurLoop(Compiler* comp)
{
if (m_pHoistedInCurLoop == nullptr)
{
m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist());
}
return m_pHoistedInCurLoop;
}
VNSet* ExtractHoistedInCurLoop()
{
VNSet* res = m_pHoistedInCurLoop;
m_pHoistedInCurLoop = nullptr;
return res;
}
LoopHoistContext(Compiler* comp)
: m_pHoistedInCurLoop(nullptr)
, m_hoistedInParentLoops(comp->getAllocatorLoopHoist())
, m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist())
{
}
};
// Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it.
// Tracks the expressions that have been hoisted by containing loops by temporarily recording their
// value numbers in "m_hoistedInParentLoops". This set is not modified by the call.
void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt);
// Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.)
// Assumes that expressions have been hoisted in containing loops if their value numbers are in
// "m_hoistedInParentLoops".
//
void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt);
// Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable)
// outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted
// expressions to "hoistInLoop".
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext);
// Return true if the tree looks profitable to hoist out of loop 'lnum'.
bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum);
// Performs the hoisting 'tree' into the PreHeader for loop 'lnum'
void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt);
// Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum".
// Constants and init values are always loop invariant.
// VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop.
bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs);
// If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop
// in the loop table.
bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum);
// Records the set of "side effects" of all loops: fields (object instance and static)
// written to, and SZ-array element type equivalence classes updated.
void optComputeLoopSideEffects();
#ifdef DEBUG
bool optAnyChildNotRemoved(unsigned loopNum);
#endif // DEBUG
// Mark a loop as removed.
void optMarkLoopRemoved(unsigned loopNum);
private:
// Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop,
// including all nested loops, and records the set of "side effects" of the loop: fields (object instance and
// static) written to, and SZ-array element type equivalence classes updated.
void optComputeLoopNestSideEffects(unsigned lnum);
// Given a loop number 'lnum' mark it and any nested loops as having 'memoryHavoc'
void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc);
// Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part.
// Returns false if we encounter a block that is not marked as being inside a loop.
//
bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk);
// Hoist the expression "expr" out of loop "lnum".
void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum);
public:
void optOptimizeBools();
public:
PhaseStatus optInvertLoops(); // Invert loops so they're entered at top and tested at bottom.
PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method
PhaseStatus optSetBlockWeights();
PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table
void optFindLoops();
PhaseStatus optCloneLoops();
void optCloneLoop(unsigned loopInd, LoopCloneContext* context);
void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight);
PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info)
void optRemoveRedundantZeroInits();
protected:
// This enumeration describes what is killed by a call.
enum callInterf
{
CALLINT_NONE, // no interference (most helpers)
CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ)
CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ)
CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT)
CALLINT_ALL, // kills everything (normal method call)
};
enum class FieldKindForVN
{
SimpleStatic,
WithBaseAddr
};
public:
// A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in
// bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered"
// in bbNext order; we use comparisons on the bbNum to decide order.)
// The blocks that define the body are
// top <= entry <= bottom
// The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a
// single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at
// Compiler::optFindNaturalLoops().
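// Informal sketch of the layout described above (simplified; the full picture is at
// Compiler::optFindNaturalLoops()):
//
//             HEAD   (outside the loop; has ENTRY as a successor)
//               |
//               v
//    TOP ... ENTRY ... BOTTOM     (lexically contiguous, in bbNext order)
//     ^                   |
//     +---- back edge ----+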
struct LoopDsc
{
BasicBlock* lpHead; // HEAD of the loop (not part of the loop itself) -- has ENTRY as a successor.
BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext
// order) reachable in this loop.
BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM)
BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP)
BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM)
callInterf lpAsgCall; // "callInterf" for calls in the loop
ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked)
varRefKinds lpAsgInds : 8; // set of inds modified within the loop
LoopFlags lpFlags;
unsigned char lpExitCnt; // number of exits from the loop
unsigned char lpParent; // The index of the most-nested loop that completely contains this one,
// or else BasicBlock::NOT_IN_LOOP if no such loop exists.
unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists.
// (Actually, an "immediately" nested loop --
// no other child of this loop is a parent of lpChild.)
unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent,
// or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop
// by following "lpChild" then "lpSibling" links.
bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary
// memory side effects. If this is set, the fields below
// may not be accurate (since they become irrelevant.)
VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop
VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop
// The following counts are used for hoisting profitability checks.
int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been
// hoisted
int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop
int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop
int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been
// hoisted
int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop
int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop
typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN>
FieldHandleSet;
FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified
// in the loop.
typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet;
ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that
// arrays of that type are modified
// in the loop.
// Adds the variable liveness information for 'blk' to 'this' LoopDsc
void AddVariableLiveness(Compiler* comp, BasicBlock* blk);
inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind);
// This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles
// (shifted left, with a low-order bit set to distinguish.)
// Use the {Encode/Decode}ElemType methods to construct/destruct these.
inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd);
/* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */
GenTree* lpIterTree; // The "i = i <op> const" tree
unsigned lpIterVar() const; // iterator variable #
int lpIterConst() const; // the constant with which the iterator is incremented
genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.)
void VERIFY_lpIterTree() const;
var_types lpIterOperType() const; // For overflow instructions
// Set to the block where we found the initialization for LPFLG_CONST_INIT or LPFLG_VAR_INIT loops.
// Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block.
BasicBlock* lpInitBlock;
union {
int lpConstInit; // initial constant value of iterator
// : Valid if LPFLG_CONST_INIT
unsigned lpVarInit; // initial local var number to which we initialize the iterator
// : Valid if LPFLG_VAR_INIT
};
// The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var")
GenTree* lpTestTree; // pointer to the node containing the loop test
genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE,
// etc.)
void VERIFY_lpTestTree() const;
bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition
GenTree* lpIterator() const; // the iterator node in the loop test
GenTree* lpLimit() const; // the limit node in the loop test
// Limit constant value of iterator - loop condition is "i RELOP const"
// : Valid if LPFLG_CONST_LIMIT
int lpConstLimit() const;
// The lclVar # in the loop condition ( "i RELOP lclVar" )
// : Valid if LPFLG_VAR_LIMIT
unsigned lpVarLimit() const;
// The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" )
// : Valid if LPFLG_ARRLEN_LIMIT
bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const;
// Returns "true" iff this is a "top entry" loop.
bool lpIsTopEntry() const
{
if (lpHead->bbNext == lpEntry)
{
assert(lpHead->bbFallsThrough());
assert(lpTop == lpEntry);
return true;
}
else
{
return false;
}
}
// Returns "true" iff "*this" contains the blk.
bool lpContains(BasicBlock* blk) const
{
return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum;
}
// Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops
// to be equal, but requiring bottoms to be different.)
bool lpContains(BasicBlock* top, BasicBlock* bottom) const
{
return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum;
}
// Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring
// bottoms to be different.)
bool lpContains(const LoopDsc& lp2) const
{
return lpContains(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff "*this" is (properly) contained by the range [top, bottom]
// (allowing tops to be equal, but requiring bottoms to be different.)
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const
{
return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum;
}
// Returns "true" iff "*this" is (properly) contained by "lp2"
// (allowing tops to be equal, but requiring bottoms to be different.)
bool lpContainedBy(const LoopDsc& lp2) const
{
return lpContainedBy(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff "*this" is disjoint from the range [top, bottom].
bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const
{
return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum;
}
// Returns "true" iff "*this" is disjoint from "lp2".
bool lpDisjoint(const LoopDsc& lp2) const
{
return lpDisjoint(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff the loop is well-formed (see code for defn).
bool lpWellFormed() const
{
return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum &&
(lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum);
}
#ifdef DEBUG
void lpValidatePreHeader() const
{
// If this is called, we expect there to be a pre-header.
assert(lpFlags & LPFLG_HAS_PREHEAD);
// The pre-header must unconditionally enter the loop.
assert(lpHead->GetUniqueSucc() == lpEntry);
// The loop block must be marked as a pre-header.
assert(lpHead->bbFlags & BBF_LOOP_PREHEADER);
// The loop entry must have a single non-loop predecessor, which is the pre-header.
// We can't assume here that the bbNums are properly ordered, so we can't do a simple lpContains()
// check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`.
}
#endif // DEBUG
// LoopBlocks: convenience method for enabling range-based `for` iteration over all the
// blocks in a loop, e.g.:
// for (BasicBlock* const block : loop->LoopBlocks()) ...
// Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order
// from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered
// to be part of the loop.
//
BasicBlockRangeList LoopBlocks() const
{
return BasicBlockRangeList(lpTop, lpBottom);
}
};
protected:
bool fgMightHaveLoop(); // returns true if there are any back edges
bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability
public:
LoopDsc* optLoopTable; // loop descriptor table
unsigned char optLoopCount; // number of tracked loops
unsigned char loopAlignCandidates; // number of loops identified for alignment
// Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or
// loop table pointers from the previous epoch are invalid.
// TODO: validate this in some way?
unsigned optCurLoopEpoch;
void NewLoopEpoch()
{
++optCurLoopEpoch;
JITDUMP("New loop epoch %d\n", optCurLoopEpoch);
}
#ifdef DEBUG
unsigned char loopsAligned; // number of loops actually aligned
#endif // DEBUG
bool optRecordLoop(BasicBlock* head,
BasicBlock* top,
BasicBlock* entry,
BasicBlock* bottom,
BasicBlock* exit,
unsigned char exitCnt);
void optClearLoopIterInfo();
#ifdef DEBUG
void optPrintLoopInfo(unsigned lnum, bool printVerbose = false);
void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false);
void optPrintLoopTable();
#endif
protected:
unsigned optCallCount; // number of calls made in the method
unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method
unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method
unsigned optLoopsCloned; // number of loops cloned in the current method.
#ifdef DEBUG
void optCheckPreds();
#endif
void optResetLoopInfo();
void optFindAndScaleGeneralLoopBlocks();
// Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads.
void optMarkLoopHeads();
void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk);
void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk);
void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false);
bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt);
unsigned optIsLoopIncrTree(GenTree* incr);
bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar);
bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar);
bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar);
bool optExtractInitTestIncr(
BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr);
void optFindNaturalLoops();
void optIdentifyLoopsForAlignment();
// Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' --
// each loop has a unique "top." Returns "true" iff the flowgraph has been modified.
bool optCanonicalizeLoopNest(unsigned char loopInd);
// Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top,"
// unshared with any other loop. Returns "true" iff the flowgraph has been modified
bool optCanonicalizeLoop(unsigned char loopInd);
// Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP".
// Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP".
// Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2".
// A loop contains itself.
bool optLoopContains(unsigned l1, unsigned l2) const;
// Updates the loop table by changing loop "loopInd", whose head is required
// to be "from", to be "to". Also performs this transformation for any
// loop nested in "loopInd" that shares the same head as "loopInd".
void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to);
void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false);
// Marks the containsCall information to "lnum" and any parent loops.
void AddContainsCallAllContainingLoops(unsigned lnum);
// Adds the variable liveness information from 'blk' to "lnum" and any parent loops.
void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk);
// Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops.
void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind);
// Adds "elemType" to the set of modified array element types of "lnum" and any parent loops.
void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType);
// Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone
// of "from".) Copies the jump destination from "from" to "to".
void optCopyBlkDest(BasicBlock* from, BasicBlock* to);
// Returns true if 'block' is an entry block for any loop in 'optLoopTable'
bool optIsLoopEntry(BasicBlock* block) const;
// The depth of the loop described by "lnum" (an index into the loop table.) (0 == top level)
unsigned optLoopDepth(unsigned lnum)
{
assert(lnum < optLoopCount);
unsigned depth = 0;
while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP)
{
++depth;
}
return depth;
}
// Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score.
struct OptInvertCountTreeInfoType
{
int sharedStaticHelperCount;
int arrayLengthCount;
};
static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data);
bool optInvertWhileLoop(BasicBlock* block);
private:
static bool optIterSmallOverflow(int iterAtExit, var_types incrType);
static bool optIterSmallUnderflow(int iterAtExit, var_types decrType);
bool optComputeLoopRep(int constInit,
int constLimit,
int iterInc,
genTreeOps iterOper,
var_types iterType,
genTreeOps testOper,
bool unsignedTest,
bool dupCond,
unsigned* iterCount);
static fgWalkPreFn optIsVarAssgCB;
protected:
bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var);
bool optIsVarAssgLoop(unsigned lnum, unsigned var);
int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE);
bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit);
protected:
// The following is the upper limit on how many expressions we'll keep track
// of for the CSE analysis.
//
static const unsigned MAX_CSE_CNT = EXPSET_SZ;
static const int MIN_CSE_COST = 2;
// BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask.
// This BitVec uses one bit per CSE candidate
BitVecTraits* cseMaskTraits; // one bit per CSE candidate
// BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm.
// Two bits are allocated per CSE candidate to compute CSE availability
// plus an extra bit to handle the initial unvisited case.
// (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.)
//
// The two bits per CSE candidate have the following meanings:
// 11 - The CSE is available, and is also available when considering calls as killing availability.
// 10 - The CSE is available, but is not available when considering calls as killing availability.
// 00 - The CSE is not available
// 01 - An illegal combination
//
BitVecTraits* cseLivenessTraits;
//-----------------------------------------------------------------------------------------------------------------
// genCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index.
// Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate
// CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from
// GET_CSE_INDEX().
//
static unsigned genCSEnum2bit(unsigned CSEnum)
{
assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT));
return CSEnum - 1;
}
//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE.
//
static unsigned getCSEAvailBit(unsigned CSEnum)
{
return genCSEnum2bit(CSEnum) * 2;
}
//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit
// for a CSE considering calls as killing availability bit (see description above).
//
static unsigned getCSEAvailCrossCallBit(unsigned CSEnum)
{
return getCSEAvailBit(CSEnum) + 1;
}
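    // To illustrate the mapping above: for CSE #03, genCSEnum2bit(3) == 2, so getCSEAvailBit(3) == 4 and
    // getCSEAvailCrossCallBit(3) == 5; that candidate's pair of availability bits occupies bit positions
    // 4 and 5 in bbCseGen and the other CSE dataflow sets.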
void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true);
EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites
/* Generic list of nodes - used by the CSE logic */
struct treeLst
{
treeLst* tlNext;
GenTree* tlTree;
};
struct treeStmtLst
{
treeStmtLst* tslNext;
GenTree* tslTree; // tree node
Statement* tslStmt; // statement containing the tree
BasicBlock* tslBlock; // block containing the statement
};
// The following logic keeps track of expressions via a simple hash table.
struct CSEdsc
{
CSEdsc* csdNextInBucket; // used by the hash table
        size_t csdHashKey; // the original hashkey
ssize_t csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def
ValueNum csdConstDefVN; // When we CSE similar constants, this is the ValueNumber that we use for the LclVar
// assignment
unsigned csdIndex; // 1..optCSECandidateCount
bool csdIsSharedConst; // true if this CSE is a shared const
bool csdLiveAcrossCall;
unsigned short csdDefCount; // definition count
unsigned short csdUseCount; // use count (excluding the implicit uses at defs)
weight_t csdDefWtCnt; // weighted def count
weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)
GenTree* csdTree; // treenode containing the 1st occurrence
Statement* csdStmt; // stmt containing the 1st occurrence
BasicBlock* csdBlock; // block containing the 1st occurrence
treeStmtLst* csdTreeList; // list of matching tree nodes: head
treeStmtLst* csdTreeLast; // list of matching tree nodes: tail
// ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing
        // and GT_IND nodes always have a valid struct handle.
//
CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE
bool csdStructHndMismatch;
ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE.
// This will be set to NoVN if we decide to abandon this CSE
ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses.
ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value
// number, this will reflect it; otherwise, NoVN.
                                   // not used for shared const CSEs
};
static const size_t s_optCSEhashSizeInitial;
static const size_t s_optCSEhashGrowthFactor;
static const size_t s_optCSEhashBucketSize;
size_t optCSEhashSize; // The current size of hashtable
size_t optCSEhashCount; // Number of entries in hashtable
size_t optCSEhashMaxCountBeforeResize; // Number of entries before resize
CSEdsc** optCSEhash;
CSEdsc** optCSEtab;
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap;
NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be
// re-numbered with the bound to improve range check elimination
// Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found.
void optCseUpdateCheckedBoundMap(GenTree* compare);
void optCSEstop();
CSEdsc* optCSEfindDsc(unsigned index);
bool optUnmarkCSE(GenTree* tree);
// user defined callback data for the tree walk function optCSE_MaskHelper()
struct optCSE_MaskData
{
EXPSET_TP CSE_defMask;
EXPSET_TP CSE_useMask;
};
// Treewalk helper for optCSE_DefMask and optCSE_UseMask
static fgWalkPreFn optCSE_MaskHelper;
    // This function walks all the nodes of a given tree
    // and returns the mask of CSE definitions and uses for the tree
//
void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData);
    // Given a binary tree node, return true if it is safe to swap the order of evaluation for op1 and op2.
bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);
struct optCSEcostCmpEx
{
bool operator()(const CSEdsc* op1, const CSEdsc* op2);
};
struct optCSEcostCmpSz
{
bool operator()(const CSEdsc* op1, const CSEdsc* op2);
};
void optCleanupCSEs();
#ifdef DEBUG
void optEnsureClearCSEInfo();
#endif // DEBUG
static bool Is_Shared_Const_CSE(size_t key)
{
return ((key & TARGET_SIGN_BIT) != 0);
}
// returns the encoded key
static size_t Encode_Shared_Const_CSE_Value(size_t key)
{
return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS);
}
    // returns the original key (the low CSE_CONST_SHARED_LOW_BITS bits come back as zero)
static size_t Decode_Shared_Const_CSE_Value(size_t enckey)
{
assert(Is_Shared_Const_CSE(enckey));
return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS;
}
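    // Sketch of the shared-const key scheme above: Encode_Shared_Const_CSE_Value drops the low
    // CSE_CONST_SHARED_LOW_BITS bits of a constant's hash key and tags the result with TARGET_SIGN_BIT;
    // Decode_Shared_Const_CSE_Value clears the tag and shifts back, so the recovered key has zeros in those
    // low bits. Constants that differ only in the dropped low bits therefore encode to the same key and can
    // be tracked by a single shared-const CSE candidate.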
/**************************************************************************
* Value Number based CSEs
*************************************************************************/
// String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX().
#define FMT_CSE "CSE #%02u"
public:
void optOptimizeValnumCSEs();
protected:
void optValnumCSE_Init();
unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt);
bool optValnumCSE_Locate();
void optValnumCSE_InitDataFlow();
void optValnumCSE_DataFlow();
void optValnumCSE_Availablity();
void optValnumCSE_Heuristic();
bool optDoCSE; // True when we have found a duplicate CSE tree
bool optValnumCSE_phase; // True when we are executing the optOptimizeValnumCSEs() phase
    unsigned optCSECandidateCount; // Count of CSE candidates
    unsigned optCSEstart;          // The first local variable number that is a CSE
    unsigned optCSEcount;          // The total count of CSEs introduced.
weight_t optCSEweight; // The weight of the current block when we are doing PerformCSE
bool optIsCSEcandidate(GenTree* tree);
// lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler
//
bool lclNumIsTrueCSE(unsigned lclNum) const
{
return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount));
}
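    // For example, if the CSE phase introduced three temps starting at local V10 (optCSEstart == 10,
    // optCSEcount == 3), lclNumIsTrueCSE returns true exactly for local numbers 10 through 12.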
// lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop.
//
bool lclNumIsCSE(unsigned lclNum) const
{
return lvaGetDesc(lclNum)->lvIsCSE;
}
#ifdef DEBUG
bool optConfigDisableCSE();
bool optConfigDisableCSE2();
#endif
void optOptimizeCSEs();
struct isVarAssgDsc
{
GenTree* ivaSkip;
ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars.
#ifdef DEBUG
void* ivaSelf;
#endif
unsigned ivaVar; // Variable we are interested in, or -1
varRefKinds ivaMaskInd; // What kind of indirect assignments are there?
callInterf ivaMaskCall; // What kind of calls are there?
bool ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to.
};
static callInterf optCallInterf(GenTreeCall* call);
public:
// VN based copy propagation.
// In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for.
// While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor,
// for locals which will use "definitions from uses", it will not be, so we store it
// in this class instead.
class CopyPropSsaDef
{
LclSsaVarDsc* m_ssaDef;
#ifdef DEBUG
GenTree* m_defNode;
#endif
public:
CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode)
: m_ssaDef(ssaDef)
#ifdef DEBUG
, m_defNode(defNode)
#endif
{
}
LclSsaVarDsc* GetSsaDef() const
{
return m_ssaDef;
}
#ifdef DEBUG
GenTree* GetDefNode() const
{
return m_defNode;
}
#endif
};
typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack;
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap;
// Copy propagation functions.
void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName);
void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
void optCopyPropPushDef(GenTreeOp* asg,
GenTreeLclVarCommon* lclNode,
unsigned lclNum,
LclNumToLiveDefsMap* curSsaName);
unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode);
int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2);
void optVnCopyProp();
INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName));
/**************************************************************************
* Early value propagation
*************************************************************************/
struct SSAName
{
unsigned m_lvNum;
unsigned m_ssaNum;
SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum)
{
}
static unsigned GetHashCode(SSAName ssaNm)
{
return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum);
}
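        // For example, SSAName(5, 2) hashes to (5 << 16) | 2 == 0x00050002: the local number occupies
        // the upper 16 bits of the hash and the SSA number the lower 16 bits.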
static bool Equals(SSAName ssaNm1, SSAName ssaNm2)
{
return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum);
}
};
#define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array
#define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type.
#define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores.
#define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check.
#define OMF_HAS_FATPOINTER 0x00000010 // Method contains a call that needs fat pointer transformation.
#define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack.
#define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate
#define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary.
#define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints
#define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls
#define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT.
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints
#define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block
bool doesMethodHaveFatPointer()
{
return (optMethodFlags & OMF_HAS_FATPOINTER) != 0;
}
void setMethodHasFatPointer()
{
optMethodFlags |= OMF_HAS_FATPOINTER;
}
void clearMethodHasFatPointer()
{
optMethodFlags &= ~OMF_HAS_FATPOINTER;
}
void addFatPointerCandidate(GenTreeCall* call);
bool doesMethodHaveFrozenString() const
{
return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0;
}
void setMethodHasFrozenString()
{
optMethodFlags |= OMF_HAS_FROZEN_STRING;
}
bool doesMethodHaveGuardedDevirtualization() const
{
return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0;
}
void setMethodHasGuardedDevirtualization()
{
optMethodFlags |= OMF_HAS_GUARDEDDEVIRT;
}
void clearMethodHasGuardedDevirtualization()
{
optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT;
}
void considerGuardedDevirtualization(GenTreeCall* call,
IL_OFFSET ilOffset,
bool isInterface,
CORINFO_METHOD_HANDLE baseMethod,
CORINFO_CLASS_HANDLE baseClass,
CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass)
DEBUGARG(const char* objClassName));
void addGuardedDevirtualizationCandidate(GenTreeCall* call,
CORINFO_METHOD_HANDLE methodHandle,
CORINFO_CLASS_HANDLE classHandle,
unsigned methodAttr,
unsigned classAttr,
unsigned likelihood);
bool doesMethodHaveExpRuntimeLookup()
{
return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0;
}
void setMethodHasExpRuntimeLookup()
{
optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP;
}
void clearMethodHasExpRuntimeLookup()
{
optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP;
}
void addExpRuntimeLookupCandidate(GenTreeCall* call);
bool doesMethodHavePatchpoints()
{
return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0;
}
void setMethodHasPatchpoint()
{
optMethodFlags |= OMF_HAS_PATCHPOINT;
}
bool doesMethodHavePartialCompilationPatchpoints()
{
return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0;
}
void setMethodHasPartialCompilationPatchpoint()
{
optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT;
}
unsigned optMethodFlags;
bool doesMethodHaveNoReturnCalls()
{
return optNoReturnCallCount > 0;
}
void setMethodHasNoReturnCalls()
{
optNoReturnCallCount++;
}
unsigned optNoReturnCallCount;
    // Recursion bound controls how far back we can walk when tracking an SSA value.
    // No throughput difference was found with a backward walk bound between 3 and 8.
static const int optEarlyPropRecurBound = 5;
enum class optPropKind
{
OPK_INVALID,
OPK_ARRAYLEN,
OPK_NULLCHECK
};
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap;
GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block));
GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth);
GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind);
GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
bool optDoEarlyPropForBlock(BasicBlock* block);
bool optDoEarlyPropForFunc();
void optEarlyProp();
void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
bool optIsNullCheckFoldingLegal(GenTree* tree,
GenTree* nullCheckTree,
GenTree** nullCheckParent,
Statement** nullCheckStmt);
bool optCanMoveNullCheckPastTree(GenTree* tree,
unsigned nullCheckLclNum,
bool isInsideTry,
bool checkSideEffectSummary);
#if DEBUG
void optCheckFlagsAreSet(unsigned methodFlag,
const char* methodFlagStr,
unsigned bbFlag,
const char* bbFlagStr,
GenTree* tree,
BasicBlock* basicBlock);
#endif
// Redundant branch opts
//
PhaseStatus optRedundantBranches();
bool optRedundantRelop(BasicBlock* const block);
bool optRedundantBranch(BasicBlock* const block);
bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop);
bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock);
/**************************************************************************
* Value/Assertion propagation
*************************************************************************/
public:
// Data structures for assertion prop
BitVecTraits* apTraits;
ASSERT_TP apFull;
enum optAssertionKind
{
OAK_INVALID,
OAK_EQUAL,
OAK_NOT_EQUAL,
OAK_SUBRANGE,
OAK_NO_THROW,
OAK_COUNT
};
enum optOp1Kind
{
O1K_INVALID,
O1K_LCLVAR,
O1K_ARR_BND,
O1K_BOUND_OPER_BND,
O1K_BOUND_LOOP_BND,
O1K_CONSTANT_LOOP_BND,
O1K_CONSTANT_LOOP_BND_UN,
O1K_EXACT_TYPE,
O1K_SUBTYPE,
O1K_VALUE_NUMBER,
O1K_COUNT
};
enum optOp2Kind
{
O2K_INVALID,
O2K_LCLVAR_COPY,
O2K_IND_CNS_INT,
O2K_CONST_INT,
O2K_CONST_LONG,
O2K_CONST_DOUBLE,
O2K_ZEROOBJ,
O2K_SUBRANGE,
O2K_COUNT
};
struct AssertionDsc
{
optAssertionKind assertionKind;
struct SsaVar
{
unsigned lclNum; // assigned to or property of this local var number
unsigned ssaNum;
};
struct ArrBnd
{
ValueNum vnIdx;
ValueNum vnLen;
};
struct AssertionDscOp1
{
optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype
ValueNum vn;
union {
SsaVar lcl;
ArrBnd bnd;
};
} op1;
struct AssertionDscOp2
{
optOp2Kind kind; // a const or copy assignment
ValueNum vn;
struct IntVal
{
ssize_t iconVal; // integer
#if !defined(HOST_64BIT)
unsigned padding; // unused; ensures iconFlags does not overlap lconVal
#endif
GenTreeFlags iconFlags; // gtFlags
};
union {
struct
{
SsaVar lcl;
FieldSeqNode* zeroOffsetFieldSeq;
};
IntVal u1;
__int64 lconVal;
double dconVal;
IntegralRange u2;
};
} op2;
bool IsCheckedBoundArithBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND);
}
bool IsCheckedBoundBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND);
}
bool IsConstantBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) &&
(op1.kind == O1K_CONSTANT_LOOP_BND));
}
bool IsConstantBoundUnsigned()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) &&
(op1.kind == O1K_CONSTANT_LOOP_BND_UN));
}
bool IsBoundsCheckNoThrow()
{
return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND));
}
bool IsCopyAssertion()
{
return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY));
}
bool IsConstantInt32Assertion()
{
return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT);
}
static bool SameKind(AssertionDsc* a1, AssertionDsc* a2)
{
return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind &&
a1->op2.kind == a2->op2.kind;
}
static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2)
{
if (kind == OAK_EQUAL)
{
return kind2 == OAK_NOT_EQUAL;
}
else if (kind == OAK_NOT_EQUAL)
{
return kind2 == OAK_EQUAL;
}
return false;
}
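        // For example, OAK_EQUAL and OAK_NOT_EQUAL are complementary in either order, while kinds such as
        // OAK_SUBRANGE or OAK_NO_THROW have no complement and always yield false here.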
bool HasSameOp1(AssertionDsc* that, bool vnBased)
{
if (op1.kind != that->op1.kind)
{
return false;
}
else if (op1.kind == O1K_ARR_BND)
{
assert(vnBased);
return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen);
}
else
{
return ((vnBased && (op1.vn == that->op1.vn)) ||
(!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum)));
}
}
bool HasSameOp2(AssertionDsc* that, bool vnBased)
{
if (op2.kind != that->op2.kind)
{
return false;
}
switch (op2.kind)
{
case O2K_IND_CNS_INT:
case O2K_CONST_INT:
return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags));
case O2K_CONST_LONG:
return (op2.lconVal == that->op2.lconVal);
case O2K_CONST_DOUBLE:
// exact match because of positive and negative zero.
return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0);
case O2K_ZEROOBJ:
return true;
case O2K_LCLVAR_COPY:
return (op2.lcl.lclNum == that->op2.lcl.lclNum) &&
(!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) &&
(op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq);
case O2K_SUBRANGE:
return op2.u2.Equals(that->op2.u2);
case O2K_INVALID:
// we will return false
break;
default:
assert(!"Unexpected value for op2.kind in AssertionDsc.");
break;
}
return false;
}
bool Complementary(AssertionDsc* that, bool vnBased)
{
return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) &&
HasSameOp2(that, vnBased);
}
bool Equals(AssertionDsc* that, bool vnBased)
{
if (assertionKind != that->assertionKind)
{
return false;
}
else if (assertionKind == OAK_NO_THROW)
{
assert(op2.kind == O2K_INVALID);
return HasSameOp1(that, vnBased);
}
else
{
return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased);
}
}
};
protected:
static fgWalkPreFn optAddCopiesCallback;
static fgWalkPreFn optVNAssertionPropCurStmtVisitor;
unsigned optAddCopyLclNum;
GenTree* optAddCopyAsgnNode;
bool optLocalAssertionProp; // indicates that we are performing local assertion prop
bool optAssertionPropagated; // set to true if we modified the trees
bool optAssertionPropagatedCurrentStmt;
#ifdef DEBUG
GenTree* optAssertionPropCurrentTree;
#endif
AssertionIndex* optComplementaryAssertionMap;
JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions
// using the value of a local var) for each local var
AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments
AssertionIndex optAssertionCount; // total number of assertions in the assertion table
AssertionIndex optMaxAssertionCount;
public:
void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree);
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree);
GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test);
GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree);
GenTree* optExtractSideEffListFromConst(GenTree* tree);
AssertionIndex GetAssertionCount()
{
return optAssertionCount;
}
ASSERT_TP* bbJtrueAssertionOut;
typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap;
ValueNumToAssertsMap* optValueNumToAsserts;
// Assertion prop helpers.
ASSERT_TP& GetAssertionDep(unsigned lclNum);
AssertionDsc* optGetAssertion(AssertionIndex assertIndex);
void optAssertionInit(bool isLocalProp);
void optAssertionTraitsInit(AssertionIndex assertionCount);
void optAssertionReset(AssertionIndex limit);
void optAssertionRemove(AssertionIndex index);
// Assertion prop data flow functions.
void optAssertionPropMain();
Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt);
bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags);
ASSERT_TP* optInitAssertionDataflowFlags();
ASSERT_TP* optComputeAssertionGen();
// Assertion Gen functions.
void optAssertionGen(GenTree* tree);
AssertionIndex optAssertionGenCast(GenTreeCast* cast);
AssertionIndex optAssertionGenPhiDefn(GenTree* tree);
AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree);
AssertionInfo optAssertionGenJtrue(GenTree* tree);
AssertionIndex optCreateJtrueAssertions(GenTree* op1,
GenTree* op2,
Compiler::optAssertionKind assertionKind,
bool helperCallArgs = false);
AssertionIndex optFindComplementary(AssertionIndex assertionIndex);
void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index);
// Assertion creation functions.
AssertionIndex optCreateAssertion(GenTree* op1,
GenTree* op2,
optAssertionKind assertionKind,
bool helperCallArgs = false);
AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion);
bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange);
void optCreateComplementaryAssertion(AssertionIndex assertionIndex,
GenTree* op1,
GenTree* op2,
bool helperCallArgs = false);
bool optAssertionVnInvolvesNan(AssertionDsc* assertion);
AssertionIndex optAddAssertion(AssertionDsc* assertion);
void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index);
#ifdef DEBUG
void optPrintVnAssertionMapping();
#endif
ASSERT_TP optGetVnMappedAssertions(ValueNum vn);
// Used for respective assertion propagations.
AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions);
AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions);
AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased));
bool optAssertionIsNonNull(GenTree* op,
ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex));
AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2);
AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1);
AssertionIndex optLocalAssertionIsEqualOrNotEqual(
optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions);
// Assertion prop for lcl var functions.
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc);
GenTree* optCopyAssertionProp(AssertionDsc* curAssertion,
GenTreeLclVarCommon* tree,
Statement* stmt DEBUGARG(AssertionIndex index));
GenTree* optConstantAssertionProp(AssertionDsc* curAssertion,
GenTreeLclVarCommon* tree,
Statement* stmt DEBUGARG(AssertionIndex index));
bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions);
// Assertion propagation functions.
GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block);
GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt);
GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt);
GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt);
GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt);
GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt);
GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt);
GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call);
// Implied assertion functions.
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions);
void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions);
void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result);
void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result);
#ifdef DEBUG
void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0);
void optPrintAssertionIndex(AssertionIndex index);
void optPrintAssertionIndices(ASSERT_TP assertions);
void optDebugCheckAssertion(AssertionDsc* assertion);
void optDebugCheckAssertions(AssertionIndex AssertionIndex);
#endif
static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr);
static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr);
void optAddCopies();
/**************************************************************************
* Range checks
*************************************************************************/
public:
struct LoopCloneVisitorInfo
{
LoopCloneContext* context;
unsigned loopNum;
Statement* stmt;
LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt)
: context(context), loopNum(loopNum), stmt(nullptr)
{
}
};
bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum);
bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum);
bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum);
bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context);
static fgWalkPreFn optCanOptimizeByLoopCloningVisitor;
fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info);
bool optObtainLoopCloningOpts(LoopCloneContext* context);
bool optIsLoopClonable(unsigned loopInd);
bool optLoopCloningEnabled();
#ifdef DEBUG
void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore);
#endif
void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath));
bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context);
bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context);
BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context,
unsigned loopNum,
BasicBlock* slowHead,
BasicBlock* insertAfter);
protected:
ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk));
bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB);
protected:
bool optLoopsMarked;
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX RegAlloc XX
XX XX
XX Does the register allocation and puts the remaining lclVars on the stack XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc);
void raMarkStkVars();
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#if defined(TARGET_AMD64)
static bool varTypeNeedsPartialCalleeSave(var_types type)
{
assert(type != TYP_STRUCT);
return (type == TYP_SIMD32);
}
#elif defined(TARGET_ARM64)
static bool varTypeNeedsPartialCalleeSave(var_types type)
{
assert(type != TYP_STRUCT);
        // The ARM64 ABI only requires the callee to save the lower 8 bytes of the FP callee-saved registers.
        // For SIMD types wider than 8 bytes, the caller is responsible for saving and restoring the upper bytes.
return ((type == TYP_SIMD16) || (type == TYP_SIMD12));
}
#else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#error("Unknown target architecture for FEATURE_SIMD")
#endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
protected:
// Some things are used by both LSRA and regpredict allocators.
FrameType rpFrameType;
bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once
bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason));
private:
Lowering* m_pLowering; // Lowering; needed to Lower IR that's added or modified after Lowering.
LinearScanInterface* m_pLinearScan; // Linear Scan allocator
    /* raIsVarargsStackArg is called by raMarkStkVars and by
lvaComputeRefCounts. It identifies the special case
where a varargs function has a parameter passed on the
stack, other than the special varargs handle. Such parameters
require special treatment, because they cannot be tracked
by the GC (their offsets in the stack are not known
at compile time).
*/
bool raIsVarargsStackArg(unsigned lclNum)
{
#ifdef TARGET_X86
LclVarDsc* varDsc = lvaGetDesc(lclNum);
assert(varDsc->lvIsParam);
return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg));
#else // TARGET_X86
return false;
#endif // TARGET_X86
}
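    // For example, on TARGET_X86 a stack-passed (non-register) parameter of a varargs method, other than
    // the varargs handle itself, answers true here; on every other target this always returns false.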
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX EEInterface XX
XX XX
XX Get to the class and method info from the Execution Engine given XX
XX tokens for the class and method XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// Get handles
void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedToken,
CORINFO_CALLINFO_FLAGS flags,
CORINFO_CALL_INFO* pResult);
void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS flags,
CORINFO_FIELD_INFO* pResult);
// Get the flags
bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd);
bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn);
bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd);
var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr);
#if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS)
const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className);
const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd);
unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle);
bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method);
CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method);
#endif
var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned);
CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list);
CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context);
unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
static unsigned eeGetArgAlignment(var_types type, bool isFloatHfa);
// VOM info, method sigs
void eeGetSig(unsigned sigTok,
CORINFO_MODULE_HANDLE scope,
CORINFO_CONTEXT_HANDLE context,
CORINFO_SIG_INFO* retSig);
void eeGetCallSiteSig(unsigned sigTok,
CORINFO_MODULE_HANDLE scope,
CORINFO_CONTEXT_HANDLE context,
CORINFO_SIG_INFO* retSig);
void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr);
// Method entry-points, instrs
CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method);
CORINFO_EE_INFO eeInfo;
bool eeInfoInitialized;
CORINFO_EE_INFO* eeGetEEInfo();
    // Gets the offset of an SDArray's first element
static unsigned eeGetArrayDataOffset();
    // Get the offset of an MDArray's first element
    static unsigned eeGetMDArrayDataOffset(unsigned rank);
    // Get the offset of an MDArray's dimension length for a given dimension.
    static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension);
    // Get the offset of an MDArray's lower bound for a given dimension.
static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension);
GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig);
// Returns the page size for the target machine as reported by the EE.
target_size_t eeGetPageSize()
{
return (target_size_t)eeGetEEInfo()->osPageSize;
}
//------------------------------------------------------------------------
// VirtualStubParam: virtual stub dispatch extra parameter (slot address).
//
    // It represents the ABI- and target-specific registers for the parameter.
//
class VirtualStubParamInfo
{
public:
VirtualStubParamInfo(bool isCoreRTABI)
{
#if defined(TARGET_X86)
reg = REG_EAX;
regMask = RBM_EAX;
#elif defined(TARGET_AMD64)
if (isCoreRTABI)
{
reg = REG_R10;
regMask = RBM_R10;
}
else
{
reg = REG_R11;
regMask = RBM_R11;
}
#elif defined(TARGET_ARM)
if (isCoreRTABI)
{
reg = REG_R12;
regMask = RBM_R12;
}
else
{
reg = REG_R4;
regMask = RBM_R4;
}
#elif defined(TARGET_ARM64)
reg = REG_R11;
regMask = RBM_R11;
#else
#error Unsupported or unset target architecture
#endif
}
regNumber GetReg() const
{
return reg;
}
_regMask_enum GetRegMask() const
{
return regMask;
}
private:
regNumber reg;
_regMask_enum regMask;
};
VirtualStubParamInfo* virtualStubParamInfo;
bool IsTargetAbi(CORINFO_RUNTIME_ABI abi)
{
return eeGetEEInfo()->targetAbi == abi;
}
bool generateCFIUnwindCodes()
{
#if defined(FEATURE_CFI_SUPPORT)
return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI);
#else
return false;
#endif
}
// Debugging support - Line number info
void eeGetStmtOffsets();
unsigned eeBoundariesCount;
ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE
void eeSetLIcount(unsigned count);
void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc);
void eeSetLIdone();
#ifdef DEBUG
static void eeDispILOffs(IL_OFFSET offs);
static void eeDispSourceMappingOffs(uint32_t offs);
static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line);
void eeDispLineInfos();
#endif // DEBUG
// Debugging support - Local var info
void eeGetVars();
unsigned eeVarsCount;
struct VarResultInfo
{
UNATIVE_OFFSET startOffset;
UNATIVE_OFFSET endOffset;
DWORD varNumber;
CodeGenInterface::siVarLoc loc;
} * eeVars;
void eeSetLVcount(unsigned count);
void eeSetLVinfo(unsigned which,
UNATIVE_OFFSET startOffs,
UNATIVE_OFFSET length,
unsigned varNum,
const CodeGenInterface::siVarLoc& loc);
void eeSetLVdone();
#ifdef DEBUG
void eeDispVar(ICorDebugInfo::NativeVarInfo* var);
void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars);
#endif // DEBUG
// ICorJitInfo wrappers
void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize);
void eeAllocUnwindInfo(BYTE* pHotCode,
BYTE* pColdCode,
ULONG startOffset,
ULONG endOffset,
ULONG unwindSize,
BYTE* pUnwindBlock,
CorJitFuncKind funcKind);
void eeSetEHcount(unsigned cEH);
void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause);
WORD eeGetRelocTypeHint(void* target);
// ICorStaticInfo wrapper functions
bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken);
#if defined(UNIX_AMD64_ABI)
#ifdef DEBUG
static void dumpSystemVClassificationType(SystemVClassificationType ct);
#endif // DEBUG
void eeGetSystemVAmd64PassStructInRegisterDescriptor(
/*IN*/ CORINFO_CLASS_HANDLE structHnd,
/*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr);
#endif // UNIX_AMD64_ABI
template <typename ParamType>
bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param)
{
return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
}
bool eeRunWithErrorTrapImp(void (*function)(void*), void* param);
template <typename ParamType>
bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param)
{
return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
}
bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param);
// Utility functions
const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr);
#if defined(DEBUG)
const WCHAR* eeGetCPString(size_t stringHandle);
#endif
const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd);
static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper);
static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method);
static bool IsSharedStaticHelper(GenTree* tree);
static bool IsGcSafePoint(GenTreeCall* call);
static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs);
    // returns whether 'field' is a Jit Data offset
static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field);
// returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB)
static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field);
/*****************************************************************************/
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX CodeGenerator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
CodeGenInterface* codeGen;
// Record the instr offset mapping to the generated code
jitstd::list<IPmappingDsc> genIPmappings;
#ifdef DEBUG
jitstd::list<PreciseIPMapping> genPreciseIPmappings;
#endif
// Managed RetVal - A side hash table meant to record the mapping from a
// GT_CALL node to its debug info. This info is used to emit sequence points
    // that can be used by the debugger to determine the native offset at which the
// managed RetVal will be available.
//
    // We could store the debug info directly in the GT_CALL node, but that was ruled out in
    // favor of a side table for two reasons: 1) we need debug info only for those
    // GT_CALL nodes (created during importation) that correspond to an IL call and
    // whose return type is other than TYP_VOID, and 2) GT_CALL is a frequently used
    // node and the IL offset is needed only when generating debuggable code, so
    // it is desirable to avoid the memory size penalty in retail scenarios.
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable;
CallSiteDebugInfoTable* genCallSite2DebugInfoMap;
unsigned genReturnLocal; // Local number for the return value when applicable.
BasicBlock* genReturnBB; // jumped to when not optimizing for speed.
// The following properties are part of CodeGenContext. Getters are provided here for
// convenience and backward compatibility, but the properties can only be set by invoking
// the setter on CodeGenContext directly.
emitter* GetEmitter() const
{
return codeGen->GetEmitter();
}
bool isFramePointerUsed() const
{
return codeGen->isFramePointerUsed();
}
bool GetInterruptible()
{
return codeGen->GetInterruptible();
}
void SetInterruptible(bool value)
{
codeGen->SetInterruptible(value);
}
#if DOUBLE_ALIGN
const bool genDoubleAlign()
{
return codeGen->doDoubleAlign();
}
DWORD getCanDoubleAlign();
bool shouldDoubleAlign(unsigned refCntStk,
unsigned refCntReg,
weight_t refCntWtdReg,
unsigned refCntStkParam,
weight_t refCntWtdStkDbl);
#endif // DOUBLE_ALIGN
bool IsFullPtrRegMapRequired()
{
return codeGen->IsFullPtrRegMapRequired();
}
void SetFullPtrRegMapRequired(bool value)
{
codeGen->SetFullPtrRegMapRequired(value);
}
// Things that MAY belong either in CodeGen or CodeGenContext
#if defined(FEATURE_EH_FUNCLETS)
FuncInfoDsc* compFuncInfos;
unsigned short compCurrFuncIdx;
unsigned short compFuncInfoCount;
unsigned short compFuncCount()
{
assert(fgFuncletsCreated);
return compFuncInfoCount;
}
#else // !FEATURE_EH_FUNCLETS
// This is a no-op when there are no funclets!
void genUpdateCurrentFunclet(BasicBlock* block)
{
return;
}
FuncInfoDsc compFuncInfoRoot;
static const unsigned compCurrFuncIdx = 0;
unsigned short compFuncCount()
{
return 1;
}
#endif // !FEATURE_EH_FUNCLETS
FuncInfoDsc* funCurrentFunc();
void funSetCurrentFunc(unsigned funcIdx);
FuncInfoDsc* funGetFunc(unsigned funcIdx);
unsigned int funGetFuncIdx(BasicBlock* block);
// LIVENESS
VARSET_TP compCurLife; // current live variables
GenTree* compCurLifeTree; // node after which compCurLife has been computed
    // Compare the given "newLife" with the last set of live variables and update
    // codeGen "gcInfo", siScopes, and "regSet" with the new variables' homes/liveness.
template <bool ForCodeGen>
void compChangeLife(VARSET_VALARG_TP newLife);
    // Update the GC masks and register masks, and report changes to variables' homes, given the set of
    // currently live variables, if changes have happened since "compCurLife".
template <bool ForCodeGen>
inline void compUpdateLife(VARSET_VALARG_TP newLife);
    // Gets a register mask that represents the kill set for a helper call, since
    // not all JIT helper calls follow the standard ABI on the target architecture.
regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper);
#ifdef TARGET_ARM
// Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at
// "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the
// struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" --
// i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and
    // a double, and we are at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask.
void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask);
#endif // TARGET_ARM
    // If "tree" is an indirection (GT_IND or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR
// node, else NULL.
static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree);
    // This map is indexed by GT_OBJ nodes that are addresses of promoted struct variables, which
// have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this
// table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise,
// the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field
// vars of the promoted struct local that go dead at the given node (the set bits are the bits
// for the tracked var indices of the field vars, as in a live var set).
//
// The map is allocated on demand so all map operations should use one of the following three
// wrapper methods.
NodeToVarsetPtrMap* m_promotedStructDeathVars;
NodeToVarsetPtrMap* GetPromotedStructDeathVars()
{
if (m_promotedStructDeathVars == nullptr)
{
m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator());
}
return m_promotedStructDeathVars;
}
void ClearPromotedStructDeathVars()
{
if (m_promotedStructDeathVars != nullptr)
{
m_promotedStructDeathVars->RemoveAll();
}
}
bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits)
{
*bits = nullptr;
bool result = false;
if (m_promotedStructDeathVars != nullptr)
{
result = m_promotedStructDeathVars->Lookup(tree, bits);
}
return result;
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX UnwindInfo XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#if !defined(__GNUC__)
#pragma region Unwind information
#endif
public:
//
// Infrastructure functions: start/stop/reserve/emit.
//
void unwindBegProlog();
void unwindEndProlog();
void unwindBegEpilog();
void unwindEndEpilog();
void unwindReserve();
void unwindEmit(void* pHotCode, void* pColdCode);
//
// Specific unwind information functions: called by code generation to indicate a particular
// prolog or epilog unwindable instruction has been generated.
//
void unwindPush(regNumber reg);
void unwindAllocStack(unsigned size);
void unwindSetFrameReg(regNumber reg, unsigned offset);
void unwindSaveReg(regNumber reg, unsigned offset);
#if defined(TARGET_ARM)
void unwindPushMaskInt(regMaskTP mask);
void unwindPushMaskFloat(regMaskTP mask);
void unwindPopMaskInt(regMaskTP mask);
void unwindPopMaskFloat(regMaskTP mask);
void unwindBranch16(); // The epilog terminates with a 16-bit branch (e.g., "bx lr")
void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only
// called via unwindPadding().
void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
// instruction and the current location.
#endif // TARGET_ARM
#if defined(TARGET_ARM64)
void unwindNop();
void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
// instruction and the current location.
void unwindSaveReg(regNumber reg, int offset); // str reg, [sp, #offset]
void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]!
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]
void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]!
void unwindSaveNext(); // unwind code: save_next
void unwindReturn(regNumber reg); // ret lr
#endif // defined(TARGET_ARM64)
//
// Private "helper" functions for the unwind implementation.
//
private:
#if defined(FEATURE_EH_FUNCLETS)
void unwindGetFuncLocations(FuncInfoDsc* func,
bool getHotSectionData,
/* OUT */ emitLocation** ppStartLoc,
/* OUT */ emitLocation** ppEndLoc);
#endif // FEATURE_EH_FUNCLETS
void unwindReserveFunc(FuncInfoDsc* func);
void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
#if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS))
void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode);
void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode);
#endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS)
UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func);
#if defined(TARGET_AMD64)
void unwindBegPrologWindows();
void unwindPushWindows(regNumber reg);
void unwindAllocStackWindows(unsigned size);
void unwindSetFrameRegWindows(regNumber reg, unsigned offset);
void unwindSaveRegWindows(regNumber reg, unsigned offset);
#ifdef UNIX_AMD64_ABI
void unwindSaveRegCFI(regNumber reg, unsigned offset);
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM)
void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16);
void unwindPushPopMaskFloat(regMaskTP mask);
#endif // TARGET_ARM
#if defined(FEATURE_CFI_SUPPORT)
short mapRegNumToDwarfReg(regNumber reg);
void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0);
void unwindPushPopCFI(regNumber reg);
void unwindBegPrologCFI();
void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat);
void unwindAllocStackCFI(unsigned size);
void unwindSetFrameRegCFI(regNumber reg, unsigned offset);
void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
#ifdef DEBUG
void DumpCfiInfo(bool isHotCode,
UNATIVE_OFFSET startOffset,
UNATIVE_OFFSET endOffset,
DWORD cfiCodeBytes,
const CFI_CODE* const pCfiCode);
#endif
#endif // FEATURE_CFI_SUPPORT
#if !defined(__GNUC__)
#pragma endregion // Note: region is NOT under !defined(__GNUC__)
#endif
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX SIMD XX
XX XX
XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX
XX that contains the distinguished, well-known SIMD type definitions). XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
bool IsBaselineSimdIsaSupported()
{
#ifdef FEATURE_SIMD
#if defined(TARGET_XARCH)
CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2;
#elif defined(TARGET_ARM64)
CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return compOpportunisticallyDependsOn(minimumIsa);
#else
return false;
#endif
}
#if defined(DEBUG)
bool IsBaselineSimdIsaSupportedDebugOnly()
{
#ifdef FEATURE_SIMD
#if defined(TARGET_XARCH)
CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2;
#elif defined(TARGET_ARM64)
CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return compIsaSupportedDebugOnly(minimumIsa);
#else
return false;
#endif // FEATURE_SIMD
}
#endif // DEBUG
// Get highest available level for SIMD codegen
SIMDLevel getSIMDSupportLevel()
{
#if defined(TARGET_XARCH)
if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
return SIMD_AVX2_Supported;
}
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
return SIMD_SSE4_Supported;
}
// min bar is SSE2
return SIMD_SSE2_Supported;
#else
assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch");
unreached();
return SIMD_Not_Supported;
#endif
}
bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd)
{
return info.compCompHnd->isIntrinsicType(clsHnd);
}
const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName)
{
return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName);
}
CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index)
{
return info.compCompHnd->getTypeInstantiationArgument(cls, index);
}
#ifdef FEATURE_SIMD
// Should we support SIMD intrinsics?
bool featureSIMD;
// Should we recognize SIMD types?
// We always do this on ARM64 to support HVA types.
bool supportSIMDTypes()
{
#ifdef TARGET_ARM64
return true;
#else
return featureSIMD;
#endif
}
// Have we identified any SIMD types?
    // This is currently used by struct promotion to avoid querying type information for a struct
    // field (to see if it is a SIMD type) when we haven't seen any SIMD types or operations in
    // the method.
bool _usesSIMDTypes;
bool usesSIMDTypes()
{
return _usesSIMDTypes;
}
void setUsesSIMDTypes(bool value)
{
_usesSIMDTypes = value;
}
// This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics
// that require indexed access to the individual fields of the vector, which is not well supported
// by the hardware. It is allocated when/if such situations are encountered during Lowering.
unsigned lvaSIMDInitTempVarNum;
struct SIMDHandlesCache
{
// SIMD Types
CORINFO_CLASS_HANDLE SIMDFloatHandle;
CORINFO_CLASS_HANDLE SIMDDoubleHandle;
CORINFO_CLASS_HANDLE SIMDIntHandle;
CORINFO_CLASS_HANDLE SIMDUShortHandle;
CORINFO_CLASS_HANDLE SIMDUByteHandle;
CORINFO_CLASS_HANDLE SIMDShortHandle;
CORINFO_CLASS_HANDLE SIMDByteHandle;
CORINFO_CLASS_HANDLE SIMDLongHandle;
CORINFO_CLASS_HANDLE SIMDUIntHandle;
CORINFO_CLASS_HANDLE SIMDULongHandle;
CORINFO_CLASS_HANDLE SIMDNIntHandle;
CORINFO_CLASS_HANDLE SIMDNUIntHandle;
CORINFO_CLASS_HANDLE SIMDVector2Handle;
CORINFO_CLASS_HANDLE SIMDVector3Handle;
CORINFO_CLASS_HANDLE SIMDVector4Handle;
CORINFO_CLASS_HANDLE SIMDVectorHandle;
#ifdef FEATURE_HW_INTRINSICS
#if defined(TARGET_ARM64)
CORINFO_CLASS_HANDLE Vector64FloatHandle;
CORINFO_CLASS_HANDLE Vector64DoubleHandle;
CORINFO_CLASS_HANDLE Vector64IntHandle;
CORINFO_CLASS_HANDLE Vector64UShortHandle;
CORINFO_CLASS_HANDLE Vector64UByteHandle;
CORINFO_CLASS_HANDLE Vector64ShortHandle;
CORINFO_CLASS_HANDLE Vector64ByteHandle;
CORINFO_CLASS_HANDLE Vector64LongHandle;
CORINFO_CLASS_HANDLE Vector64UIntHandle;
CORINFO_CLASS_HANDLE Vector64ULongHandle;
CORINFO_CLASS_HANDLE Vector64NIntHandle;
CORINFO_CLASS_HANDLE Vector64NUIntHandle;
#endif // defined(TARGET_ARM64)
CORINFO_CLASS_HANDLE Vector128FloatHandle;
CORINFO_CLASS_HANDLE Vector128DoubleHandle;
CORINFO_CLASS_HANDLE Vector128IntHandle;
CORINFO_CLASS_HANDLE Vector128UShortHandle;
CORINFO_CLASS_HANDLE Vector128UByteHandle;
CORINFO_CLASS_HANDLE Vector128ShortHandle;
CORINFO_CLASS_HANDLE Vector128ByteHandle;
CORINFO_CLASS_HANDLE Vector128LongHandle;
CORINFO_CLASS_HANDLE Vector128UIntHandle;
CORINFO_CLASS_HANDLE Vector128ULongHandle;
CORINFO_CLASS_HANDLE Vector128NIntHandle;
CORINFO_CLASS_HANDLE Vector128NUIntHandle;
#if defined(TARGET_XARCH)
CORINFO_CLASS_HANDLE Vector256FloatHandle;
CORINFO_CLASS_HANDLE Vector256DoubleHandle;
CORINFO_CLASS_HANDLE Vector256IntHandle;
CORINFO_CLASS_HANDLE Vector256UShortHandle;
CORINFO_CLASS_HANDLE Vector256UByteHandle;
CORINFO_CLASS_HANDLE Vector256ShortHandle;
CORINFO_CLASS_HANDLE Vector256ByteHandle;
CORINFO_CLASS_HANDLE Vector256LongHandle;
CORINFO_CLASS_HANDLE Vector256UIntHandle;
CORINFO_CLASS_HANDLE Vector256ULongHandle;
CORINFO_CLASS_HANDLE Vector256NIntHandle;
CORINFO_CLASS_HANDLE Vector256NUIntHandle;
#endif // defined(TARGET_XARCH)
#endif // FEATURE_HW_INTRINSICS
SIMDHandlesCache()
{
memset(this, 0, sizeof(*this));
}
};
SIMDHandlesCache* m_simdHandleCache;
// Get an appropriate "zero" for the given type and class handle.
GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle);
// Get the handle for a SIMD type.
CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType)
{
if (m_simdHandleCache == nullptr)
{
            // This may happen if the JIT generates SIMD nodes on its own, without importing them.
// Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache.
return NO_CLASS_HANDLE;
}
if (simdBaseJitType == CORINFO_TYPE_FLOAT)
{
switch (simdType)
{
case TYP_SIMD8:
return m_simdHandleCache->SIMDVector2Handle;
case TYP_SIMD12:
return m_simdHandleCache->SIMDVector3Handle;
case TYP_SIMD16:
if ((getSIMDVectorType() == TYP_SIMD32) ||
(m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE))
{
return m_simdHandleCache->SIMDVector4Handle;
}
break;
case TYP_SIMD32:
break;
default:
unreached();
}
}
assert(emitTypeSize(simdType) <= largestEnregisterableStructSize());
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->SIMDFloatHandle;
case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->SIMDDoubleHandle;
case CORINFO_TYPE_INT:
return m_simdHandleCache->SIMDIntHandle;
case CORINFO_TYPE_USHORT:
return m_simdHandleCache->SIMDUShortHandle;
case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->SIMDUByteHandle;
case CORINFO_TYPE_SHORT:
return m_simdHandleCache->SIMDShortHandle;
case CORINFO_TYPE_BYTE:
return m_simdHandleCache->SIMDByteHandle;
case CORINFO_TYPE_LONG:
return m_simdHandleCache->SIMDLongHandle;
case CORINFO_TYPE_UINT:
return m_simdHandleCache->SIMDUIntHandle;
case CORINFO_TYPE_ULONG:
return m_simdHandleCache->SIMDULongHandle;
case CORINFO_TYPE_NATIVEINT:
return m_simdHandleCache->SIMDNIntHandle;
case CORINFO_TYPE_NATIVEUINT:
return m_simdHandleCache->SIMDNUIntHandle;
default:
assert(!"Didn't find a class handle for simdType");
}
return NO_CLASS_HANDLE;
}
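    // For example, gtGetStructHandleForSIMD(TYP_SIMD12, CORINFO_TYPE_FLOAT) returns
    // m_simdHandleCache->SIMDVector3Handle, while gtGetStructHandleForSIMD(TYP_SIMD16, CORINFO_TYPE_INT)
    // falls through to the per-element-type switch and returns m_simdHandleCache->SIMDIntHandle.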
// Returns true if this is a SIMD type that should be considered an opaque
// vector type (i.e. do not analyze or promote its fields).
// Note that all but the fixed vector types are opaque, even though they may
// actually be declared as having fields.
bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const
{
return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) &&
(structHandle != m_simdHandleCache->SIMDVector3Handle) &&
(structHandle != m_simdHandleCache->SIMDVector4Handle));
}
// Returns true if the tree corresponds to a TYP_SIMD lcl var.
    // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but
    // the type of an arg node is TYP_BYREF while that of a local node is TYP_SIMD or TYP_STRUCT.
bool isSIMDTypeLocal(GenTree* tree)
{
return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType;
}
// Returns true if the lclVar is an opaque SIMD type.
bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const
{
if (!varDsc->lvSIMDType)
{
return false;
}
return isOpaqueSIMDType(varDsc->GetStructHnd());
}
static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
{
return (intrinsicId == SIMDIntrinsicEqual);
}
// Returns base JIT type of a TYP_SIMD local.
// Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD.
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree)
{
if (isSIMDTypeLocal(tree))
{
return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType();
}
return CORINFO_TYPE_UNDEF;
}
bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
if (isIntrinsicType(clsHnd))
{
const char* namespaceName = nullptr;
(void)getClassNameFromMetadata(clsHnd, &namespaceName);
return strcmp(namespaceName, "System.Numerics") == 0;
}
return false;
}
bool isSIMDClass(typeInfo* pTypeInfo)
{
return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass());
}
bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
#ifdef FEATURE_HW_INTRINSICS
if (isIntrinsicType(clsHnd))
{
const char* namespaceName = nullptr;
(void)getClassNameFromMetadata(clsHnd, &namespaceName);
return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0;
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
bool isHWSIMDClass(typeInfo* pTypeInfo)
{
#ifdef FEATURE_HW_INTRINSICS
return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass());
#else
return false;
#endif
}
bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd);
}
bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo)
{
return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo);
}
// Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF
// if it is not a SIMD type or is an unsupported base JIT type.
CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr);
CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd)
{
return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr);
}
// Get SIMD Intrinsic info given the method handle.
// Also sets the typeHnd, argCount, simdBaseJitType and sizeBytes out params.
const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd,
CORINFO_METHOD_HANDLE methodHnd,
CORINFO_SIG_INFO* sig,
bool isNewObj,
unsigned* argCount,
CorInfoType* simdBaseJitType,
unsigned* sizeBytes);
// Pops and returns a GenTree node from the importer's type stack.
// Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes.
GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr);
// Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain given relop result.
SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
CORINFO_CLASS_HANDLE typeHnd,
unsigned simdVectorSize,
CorInfoType* inOutBaseJitType,
GenTree** op1,
GenTree** op2);
#if defined(TARGET_XARCH)
// Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain == comparison result.
SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
unsigned simdVectorSize,
GenTree** op1,
GenTree** op2);
#endif // defined(TARGET_XARCH)
void setLclRelatedToSIMDIntrinsic(GenTree* tree);
bool areFieldsContiguous(GenTree* op1, GenTree* op2);
bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second);
bool areArrayElementsContiguous(GenTree* op1, GenTree* op2);
bool areArgumentsContiguous(GenTree* op1, GenTree* op2);
GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize);
// check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT.
GenTree* impSIMDIntrinsic(OPCODE opcode,
GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef);
GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd);
// Whether a SIMD vector occupies part of a SIMD register.
// SSE2: vector2f/3f are considered sub register SIMD types.
// AVX: vector2f, 3f and 4f are all considered sub register SIMD types.
bool isSubRegisterSIMDType(GenTreeSIMD* simdNode)
{
unsigned vectorRegisterByteLength;
#if defined(TARGET_XARCH)
// Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded
// with the AOT compiler, so that it cannot change from AOT compilation time to runtime.
// This api does not require such fixing, as it merely pertains to the size of the SIMD type
// relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here
// does not preclude the code from being used on a machine with a larger vector length.)
if (getSIMDSupportLevel() < SIMD_AVX2_Supported)
{
vectorRegisterByteLength = 16;
}
else
{
vectorRegisterByteLength = 32;
}
#else
vectorRegisterByteLength = getSIMDVectorRegisterByteLength();
#endif
return (simdNode->GetSimdSize() < vectorRegisterByteLength);
}
// Get the type for the hardware SIMD vector.
// This is the maximum SIMD type supported for this target.
var_types getSIMDVectorType()
{
#if defined(TARGET_XARCH)
if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
{
return TYP_SIMD32;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return TYP_SIMD16;
}
#elif defined(TARGET_ARM64)
return TYP_SIMD16;
#else
assert(!"getSIMDVectorType() unimplemented on target arch");
unreached();
#endif
}
// Get the size of the SIMD type in bytes
int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd)
{
unsigned sizeBytes = 0;
(void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
return sizeBytes;
}
// Get the number of elements of baseType in a SIMD vector given by its size and baseType
static int getSIMDVectorLength(unsigned simdSize, var_types baseType);
// Get the number of elements of baseType in a SIMD vector given by its type handle
int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd);
// Get preferred alignment of SIMD type.
int getSIMDTypeAlignment(var_types simdType);
// Get the number of bytes in a System.Numerics.Vector<T> for the current compilation.
// Note - cannot be used for System.Runtime.Intrinsics
unsigned getSIMDVectorRegisterByteLength()
{
#if defined(TARGET_XARCH)
if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
{
return YMM_REGSIZE_BYTES;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return XMM_REGSIZE_BYTES;
}
#elif defined(TARGET_ARM64)
return FP_REGSIZE_BYTES;
#else
assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch");
unreached();
#endif
}
// The minimum and maximum possible number of bytes in a SIMD vector.
// maxSIMDStructBytes
// The maximum SIMD size supported by System.Numerics.Vectors or System.Runtime.Intrinsics:
// SSE: 16-byte Vector<T> and Vector128<T>
// AVX: 32-byte Vector256<T> (Vector<T> is 16-byte)
// AVX2: 32-byte Vector<T> and Vector256<T>
unsigned int maxSIMDStructBytes()
{
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
if (compOpportunisticallyDependsOn(InstructionSet_AVX))
{
return YMM_REGSIZE_BYTES;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return XMM_REGSIZE_BYTES;
}
#else
return getSIMDVectorRegisterByteLength();
#endif
}
unsigned int minSIMDStructBytes()
{
return emitTypeSize(TYP_SIMD8);
}
public:
// Returns the codegen type for a given SIMD size.
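// For example, getSIMDTypeForSize(16) returns TYP_SIMD16 (the size of Vector128<T> / Vector4).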
static var_types getSIMDTypeForSize(unsigned size)
{
var_types simdType = TYP_UNDEF;
if (size == 8)
{
simdType = TYP_SIMD8;
}
else if (size == 12)
{
simdType = TYP_SIMD12;
}
else if (size == 16)
{
simdType = TYP_SIMD16;
}
else if (size == 32)
{
simdType = TYP_SIMD32;
}
else
{
noway_assert(!"Unexpected size for SIMD type");
}
return simdType;
}
private:
unsigned getSIMDInitTempVarNum(var_types simdType);
#else // !FEATURE_SIMD
bool isOpaqueSIMDLclVar(LclVarDsc* varDsc)
{
return false;
}
#endif // FEATURE_SIMD
public:
//------------------------------------------------------------------------
// largestEnregisterableStruct: The size in bytes of the largest struct that can be enregistered.
//
// Notes: It is not guaranteed that a struct of this size or smaller WILL be a
// candidate for enregistration.
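// For illustration: under FEATURE_SIMD on xarch this is maxSIMDStructBytes() (32 bytes when AVX is
// available, otherwise 16; a constant 32 for R2R), and without FEATURE_SIMD it falls back to
// TARGET_POINTER_SIZE.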
unsigned largestEnregisterableStructSize()
{
#ifdef FEATURE_SIMD
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
if (opts.IsReadyToRun())
{
// Return a constant instead of calling maxSIMDStructBytes, as maxSIMDStructBytes performs
// checks that are affected by the current level of instruction set support and would
// otherwise cause the highest level of instruction set support to be reported to crossgen2.
// This api is only ever used as an optimization or assert, so no reporting should
// ever happen.
return YMM_REGSIZE_BYTES;
}
#endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
unsigned vectorRegSize = maxSIMDStructBytes();
assert(vectorRegSize >= TARGET_POINTER_SIZE);
return vectorRegSize;
#else // !FEATURE_SIMD
return TARGET_POINTER_SIZE;
#endif // !FEATURE_SIMD
}
// Used to determine whether a struct *might* be a SIMD type. As this function only takes a size, many
// structs will fit the criteria.
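// For example (with FEATURE_SIMD), a plain 16-byte struct of two doubles returns true here even
// though it is not a SIMD type.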
bool structSizeMightRepresentSIMDType(size_t structSize)
{
#ifdef FEATURE_SIMD
// Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT
// about the size of a struct under the assumption that the struct size needs to be recorded.
// By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T> is
// enregistered will not be messaged to the R2R compiler.
return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize());
#else
return false;
#endif // FEATURE_SIMD
}
#ifdef FEATURE_SIMD
static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID);
#endif // FEATURE_HW_INTRINSICS
private:
// These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType()
// is defined for both FEATURE_SIMD and !FEATURE_SIMD appropriately. The use
// of these routines also avoids the need for #ifdef FEATURE_SIMD specific code.
// Is this var of type SIMD struct?
bool lclVarIsSIMDType(unsigned varNum)
{
return lvaGetDesc(varNum)->lvIsSIMDType();
}
// Is this Local node a SIMD local?
bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree)
{
return lclVarIsSIMDType(lclVarTree->GetLclNum());
}
// Returns true if the TYP_SIMD locals on stack are aligned at their
// preferred byte boundary specified by getSIMDTypeAlignment().
//
// As per the Intel manual, the preferred alignment for AVX vectors is
// 32 bytes. It is not clear whether the additional stack space used in
// aligning the stack is worth the benefit, so for now we use 16-byte
// alignment for AVX 256-bit vectors with unaligned load/stores to/from
// memory. On x86, the stack frame is aligned to 4 bytes. We need to extend
// existing support for double (8-byte) alignment to 16 or 32 byte
// alignment for frames with local SIMD vars, if that is determined to be
// profitable.
//
// On Amd64 and SysV, RSP+8 is aligned on entry to the function (before
// prolog has run). This means that in RBP-based frames RBP will be 16-byte
// aligned. For RSP-based frames these are only sometimes aligned, depending
// on the frame size.
//
bool isSIMDTypeLocalAligned(unsigned varNum)
{
#if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES
if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF)
{
// TODO-Cleanup: Can't this use the lvExactSize on the varDsc?
int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType);
if (alignment <= STACK_ALIGN)
{
bool rbpBased;
int off = lvaFrameAddress(varNum, &rbpBased);
// On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the
// first instruction of a function. If our frame is RBP based
// then RBP will always be 16 bytes aligned, so we can simply
// check the offset.
if (rbpBased)
{
return (off % alignment) == 0;
}
// For RSP-based frame the alignment of RSP depends on our
// locals. rsp+8 is aligned on entry and we just subtract frame
// size so it is not hard to compute. Note that the compiler
// tries hard to make sure the frame size means RSP will be
// 16-byte aligned, but for leaf functions without locals (i.e.
// frameSize = 0) it will not be.
int frameSize = codeGen->genTotalFrameSize();
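// For illustration: with frameSize = 40 and off = 32, (8 - 40 + 32) == 0, so a local
// requiring 16-byte alignment is reported as aligned.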
return ((8 - frameSize + off) % alignment) == 0;
}
}
#endif // FEATURE_SIMD
return false;
}
#ifdef DEBUG
// Answer the question: Is a particular ISA supported?
// Use this api when asking the question so that future
// ISA questions can be asked correctly or when asserting
// support/nonsupport for an instruction set
bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
return (opts.compSupportsISA & (1ULL << isa)) != 0;
#else
return false;
#endif
}
#endif // DEBUG
bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const;
// Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
// The result of this api call will exactly match the target machine
// on which the function is executed (except for CoreLib, where there are special rules)
bool compExactlyDependsOn(CORINFO_InstructionSet isa) const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
uint64_t isaBit = (1ULL << isa);
if ((opts.compSupportsISAReported & isaBit) == 0)
{
if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0))
((Compiler*)this)->opts.compSupportsISAExactly |= isaBit;
((Compiler*)this)->opts.compSupportsISAReported |= isaBit;
}
return (opts.compSupportsISAExactly & isaBit) != 0;
#else
return false;
#endif
}
// Ensure that code will not execute if an instruction set is usable. Call only
// if the instruction set has previously been reported as unusable, but when
// that status has not yet been recorded to the AOT compiler.
void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa)
{
// use compExactlyDependsOn to capture and record the use of the isa
bool isaUsable = compExactlyDependsOn(isa);
// Assert that the ISA is unusable. If it were usable, this function should never have been called.
assert(!isaUsable);
}
// Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
// The result of this api call will match the target machine if the result is true
// If the result is false, then the target machine may have support for the instruction
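// Illustrative use (see canUseVexEncoding below):
//   if (compOpportunisticallyDependsOn(InstructionSet_AVX)) { /* VEX-encoded codegen is allowed */ }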
bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const
{
if ((opts.compSupportsISA & (1ULL << isa)) != 0)
{
return compExactlyDependsOn(isa);
}
else
{
return false;
}
}
// Answer the question: Is a particular ISA supported for explicit hardware intrinsics?
bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const
{
// Report intent to use the ISA to the EE
compExactlyDependsOn(isa);
return ((opts.compSupportsISA & (1ULL << isa)) != 0);
}
bool canUseVexEncoding() const
{
#ifdef TARGET_XARCH
return compOpportunisticallyDependsOn(InstructionSet_AVX);
#else
return false;
#endif
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Compiler XX
XX XX
XX Generic info about the compilation and the method being compiled. XX
XX It is responsible for driving the other phases. XX
XX It is also responsible for all the memory management. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
Compiler* InlineeCompiler; // The Compiler instance for the inlinee
InlineResult* compInlineResult; // The result of importing the inlinee method.
bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
bool compJmpOpUsed; // Does the method do a JMP
bool compLongUsed; // Does the method use TYP_LONG
bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE
bool compTailCallUsed; // Does the method do a tailcall
bool compTailPrefixSeen; // Does the method IL have tail. prefix
bool compLocallocSeen; // Does the method IL have localloc opcode
bool compLocallocUsed; // Does the method use localloc.
bool compLocallocOptimized; // Does the method have an optimized localloc
bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON
bool compQmarkRationalized; // Is it allowed to use a GT_QMARK/GT_COLON node.
bool compUnsafeCastUsed; // Does the method use LDIND/STIND to cast between scalar/reference types
bool compHasBackwardJump; // Does the method (or some inlinee) have a lexically backwards jump?
bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler?
bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts
bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts
bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set
// NOTE: These values are only reliable after
// the importing is completely finished.
#ifdef DEBUG
// State information - which phases have completed?
// These are kept together for easy discoverability
bool bRangeAllowStress;
bool compCodeGenDone;
int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks
bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done?
size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`.
size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder`
#endif // DEBUG
bool fgLocalVarLivenessDone; // Note that this one is used outside of debug.
bool fgLocalVarLivenessChanged;
bool compLSRADone;
bool compRationalIRForm;
bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method.
bool compGeneratingProlog;
bool compGeneratingEpilog;
bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack.
// Insert cookie on frame and code to check the cookie, like VC++ -GS.
bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local
// copies of susceptible parameters to avoid buffer overrun attacks through locals/params
bool getNeedsGSSecurityCookie() const
{
return compNeedsGSSecurityCookie;
}
void setNeedsGSSecurityCookie()
{
compNeedsGSSecurityCookie = true;
}
FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During
// frame layout calculations, this is the level we are currently
// computing.
//---------------------------- JITing options -----------------------------
enum codeOptimize
{
BLENDED_CODE,
SMALL_CODE,
FAST_CODE,
COUNT_OPT_CODE
};
struct Options
{
JitFlags* jitFlags; // all flags passed from the EE
// The instruction sets that the compiler is allowed to emit.
uint64_t compSupportsISA;
// The instruction sets that were reported to the VM as being used by the current method. Subset of
// compSupportsISA.
uint64_t compSupportsISAReported;
// The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations.
// Subset of compSupportsISA.
// The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only
// used via explicit hardware intrinsics.
uint64_t compSupportsISAExactly;
void setSupportedISAs(CORINFO_InstructionSetFlags isas)
{
compSupportsISA = isas.GetFlagsRaw();
}
unsigned compFlags; // method attributes
unsigned instrCount;
unsigned lvRefCount;
codeOptimize compCodeOpt; // what type of code optimizations
bool compUseCMOV;
// optimize maximally and/or favor speed over size?
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000
#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000
#define DEFAULT_MIN_OPTS_BB_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000
// Maximum number of locals before turning off inlining
#define MAX_LV_NUM_COUNT_FOR_INLINING 512
bool compMinOpts;
bool compMinOptsIsSet;
#ifdef DEBUG
mutable bool compMinOptsIsUsed;
bool MinOpts() const
{
assert(compMinOptsIsSet);
compMinOptsIsUsed = true;
return compMinOpts;
}
bool IsMinOptsSet() const
{
return compMinOptsIsSet;
}
#else // !DEBUG
bool MinOpts() const
{
return compMinOpts;
}
bool IsMinOptsSet() const
{
return compMinOptsIsSet;
}
#endif // !DEBUG
bool OptimizationDisabled() const
{
return MinOpts() || compDbgCode;
}
bool OptimizationEnabled() const
{
return !OptimizationDisabled();
}
void SetMinOpts(bool val)
{
assert(!compMinOptsIsUsed);
assert(!compMinOptsIsSet || (compMinOpts == val));
compMinOpts = val;
compMinOptsIsSet = true;
}
// true if the CLFLG_* for an optimization is set.
bool OptEnabled(unsigned optFlag) const
{
return !!(compFlags & optFlag);
}
#ifdef FEATURE_READYTORUN
bool IsReadyToRun() const
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN);
}
#else
bool IsReadyToRun() const
{
return false;
}
#endif
// Check if the compilation is control-flow guard enabled.
bool IsCFGEnabled() const
{
#if defined(TARGET_ARM64) || defined(TARGET_AMD64)
// On these platforms we assume the register that the target is
// passed in is preserved by the validator and take care to get the
// target from the register for the call (even in debug mode).
static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0);
if (JitConfig.JitForceControlFlowGuard())
return true;
return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG);
#else
// The remaining platforms are not supported and would require some
// work to support.
//
// ARM32:
// The ARM32 validator does not preserve any volatile registers
// which means we have to take special care to allocate and use a
// callee-saved register (reloading the target from memory is a
// security issue).
//
// x86:
// On x86 some VSD calls disassemble the call site and expect an
// indirect call which is fundamentally incompatible with CFG.
// This would require a different way to pass this information
// through.
//
return false;
#endif
}
#ifdef FEATURE_ON_STACK_REPLACEMENT
bool IsOSR() const
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR);
}
#else
bool IsOSR() const
{
return false;
}
#endif
// true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
// PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as
// the current logic for frame setup initializes and pushes
// the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot
// safely be pushed/popped while the thread is in a preemptive state).
bool ShouldUsePInvokeHelpers()
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) ||
jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
}
// true if we should insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method
// prolog/epilog
bool IsReversePInvoke()
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
}
bool compScopeInfo; // Generate the LocalVar info ?
bool compDbgCode; // Generate debugger-friendly code?
bool compDbgInfo; // Gather debugging info?
bool compDbgEnC;
#ifdef PROFILING_SUPPORTED
bool compNoPInvokeInlineCB;
#else
static const bool compNoPInvokeInlineCB;
#endif
#ifdef DEBUG
bool compGcChecks; // Check arguments and return values to ensure they are sane
#endif
#if defined(DEBUG) && defined(TARGET_XARCH)
bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct.
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#if defined(DEBUG) && defined(TARGET_X86)
bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86.
#endif // defined(DEBUG) && defined(TARGET_X86)
bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen
#ifdef DEBUG
#if defined(TARGET_XARCH)
bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible
#endif
#endif // DEBUG
#ifdef UNIX_AMD64_ABI
// This flag indicates whether there is a need to align the frame.
// On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for
// FastTailCall. These slots make the frame size non-zero, so the alignment logic will be called.
// On AMD64-Unix, there are no such slots. It is possible to have calls in a method with a frame size of
// 0, in which case the frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by
// remembering that there are calls and making sure the frame alignment logic is executed.
bool compNeedToAlignFrame;
#endif // UNIX_AMD64_ABI
bool compProcedureSplitting; // Separate cold code from hot code
bool genFPorder; // Preserve FP order (operations are non-commutative)
bool genFPopt; // Can we do frame-pointer-omission optimization?
bool altJit; // True if we are an altjit and are compiling this method
#ifdef OPT_CONFIG
bool optRepeat; // Repeat optimizer phases k times
#endif
#ifdef DEBUG
bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH
bool dspCode; // Display native code generated
bool dspEHTable; // Display the EH table reported to the VM
bool dspDebugInfo; // Display the Debug info reported to the VM
bool dspInstrs; // Display the IL instructions intermixed with the native code output
bool dspLines; // Display source-code lines intermixed with native code output
bool dmpHex; // Display raw bytes in hex of native code output
bool varNames; // Display variables names in native code output
bool disAsm; // Display native code as it is generated
bool disAsmSpilled; // Display native code when any register spilling occurs
bool disasmWithGC; // Display GC info interleaved with disassembly.
bool disDiffable; // Makes the Disassembly code 'diff-able'
bool disAddr; // Display process address next to each instruction in disassembly code
bool disAlignment; // Display alignment boundaries in disassembly code
bool disAsm2; // Display native code after it is generated using external disassembler
bool dspOrder; // Display names of each of the methods that we ngen/jit
bool dspUnwind; // Display the unwind info output
bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable)
bool compLongAddress; // Force using large pseudo instructions for long address
// (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC)
bool dspGCtbls; // Display the GC tables
#endif
bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method
// Default numbers used to perform loop alignment. All the numbers are chosen
// based on experimenting with various benchmarks.
// Default minimum loop block weight required to enable loop alignment.
#define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4
// By default a loop will be aligned at 32B address boundary to get better
// performance as per architecture manuals.
#define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20
// For non-adaptive loop alignment, by default, only align a loop whose size is
// at most 3 times the alignment block size. If the loop is bigger than that, it is most
// likely complicated enough that loop alignment will not impact performance.
#define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3
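// For illustration: with the default 32-byte (0x20) boundary, non-adaptive alignment only
// considers loops of at most 96 bytes.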
#ifdef DEBUG
// Loop alignment variables
// If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary.
bool compJitAlignLoopForJcc;
#endif
// For non-adaptive alignment, maximum loop size (in bytes) up to which alignment will be done.
unsigned short compJitAlignLoopMaxCodeSize;
// Minimum weight needed for the first block of a loop to make it a candidate for alignment.
unsigned short compJitAlignLoopMinBlockWeight;
// For non-adaptive alignment, address boundary (power of 2) at which loop alignment should
// be done. By default, 32B.
unsigned short compJitAlignLoopBoundary;
// Padding limit to align a loop.
unsigned short compJitAlignPaddingLimit;
// If set, perform adaptive loop alignment that limits number of padding based on loop size.
bool compJitAlignLoopAdaptive;
// If set, tries to hide alignment instructions behind unconditional jumps.
bool compJitHideAlignBehindJmp;
#ifdef LATE_DISASM
bool doLateDisasm; // Run the late disassembler
#endif // LATE_DISASM
#if DUMP_GC_TABLES && !defined(DEBUG)
#pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
static const bool dspGCtbls = true;
#endif
#ifdef PROFILING_SUPPORTED
// Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()).
// This option helps make the JIT behave as if it is running under a profiler.
bool compJitELTHookEnabled;
#endif // PROFILING_SUPPORTED
#if FEATURE_TAILCALL_OPT
// Whether opportunistic or implicit tail call optimization is enabled.
bool compTailCallOpt;
// Whether optimization of transforming a recursive tail call into a loop is enabled.
bool compTailCallLoopOpt;
#endif
#if FEATURE_FASTTAILCALL
// Whether fast tail calls are allowed.
bool compFastTailCalls;
#endif // FEATURE_FASTTAILCALL
#if defined(TARGET_ARM64)
// Decision about whether to save FP/LR registers with callee-saved registers (see
// COMPlus_JitSaveFpLrWithCalleSavedRegisters).
int compJitSaveFpLrWithCalleeSavedRegisters;
#endif // defined(TARGET_ARM64)
#ifdef CONFIGURABLE_ARM_ABI
bool compUseSoftFP = false;
#else
#ifdef ARM_SOFTFP
static const bool compUseSoftFP = true;
#else // !ARM_SOFTFP
static const bool compUseSoftFP = false;
#endif // ARM_SOFTFP
#endif // CONFIGURABLE_ARM_ABI
} opts;
static bool s_pAltJitExcludeAssembliesListInitialized;
static AssemblyNamesList2* s_pAltJitExcludeAssembliesList;
#ifdef DEBUG
static bool s_pJitDisasmIncludeAssembliesListInitialized;
static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList;
static bool s_pJitFunctionFileInitialized;
static MethodSet* s_pJitMethodSet;
#endif // DEBUG
#ifdef DEBUG
// Silence the warning about a cast to a greater size. It is easier to silence it than to construct code the
// compiler is happy with, and it is safe in this case.
#pragma warning(push)
#pragma warning(disable : 4312)
template <typename T>
T dspPtr(T p)
{
return (p == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : p);
}
template <typename T>
T dspOffset(T o)
{
return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o);
}
#pragma warning(pop)
static int dspTreeID(GenTree* tree)
{
return tree->gtTreeID;
}
static void printStmtID(Statement* stmt)
{
assert(stmt != nullptr);
printf(FMT_STMT, stmt->GetID());
}
static void printTreeID(GenTree* tree)
{
if (tree == nullptr)
{
printf("[------]");
}
else
{
printf("[%06d]", dspTreeID(tree));
}
}
const char* pgoSourceToString(ICorJitInfo::PgoSource p);
const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail);
#endif // DEBUG
// clang-format off
#define STRESS_MODES \
\
STRESS_MODE(NONE) \
\
/* "Variations" stress areas which we try to mix up with each other. */ \
/* These should not be exhaustively used as they might */ \
/* hide/trivialize other areas */ \
\
STRESS_MODE(REGS) \
STRESS_MODE(DBL_ALN) \
STRESS_MODE(LCL_FLDS) \
STRESS_MODE(UNROLL_LOOPS) \
STRESS_MODE(MAKE_CSE) \
STRESS_MODE(LEGACY_INLINE) \
STRESS_MODE(CLONE_EXPR) \
STRESS_MODE(USE_CMOV) \
STRESS_MODE(FOLD) \
STRESS_MODE(MERGED_RETURNS) \
STRESS_MODE(BB_PROFILE) \
STRESS_MODE(OPT_BOOLS_GC) \
STRESS_MODE(REMORPH_TREES) \
STRESS_MODE(64RSLT_MUL) \
STRESS_MODE(DO_WHILE_LOOPS) \
STRESS_MODE(MIN_OPTS) \
STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \
STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \
STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \
STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \
STRESS_MODE(UNSAFE_BUFFER_CHECKS) \
STRESS_MODE(NULL_OBJECT_CHECK) \
STRESS_MODE(PINVOKE_RESTORE_ESP) \
STRESS_MODE(RANDOM_INLINE) \
STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \
STRESS_MODE(GENERIC_VARN) \
STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \
STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \
STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \
STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \
\
/* After COUNT_VARN, stress level 2 does all of these all the time */ \
\
STRESS_MODE(COUNT_VARN) \
\
/* "Check" stress areas that can be exhaustively used if we */ \
/* dont care about performance at all */ \
\
STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \
STRESS_MODE(CHK_FLOW_UPDATE) \
STRESS_MODE(EMITTER) \
STRESS_MODE(CHK_REIMPORT) \
STRESS_MODE(FLATFP) \
STRESS_MODE(GENERIC_CHECK) \
STRESS_MODE(COUNT)
enum compStressArea
{
#define STRESS_MODE(mode) STRESS_##mode,
STRESS_MODES
#undef STRESS_MODE
};
// clang-format on
#ifdef DEBUG
static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1];
BYTE compActiveStressModes[STRESS_COUNT];
#endif // DEBUG
#define MAX_STRESS_WEIGHT 100
bool compStressCompile(compStressArea stressArea, unsigned weightPercentage);
bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage);
#ifdef DEBUG
bool compInlineStress()
{
return compStressCompile(STRESS_LEGACY_INLINE, 50);
}
bool compRandomInlineStress()
{
return compStressCompile(STRESS_RANDOM_INLINE, 50);
}
bool compPromoteFewerStructs(unsigned lclNum);
#endif // DEBUG
bool compTailCallStress()
{
#ifdef DEBUG
// Do not stress tailcalls in IL stubs as the runtime creates several IL
// stubs to implement the tailcall mechanism, which would then
// recursively create more IL stubs.
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) &&
(JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5));
#else
return false;
#endif
}
const char* compGetTieringName(bool wantShortName = false) const;
const char* compGetStressMessage() const;
codeOptimize compCodeOpt() const
{
#if 0
// Switching between size & speed has measurable throughput impact
// (3.5% on NGen CoreLib when measured). It used to be enabled for
// DEBUG, but should generate identical code between CHK & RET builds,
// so that's not acceptable.
// TODO-Throughput: Figure out what to do about size vs. speed & throughput.
// Investigate the cause of the throughput regression.
return opts.compCodeOpt;
#else
return BLENDED_CODE;
#endif
}
//--------------------- Info about the procedure --------------------------
struct Info
{
COMP_HANDLE compCompHnd;
CORINFO_MODULE_HANDLE compScopeHnd;
CORINFO_CLASS_HANDLE compClassHnd;
CORINFO_METHOD_HANDLE compMethodHnd;
CORINFO_METHOD_INFO* compMethodInfo;
bool hasCircularClassConstraints;
bool hasCircularMethodConstraints;
#if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS
const char* compMethodName;
const char* compClassName;
const char* compFullName;
double compPerfScore;
int compMethodSuperPMIIndex; // useful when debugging under SuperPMI
#endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS
#if defined(DEBUG) || defined(INLINE_DATA)
// Method hash is logically const, but computed
// on first demand.
mutable unsigned compMethodHashPrivate;
unsigned compMethodHash() const;
#endif // defined(DEBUG) || defined(INLINE_DATA)
#ifdef PSEUDORANDOM_NOP_INSERTION
// things for pseudorandom nop insertion
unsigned compChecksum;
CLRRandom compRNG;
#endif
// The following holds the FLG_xxxx flags for the method we're compiling.
unsigned compFlags;
// The following holds the class attributes for the method we're compiling.
unsigned compClassAttr;
const BYTE* compCode;
IL_OFFSET compILCodeSize; // The IL code size
IL_OFFSET compILImportSize; // Estimated amount of IL actually imported
IL_OFFSET compILEntry; // The IL entry point (normally 0)
PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr)
UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This
// is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if:
// (1) the code is not hot/cold split, and we issued less code than we expected, or
// (2) the code is hot/cold split, and we issued less code than we expected
// in the cold section (the hot section will always be padded out to compTotalHotCodeSize).
bool compIsStatic : 1; // Is the method static (no 'this' pointer)?
bool compIsVarArgs : 1; // Does the method have varargs parameters?
bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options?
bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback
bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic
bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used.
var_types compRetType; // Return type of the method as declared in IL
var_types compRetNativeType; // Normalized return type as per target arch ABI
unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden)
unsigned compArgsCount; // Number of arguments (incl. implicit and hidden)
#if FEATURE_FASTTAILCALL
unsigned compArgStackSize; // Incoming argument stack size in bytes
#endif // FEATURE_FASTTAILCALL
unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present);
int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE)
unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var)
unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden)
unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden)
unsigned compMaxStack;
UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method
UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method
unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition.
CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method.
unsigned compLvFrameListRoot; // lclNum for the Frame root
unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL.
// You should generally use compHndBBtabCount instead: it is the
// current number of EH clauses (after additions like synchronized
// methods and funclets, and removals like unreachable code deletion).
Target::ArgOrder compArgOrder;
bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler
// and the VM expects that, or the JIT is a "self-host" compiler
// (e.g., x86 hosted targeting x86) and the VM expects that.
/* The following holds IL scope information about local variables.
*/
unsigned compVarScopesCount;
VarScopeDsc* compVarScopes;
/* The following holds information about instr offsets for
* which we need to report IP-mappings
*/
IL_OFFSET* compStmtOffsets; // sorted
unsigned compStmtOffsetsCount;
ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit;
#define CPU_X86 0x0100 // The generic X86 CPU
#define CPU_X86_PENTIUM_4 0x0110
#define CPU_X64 0x0200 // The generic x64 CPU
#define CPU_AMD_X64 0x0210 // AMD x64 CPU
#define CPU_INTEL_X64 0x0240 // Intel x64 CPU
#define CPU_ARM 0x0300 // The generic ARM CPU
#define CPU_ARM64 0x0400 // The generic ARM64 CPU
unsigned genCPU; // What CPU are we running on
// Number of class profile probes in this method
unsigned compClassProbeCount;
} info;
// Returns true if the method being compiled returns a non-void and non-struct value.
// Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a
// single register as per target arch ABI (e.g. on Amd64 Windows structs of size 1, 2,
// 4 or 8 get normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; on Arm, HFA structs).
// Methods returning such structs are considered to return non-struct return value and
// this method returns true in that case.
bool compMethodReturnsNativeScalarType()
{
return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType);
}
// Returns true if the method being compiled returns RetBuf addr as its return value
bool compMethodReturnsRetBufAddr()
{
// There are cases where implicit RetBuf argument should be explicitly returned in a register.
// In such cases the return type is changed to TYP_BYREF and appropriate IR is generated.
// These cases are:
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_AMD64
// 1. on x64 Windows and Unix the address of RetBuf needs to be returned by
// methods with hidden RetBufArg in RAX. In such case GT_RETURN is of TYP_BYREF,
// returning the address of RetBuf.
return (info.compRetBuffArg != BAD_VAR_NUM);
#else // TARGET_AMD64
#ifdef PROFILING_SUPPORTED
// 2. Profiler Leave callback expects the address of retbuf as return value for
// methods with hidden RetBuf argument. impReturnInstruction() when profiler
// callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for
// methods with hidden RetBufArg.
if (compIsProfilerHookNeeded())
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
#endif
// 3. Windows ARM64 native instance calling convention requires the address of RetBuff
// to be returned in x0.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM64)
if (TargetOS::IsWindows)
{
auto callConv = info.compCallConv;
if (callConvIsInstanceMethodCallConv(callConv))
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
}
#endif // TARGET_ARM64
// 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86)
if (info.compCallConv != CorInfoCallConvExtension::Managed)
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
#endif
return false;
#endif // TARGET_AMD64
}
// Returns true if the method returns a value in more than one return register
// TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs?
// TODO-ARM64: Does this apply for ARM64 too?
bool compMethodReturnsMultiRegRetType()
{
#if FEATURE_MULTIREG_RET
#if defined(TARGET_X86)
// On x86, 64-bit longs and structs are returned in multiple registers
return varTypeIsLong(info.compRetNativeType) ||
(varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM));
#else // targets: X64-UNIX, ARM64 or ARM32
// On all other targets that support multireg return values:
// Methods returning a struct in multiple registers have a return value of TYP_STRUCT.
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg
return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM);
#endif // TARGET_XXX
#else // not FEATURE_MULTIREG_RET
// For this architecture there are no multireg returns
return false;
#endif // FEATURE_MULTIREG_RET
}
bool compEnregLocals()
{
return ((opts.compFlags & CLFLG_REGVAR) != 0);
}
bool compEnregStructLocals()
{
return (JitConfig.JitEnregStructLocals() != 0);
}
bool compObjectStackAllocation()
{
return (JitConfig.JitObjectStackAllocation() != 0);
}
// Returns true if the method returns a value in more than one return register,
// it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed.
// The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling,
// this method correctly returns false for it (it is passed as an HVA), while the original returns true.
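// For illustration: on ARM64 a method returning Vector128<float> (TYP_SIMD16 with no RetBuf)
// reports true from compMethodReturnsMultiRegRetType but false here.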
bool compMethodReturnsMultiRegRegTypeAlternate()
{
#if FEATURE_MULTIREG_RET
#if defined(TARGET_X86)
// On x86, 64-bit longs and structs are returned in multiple registers
return varTypeIsLong(info.compRetNativeType) ||
(varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM));
#else // targets: X64-UNIX, ARM64 or ARM32
#if defined(TARGET_ARM64)
// TYP_SIMD* are returned in one register.
if (varTypeIsSIMD(info.compRetNativeType))
{
return false;
}
#endif
// On all other targets that support multireg return values:
// Methods returning a struct in multiple registers have a return value of TYP_STRUCT.
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg
return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM);
#endif // TARGET_XXX
#else // not FEATURE_MULTIREG_RET
// For this architecture there are no multireg returns
return false;
#endif // FEATURE_MULTIREG_RET
}
// Returns true if the method being compiled returns a value
bool compMethodHasRetVal()
{
return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() ||
compMethodReturnsMultiRegRetType();
}
// Returns true if the method requires a PInvoke prolog and epilog
bool compMethodRequiresPInvokeFrame()
{
return (info.compUnmanagedCallCountWithGCTransition > 0);
}
// Returns true if address-exposed user variables should be poisoned with a recognizable value
bool compShouldPoisonFrame()
{
#ifdef FEATURE_ON_STACK_REPLACEMENT
if (opts.IsOSR())
return false;
#endif
return !info.compInitMem && opts.compDbgCode;
}
// Returns true if the jit supports having patchpoints in this method.
// Optionally, get the reason why not.
bool compCanHavePatchpoints(const char** reason = nullptr);
#if defined(DEBUG)
void compDispLocalVars();
#endif // DEBUG
private:
class ClassLayoutTable* m_classLayoutTable;
class ClassLayoutTable* typCreateClassLayoutTable();
class ClassLayoutTable* typGetClassLayoutTable();
public:
// Get the layout having the specified layout number.
ClassLayout* typGetLayoutByNum(unsigned layoutNum);
// Get the layout number of the specified layout.
unsigned typGetLayoutNum(ClassLayout* layout);
// Get the layout having the specified size but no class handle.
ClassLayout* typGetBlkLayout(unsigned blockSize);
// Get the number of a layout having the specified size but no class handle.
unsigned typGetBlkLayoutNum(unsigned blockSize);
// Get the layout for the specified class handle.
ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle);
// Get the number of a layout for the specified class handle.
unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle);
//-------------------------- Global Compiler Data ------------------------------------
#ifdef DEBUG
private:
static LONG s_compMethodsCount; // to produce unique label names
#endif
public:
#ifdef DEBUG
LONG compMethodID;
unsigned compGenTreeID;
unsigned compStatementID;
unsigned compBasicBlockID;
#endif
BasicBlock* compCurBB; // the current basic block in process
Statement* compCurStmt; // the current statement in process
GenTree* compCurTree; // the current tree in process
// The following is used to create the 'method JIT info' block.
size_t compInfoBlkSize;
BYTE* compInfoBlkAddr;
EHblkDsc* compHndBBtab; // array of EH data
unsigned compHndBBtabCount; // element count of used elements in EH data array
unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array
#if defined(TARGET_X86)
//-------------------------------------------------------------------------
// Tracking of region covered by the monitor in synchronized methods
void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER
void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT
#endif // TARGET_X86
Phases mostRecentlyActivePhase; // the most recently active phase
PhaseChecks activePhaseChecks; // the currently active phase checks
//-------------------------------------------------------------------------
// The following keeps track of how many bytes of local frame space we've
// grabbed so far in the current function, and how many argument bytes we
// need to pop when we return.
//
unsigned compLclFrameSize; // secObject+lclBlk+locals+temps
// Count of callee-saved regs we pushed in the prolog.
// Does not include EBP for isFramePointerUsed() and double-aligned frames.
// In case of Amd64 this doesn't include float regs saved on stack.
unsigned compCalleeRegsPushed;
#if defined(TARGET_XARCH)
// Mask of callee saved float regs on stack.
regMaskTP compCalleeFPRegsSavedMask;
#endif
#ifdef TARGET_AMD64
// Quirk for VS debug-launch scenario to work:
// Bytes of padding between save-reg area and locals.
#define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES)
unsigned compVSQuirkStackPaddingNeeded;
#endif
unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg))
unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args
unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args
unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args
#if defined(TARGET_ARM64)
struct FrameInfo
{
// Frame type (1-5)
int frameType;
// Distance from established (method body) SP to base of callee save area
int calleeSaveSpOffset;
// Amount to subtract from SP before saving (prolog) OR
// to add to SP after restoring (epilog) callee saves
int calleeSaveSpDelta;
// Distance from established SP to where caller's FP was saved
int offsetSpToSavedFp;
} compFrameInfo;
#endif
//-------------------------------------------------------------------------
static void compStartup(); // One-time initialization
static void compShutdown(); // One-time finalization
void compInit(ArenaAllocator* pAlloc,
CORINFO_METHOD_HANDLE methodHnd,
COMP_HANDLE compHnd,
CORINFO_METHOD_INFO* methodInfo,
InlineInfo* inlineInfo);
void compDone();
static void compDisplayStaticSizes(FILE* fout);
//------------ Some utility functions --------------
void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
void** ppIndirection); /* OUT */
// Several JIT/EE interface functions return a CorInfoType, and also return a
// class handle as an out parameter if the type is a value class. Returns the
// size of the type these describe.
unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd);
// Returns true if the method being compiled has a return buffer.
bool compHasRetBuffArg();
#ifdef DEBUG
// Components used by the compiler may write unit test suites, and
// have them run within this method. They will be run only once per process, and only
// in debug. (Perhaps should be under the control of a COMPlus_ flag.)
// These should fail by asserting.
void compDoComponentUnitTestsOnce();
#endif // DEBUG
int compCompile(CORINFO_MODULE_HANDLE classPtr,
void** methodCodePtr,
uint32_t* methodCodeSize,
JitFlags* compileFlags);
void compCompileFinish();
int compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
COMP_HANDLE compHnd,
CORINFO_METHOD_INFO* methodInfo,
void** methodCodePtr,
uint32_t* methodCodeSize,
JitFlags* compileFlag);
ArenaAllocator* compGetArenaAllocator();
void generatePatchpointInfo();
#if MEASURE_MEM_ALLOC
static bool s_dspMemStats; // Display per-phase memory statistics for every function
#endif // MEASURE_MEM_ALLOC
#if LOOP_HOIST_STATS
unsigned m_loopsConsidered;
bool m_curLoopHasHoistedExpression;
unsigned m_loopsWithHoistedExpressions;
unsigned m_totalHoistedExpressions;
void AddLoopHoistStats();
void PrintPerMethodLoopHoistStats();
static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below.
static unsigned s_loopsConsidered;
static unsigned s_loopsWithHoistedExpressions;
static unsigned s_totalHoistedExpressions;
static void PrintAggregateLoopHoistStats(FILE* f);
#endif // LOOP_HOIST_STATS
#if TRACK_ENREG_STATS
class EnregisterStats
{
private:
unsigned m_totalNumberOfVars;
unsigned m_totalNumberOfStructVars;
unsigned m_totalNumberOfEnregVars;
unsigned m_totalNumberOfStructEnregVars;
unsigned m_addrExposed;
unsigned m_VMNeedsStackAddr;
unsigned m_localField;
unsigned m_blockOp;
unsigned m_dontEnregStructs;
unsigned m_notRegSizeStruct;
unsigned m_structArg;
unsigned m_lclAddrNode;
unsigned m_castTakesAddr;
unsigned m_storeBlkSrc;
unsigned m_oneAsgRetyping;
unsigned m_swizzleArg;
unsigned m_blockOpRet;
unsigned m_returnSpCheck;
unsigned m_simdUserForcesDep;
unsigned m_liveInOutHndlr;
unsigned m_depField;
unsigned m_noRegVars;
unsigned m_minOptsGC;
#ifdef JIT32_GCENCODER
unsigned m_PinningRef;
#endif // JIT32_GCENCODER
#if !defined(TARGET_64BIT)
unsigned m_longParamField;
#endif // !TARGET_64BIT
unsigned m_parentExposed;
unsigned m_tooConservative;
unsigned m_escapeAddress;
unsigned m_osrExposed;
unsigned m_stressLclFld;
unsigned m_copyFldByFld;
unsigned m_dispatchRetBuf;
unsigned m_wideIndir;
public:
void RecordLocal(const LclVarDsc* varDsc);
void Dump(FILE* fout) const;
};
static EnregisterStats s_enregisterStats;
#endif // TRACK_ENREG_STATS
bool compIsForImportOnly();
bool compIsForInlining() const;
bool compDonotInline();
#ifdef DEBUG
// Get the default fill char value; we randomize this value when JitStress is enabled.
static unsigned char compGetJitDefaultFill(Compiler* comp);
const char* compLocalVarName(unsigned varNum, unsigned offs);
VarName compVarName(regNumber reg, bool isFloatReg = false);
const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false);
const char* compRegNameForSize(regNumber reg, size_t size);
const char* compFPregVarName(unsigned fpReg, bool displayVar = false);
void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP);
void compDspSrcLinesByLineNum(unsigned line, bool seek = false);
#endif // DEBUG
//-------------------------------------------------------------------------
struct VarScopeListNode
{
VarScopeDsc* data;
VarScopeListNode* next;
static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc)
{
VarScopeListNode* node = new (alloc) VarScopeListNode;
node->data = value;
node->next = nullptr;
return node;
}
};
struct VarScopeMapInfo
{
VarScopeListNode* head;
VarScopeListNode* tail;
static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc)
{
VarScopeMapInfo* info = new (alloc) VarScopeMapInfo;
info->head = node;
info->tail = node;
return info;
}
};
// Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup.
static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32;
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap;
// Map to keep variables' scopes, indexed by varNum, containing each var's scope dscs at its index.
VarNumToScopeDscMap* compVarScopeMap;
VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd);
VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs);
VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs);
void compInitVarScopeMap();
VarScopeDsc** compEnterScopeList; // List has the offsets where variables
// enter scope, sorted by instr offset
unsigned compNextEnterScope;
VarScopeDsc** compExitScopeList; // List has the offsets where variables
// go out of scope, sorted by instr offset
unsigned compNextExitScope;
void compInitScopeLists();
void compResetScopeLists();
VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false);
VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false);
void compProcessScopesUntil(unsigned offset,
VARSET_TP* inScope,
void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*),
void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*));
#ifdef DEBUG
void compDispScopeLists();
#endif // DEBUG
bool compIsProfilerHookNeeded();
//-------------------------------------------------------------------------
/* Statistical Data Gathering */
void compJitStats(); // call this function and enable
// various ifdef's below for statistical data
#if CALL_ARG_STATS
void compCallArgStats();
static void compDispCallArgStats(FILE* fout);
#endif
//-------------------------------------------------------------------------
protected:
#ifdef DEBUG
bool skipMethod();
#endif
ArenaAllocator* compArenaAllocator;
public:
void compFunctionTraceStart();
void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI);
protected:
size_t compMaxUncheckedOffsetForNullObject;
void compInitOptions(JitFlags* compileFlags);
void compSetProcessor();
void compInitDebuggingInfo();
void compSetOptimizationLevel();
#ifdef TARGET_ARMARCH
bool compRsvdRegCheck(FrameLayoutState curState);
#endif
void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags);
// Clear annotations produced during optimizations; to be used between iterations when repeating opts.
void ResetOptAnnotations();
// Regenerate loop descriptors; to be used between iterations when repeating opts.
void RecomputeLoopInfo();
#ifdef PROFILING_SUPPORTED
// Data required for generating profiler Enter/Leave/TailCall hooks
bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method
void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks
bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle
#endif
public:
// Assumes called as part of process shutdown; does any compiler-specific work associated with that.
static void ProcessShutdownWork(ICorStaticInfo* statInfo);
CompAllocator getAllocator(CompMemKind cmk = CMK_Generic)
{
return CompAllocator(compArenaAllocator, cmk);
}
CompAllocator getAllocatorGC()
{
return getAllocator(CMK_GC);
}
CompAllocator getAllocatorLoopHoist()
{
return getAllocator(CMK_LoopHoist);
}
#ifdef DEBUG
CompAllocator getAllocatorDebugOnly()
{
return getAllocator(CMK_DebugOnly);
}
#endif // DEBUG
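// A minimal usage sketch (illustrative only; "count" is an assumed element count):
//
//     CompAllocator alloc = getAllocator(CMK_LoopHoist);
//     unsigned*     nums  = alloc.allocate<unsigned>(count); // arena allocation tagged with the memory kind
//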
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX typeInfo XX
XX XX
XX Checks for type compatibility and merges types XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// Returns true if child is equal to or a subtype of parent for merge purposes
// This support is necessary to support attributes that are not described in,
// for example, signatures. For example, the permanent home byref (a byref that
// points to the gc heap) isn't a property of method signatures; therefore,
// it is safe to have mismatches here (that tiCompatibleWith will not flag),
// but when deciding if we need to reimport a block, we need to take these
// into account
bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;
// Returns true if child is equal to or a subtype of parent.
// normalisedForStack indicates that both types are normalised for the stack
bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;
// Merges pDest and pSrc. Returns false if merge is undefined.
// *pDest is modified to represent the merged type. Sets "*changed" to true
// if this changes "*pDest".
bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const;
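// A hedged call-site sketch (illustrative only; "destType" and "srcType" are assumed typeInfo values):
//
//     typeInfo merged  = destType;
//     bool     changed = false;
//     if (tiMergeToCommonParent(&merged, &srcType, &changed))
//     {
//         // "merged" now holds the common parent; "changed" reports whether it differs from "destType"
//     }
//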
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX IL verification stuff XX
XX XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// The following is used to track liveness of local variables, initialization
// of valueclass constructors, and type safe use of IL instructions.
// dynamic state info needed for verification
EntryState verCurrentState;
// The this ptr of object type .ctors is considered initialized only after
// the base class ctor is called, or an alternate ctor is called.
// An uninitialized this ptr can be used to access fields, but cannot
// be used to call a member function.
bool verTrackObjCtorInitState;
void verInitBBEntryState(BasicBlock* block, EntryState* currentState);
// Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state.
void verSetThisInit(BasicBlock* block, ThisInitState tis);
void verInitCurrentState();
void verResetCurrentState(BasicBlock* block, EntryState* currentState);
// Merges the current verification state into the entry state of "block"; returns false if that merge fails,
// true if it succeeds. Further sets "*changed" to true if this changes the entry state of "block".
bool verMergeEntryStates(BasicBlock* block, bool* changed);
void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg));
void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg));
typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd,
bool bashStructToRef = false); // converts from jit type representation to typeInfo
typeInfo verMakeTypeInfo(CorInfoType ciType,
CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo
bool verIsSDArray(const typeInfo& ti);
typeInfo verGetArrayElemType(const typeInfo& ti);
typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args);
bool verIsByRefLike(const typeInfo& ti);
bool verIsSafeToReturnByRef(const typeInfo& ti);
// generic type variables range over types that satisfy IsBoxable
bool verIsBoxable(const typeInfo& ti);
void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file)
DEBUGARG(unsigned line));
void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file)
DEBUGARG(unsigned line));
bool verCheckTailCallConstraint(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call
// on a type parameter?
bool speculative // If true, won't throw if verification fails. Instead it will
// return false to the caller.
// If false, it will throw.
);
bool verIsBoxedValueType(const typeInfo& ti);
void verVerifyCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
bool tailCall,
bool readonlyCall, // is this a "readonly." call?
const BYTE* delegateCreateStart,
const BYTE* codeAddr,
CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName));
bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef);
typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType);
typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType);
void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const CORINFO_FIELD_INFO& fieldInfo,
const typeInfo* tiThis,
bool mutator,
bool allowPlainStructAsThis = false);
void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode);
void verVerifyThisPtrInitialised();
bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target);
#ifdef DEBUG
// One line log function. Default level is 0. Increasing it gives you
// more log information
// levels are currently unused: #define JITDUMP(level,...) ();
void JitLogEE(unsigned level, const char* fmt, ...);
bool compDebugBreak;
bool compJitHaltMethod();
#endif
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GS Security checks for unsafe buffers XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
struct ShadowParamVarInfo
{
FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other
unsigned shadowCopy; // Lcl var num, if not valid set to BAD_VAR_NUM
static bool mayNeedShadowCopy(LclVarDsc* varDsc)
{
#if defined(TARGET_AMD64)
// GS cookie logic to create shadow slots, create trees to copy reg args to shadow
// slots and update all trees to refer to shadow slots is done immediately after
// fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines
// not to shadow a parameter. Also, LSRA could potentially spill a param which is passed
// in register. Therefore, conservatively all params may need a shadow copy. Note that
// GS cookie logic further checks whether the param is a ptr or an unsafe buffer before
// creating a shadow slot even though this routine returns true.
//
// TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than
// required. There are two cases under which a reg arg could potentially be used from its
// home location:
// a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates())
// b) LSRA spills it
//
// Possible solution to address case (a)
// - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked
// in this routine. Note that checking liveness out of an exception handler is something we may not be
// able to do here since GS cookie logic is invoked ahead of liveness computation.
// Therefore, for methods with exception handling that need a GS cookie check we might have
// to take the conservative approach.
//
// Possible solution to address case (b)
// - Whenever a parameter passed in an argument register needs to be spilled by LSRA, we
// create a new spill temp if the method needs GS cookie check.
return varDsc->lvIsParam;
#else // !defined(TARGET_AMD64)
return varDsc->lvIsParam && !varDsc->lvIsRegArg;
#endif
}
#ifdef DEBUG
void Print()
{
printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy);
}
#endif
};
GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks
GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL
ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code
void gsGSChecksInitCookie(); // Grabs cookie variable
void gsCopyShadowParams(); // Identify vulnerable params and create shadow copies
bool gsFindVulnerableParams(); // Shadow param analysis code
void gsParamsToShadows(); // Insert copy code and replace param uses with shadows
static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk
static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk
#define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined.
// This can be overridden by setting the COMPlus_JITInlineSize env variable.
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined
#define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers
private:
#ifdef FEATURE_JIT_METHOD_PERF
JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation.
static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run.
static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD.
static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to.
#endif
void BeginPhase(Phases phase); // Indicate the start of the given phase.
void EndPhase(Phases phase); // Indicate the end of the given phase.
#if MEASURE_CLRAPI_CALLS
// Thin wrappers that call into JitTimer (if present).
inline void CLRApiCallEnter(unsigned apix);
inline void CLRApiCallLeave(unsigned apix);
public:
inline void CLR_API_Enter(API_ICorJitInfo_Names ename);
inline void CLR_API_Leave(API_ICorJitInfo_Names ename);
private:
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// These variables are associated with maintaining SQM data about compile time.
unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase
// in the current compilation.
unsigned __int64 m_compCycles; // Net cycle count for current compilation
DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of
// the inlining phase in the current compilation.
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete.
// (We do this after inlining because this marks the last point at which the JIT is likely to cause
// type-loading and class initialization).
void RecordStateAtEndOfInlining();
// Assumes being called at the end of compilation. Update the SQM state.
void RecordStateAtEndOfCompilation();
public:
#if FUNC_INFO_LOGGING
static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the
// filename to write it to.
static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to.
#endif // FUNC_INFO_LOGGING
Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers.
#if MEASURE_NOWAY
void RecordNowayAssert(const char* filename, unsigned line, const char* condStr);
#endif // MEASURE_NOWAY
#ifndef FEATURE_TRACELOGGING
// Should we actually fire the noway assert body and the exception handler?
bool compShouldThrowOnNoway();
#else // FEATURE_TRACELOGGING
// Should we actually fire the noway assert body and the exception handler?
bool compShouldThrowOnNoway(const char* filename, unsigned line);
// Telemetry instance to use per method compilation.
JitTelemetry compJitTelemetry;
// Get common parameters that have to be logged with most telemetry data.
void compGetTelemetryDefaults(const char** assemblyName,
const char** scopeName,
const char** methodName,
unsigned* methodHash);
#endif // !FEATURE_TRACELOGGING
#ifdef DEBUG
private:
NodeToTestDataMap* m_nodeTestData;
static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000;
unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we
// label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS.
// Currently kept in this field.
public:
NodeToTestDataMap* GetNodeTestData()
{
Compiler* compRoot = impInlineRoot();
if (compRoot->m_nodeTestData == nullptr)
{
compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly());
}
return compRoot->m_nodeTestData;
}
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap;
// Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and
// currently occur in the AST graph.
NodeToIntMap* FindReachableNodesInNodeTestData();
// Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated
// test data, associate that data with "to".
void TransferTestDataToNode(GenTree* from, GenTree* to);
// These are the methods that test that the various conditions implied by the
// test attributes are satisfied.
void JitTestCheckSSA(); // SSA builder tests.
void JitTestCheckVN(); // Value numbering tests.
#endif // DEBUG
// The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for
// operations.
FieldSeqStore* m_fieldSeqStore;
FieldSeqStore* GetFieldSeqStore()
{
Compiler* compRoot = impInlineRoot();
if (compRoot->m_fieldSeqStore == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation.
CompAllocator ialloc(getAllocator(CMK_FieldSeqStore));
compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc);
}
return compRoot->m_fieldSeqStore;
}
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap;
// Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since
// the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant
// that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to
// attach the field sequence directly to the address node.
NodeToFieldSeqMap* m_zeroOffsetFieldMap;
NodeToFieldSeqMap* GetZeroOffsetFieldMap()
{
// Don't need to worry about inlining here
if (m_zeroOffsetFieldMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for
// allocation.
CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap));
m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
}
return m_zeroOffsetFieldMap;
}
// Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in
// "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on
// "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has
// a field sequence as a member; otherwise, it may be the addition of an a byref and a constant, where the const
// has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we
// record the the field sequence using the ZeroOffsetFieldMap described above.
//
// One exception above is that "op1" is a node of type "TYP_REF" where "op1" is a GT_LCL_VAR.
// This happens when System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in
// CoreRT. Such case is handled same as the default case.
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq);
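// A hypothetical call-site sketch (illustrative only; "addrNode" and "fieldSeq" are assumed to already exist):
//
//     fgAddFieldSeqForZeroOffset(addrNode, fieldSeq); // annotate the zero-offset field access on "addrNode"
//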
typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap;
NodeToArrayInfoMap* m_arrayInfoMap;
NodeToArrayInfoMap* GetArrayInfoMap()
{
Compiler* compRoot = impInlineRoot();
if (compRoot->m_arrayInfoMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation.
CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap));
compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc);
}
return compRoot->m_arrayInfoMap;
}
//-----------------------------------------------------------------------------------------------------------------
// Compiler::TryGetArrayInfo:
// Given an indirection node, checks to see whether or not that indirection represents an array access, and
// if so returns information about the array.
//
// Arguments:
// indir - The `GT_IND` node.
// arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise.
//
// Returns:
// True if the `GT_IND` node represents an array access; false otherwise.
bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo)
{
if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
return false;
}
if (indir->gtOp1->OperIs(GT_INDEX_ADDR))
{
GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr();
*arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset,
indexAddr->gtStructElemClass);
return true;
}
bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo);
assert(found);
return true;
}
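// A hypothetical call-site sketch (illustrative only; "indir" is an assumed GenTreeIndir*):
//
//     ArrayInfo arrInfo;
//     if (TryGetArrayInfo(indir, &arrInfo))
//     {
//         // "arrInfo" now describes the element type, size and offset of the accessed array
//     }
//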
NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount];
// In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory
// states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory
// state, all the possible memory states are possible initial states of the corresponding catch block(s).)
NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind)
{
if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates)
{
// Use the same map for GCHeap and ByrefExposed when their states match.
memoryKind = ByrefExposed;
}
assert(memoryKind < MemoryKindCount);
Compiler* compRoot = impInlineRoot();
if (compRoot->m_memorySsaMap[memoryKind] == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation.
CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap));
compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc);
}
return compRoot->m_memorySsaMap[memoryKind];
}
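// A minimal sketch (illustrative only; "tree" and "ssaNum" are assumed to exist at the call site):
//
//     GetMemorySsaMap(GcHeap)->Set(tree, ssaNum); // record the SSA number this node defines
//
//     unsigned num;
//     bool     found = GetMemorySsaMap(GcHeap)->Lookup(tree, &num);
//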
// The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields.
CORINFO_CLASS_HANDLE m_refAnyClass;
CORINFO_FIELD_HANDLE GetRefanyDataField()
{
if (m_refAnyClass == nullptr)
{
m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
}
return info.compCompHnd->getFieldInClass(m_refAnyClass, 0);
}
CORINFO_FIELD_HANDLE GetRefanyTypeField()
{
if (m_refAnyClass == nullptr)
{
m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
}
return info.compCompHnd->getFieldInClass(m_refAnyClass, 1);
}
#if VARSET_COUNTOPS
static BitSetSupport::BitSetOpCounter m_varsetOpCounter;
#endif
#if ALLVARSET_COUNTOPS
static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter;
#endif
static HelperCallProperties s_helperCallProperties;
#ifdef UNIX_AMD64_ABI
static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size);
static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
unsigned slotNum);
static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
var_types* type0,
var_types* type1,
unsigned __int8* offset0,
unsigned __int8* offset1);
void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd,
var_types* type0,
var_types* type1,
unsigned __int8* offset0,
unsigned __int8* offset1);
#endif // defined(UNIX_AMD64_ABI)
void fgMorphMultiregStructArgs(GenTreeCall* call);
GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr);
bool killGCRefs(GenTree* tree);
}; // end of class Compiler
//---------------------------------------------------------------------------------------------------------------------
// GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern.
//
// This class implements a configurable walker for IR trees. There are five configuration options (defaults values are
// shown in parentheses):
//
// - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit
// of a misnomer, as the first entry will always be the current node.
//
// - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an
// argument before visiting the node's operands.
//
// - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an
// argument after visiting the node's operands.
//
// - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes.
// `DoPreOrder` must be true if this option is true.
//
// - UseExecutionOrder (false): when true, the walker will visit a node's operands in execution order (e.g. if a
// binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be
// visited before the first).
//
// At least one of `DoPreOrder` and `DoPostOrder` must be specified.
//
// A simple pre-order visitor might look something like the following:
//
// class CountingVisitor final : public GenTreeVisitor<CountingVisitor>
// {
// public:
// enum
// {
// DoPreOrder = true
// };
//
// unsigned m_count;
//
// CountingVisitor(Compiler* compiler)
// : GenTreeVisitor<CountingVisitor>(compiler), m_count(0)
// {
// }
//
// Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
// {
// m_count++;
// return Compiler::fgWalkResult::WALK_CONTINUE;
// }
// };
//
// This visitor would then be used like so:
//
// CountingVisitor countingVisitor(compiler);
// countingVisitor.WalkTree(&root, nullptr);
//
template <typename TVisitor>
class GenTreeVisitor
{
protected:
typedef Compiler::fgWalkResult fgWalkResult;
enum
{
ComputeStack = false,
DoPreOrder = false,
DoPostOrder = false,
DoLclVarsOnly = false,
UseExecutionOrder = false,
};
Compiler* m_compiler;
ArrayStack<GenTree*> m_ancestors;
GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack))
{
assert(compiler != nullptr);
static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder);
static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder);
}
fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
return fgWalkResult::WALK_CONTINUE;
}
fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
{
return fgWalkResult::WALK_CONTINUE;
}
public:
fgWalkResult WalkTree(GenTree** use, GenTree* user)
{
assert(use != nullptr);
GenTree* node = *use;
if (TVisitor::ComputeStack)
{
m_ancestors.Push(node);
}
fgWalkResult result = fgWalkResult::WALK_CONTINUE;
if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
node = *use;
if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES))
{
goto DONE;
}
}
switch (node->OperGet())
{
// Leaf lclVars
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
if (TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
FALLTHROUGH;
// Leaf nodes
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
break;
// Lclvar unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
if (TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
FALLTHROUGH;
// Standard unary operators
case GT_NOT:
case GT_NEG:
case GT_BSWAP:
case GT_BSWAP16:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_FIELD:
case GT_RETURN:
case GT_RETFILT:
case GT_RUNTIMELOOKUP:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
{
GenTreeUnOp* const unOp = node->AsUnOp();
if (unOp->gtOp1 != nullptr)
{
result = WalkTree(&unOp->gtOp1, unOp);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
// Special nodes
case GT_PHI:
for (GenTreePhi::Use& use : node->AsPhi()->Uses())
{
result = WalkTree(&use.NodeRef(), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses())
{
result = WalkTree(&use.NodeRef(), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
case GT_CMPXCHG:
{
GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg();
result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&cmpXchg->gtOpValue, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_ARR_ELEM:
{
GenTreeArrElem* const arrElem = node->AsArrElem();
result = WalkTree(&arrElem->gtArrObj, arrElem);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
const unsigned rank = arrElem->gtArrRank;
for (unsigned dim = 0; dim < rank; dim++)
{
result = WalkTree(&arrElem->gtArrInds[dim], arrElem);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
case GT_ARR_OFFSET:
{
GenTreeArrOffs* const arrOffs = node->AsArrOffs();
result = WalkTree(&arrOffs->gtOffset, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&arrOffs->gtIndex, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&arrOffs->gtArrObj, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_STORE_DYN_BLK:
{
GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk();
GenTree** op1Use = &dynBlock->gtOp1;
GenTree** op2Use = &dynBlock->gtOp2;
GenTree** op3Use = &dynBlock->gtDynamicSize;
result = WalkTree(op1Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(op2Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(op3Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_CALL:
{
GenTreeCall* const call = node->AsCall();
if (call->gtCallThisArg != nullptr)
{
result = WalkTree(&call->gtCallThisArg->NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
for (GenTreeCall::Use& use : call->Args())
{
result = WalkTree(&use.NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
for (GenTreeCall::Use& use : call->LateArgs())
{
result = WalkTree(&use.NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (call->gtCallType == CT_INDIRECT)
{
if (call->gtCallCookie != nullptr)
{
result = WalkTree(&call->gtCallCookie, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
result = WalkTree(&call->gtCallAddr, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (call->gtControlExpr != nullptr)
{
result = WalkTree(&call->gtControlExpr, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
if (TVisitor::UseExecutionOrder && node->IsReverseOp())
{
assert(node->AsMultiOp()->GetOperandCount() == 2);
result = WalkTree(&node->AsMultiOp()->Op(2), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&node->AsMultiOp()->Op(1), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
else
{
for (GenTree** use : node->AsMultiOp()->UseEdges())
{
result = WalkTree(use, node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// Binary nodes
default:
{
assert(node->OperIsBinary());
GenTreeOp* const op = node->AsOp();
GenTree** op1Use = &op->gtOp1;
GenTree** op2Use = &op->gtOp2;
if (TVisitor::UseExecutionOrder && node->IsReverseOp())
{
std::swap(op1Use, op2Use);
}
if (*op1Use != nullptr)
{
result = WalkTree(op1Use, op);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (*op2Use != nullptr)
{
result = WalkTree(op2Use, op);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
}
DONE:
// Finally, visit the current node
if (TVisitor::DoPostOrder)
{
result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user);
}
if (TVisitor::ComputeStack)
{
m_ancestors.Pop();
}
return result;
}
};
template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder>
class GenericTreeWalker final
: public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>
{
public:
enum
{
ComputeStack = computeStack,
DoPreOrder = doPreOrder,
DoPostOrder = doPostOrder,
DoLclVarsOnly = doLclVarsOnly,
UseExecutionOrder = useExecutionOrder,
};
private:
Compiler::fgWalkData* m_walkData;
public:
GenericTreeWalker(Compiler::fgWalkData* walkData)
: GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>(
walkData->compiler)
, m_walkData(walkData)
{
assert(walkData != nullptr);
if (computeStack)
{
walkData->parentStack = &this->m_ancestors;
}
}
Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
m_walkData->parent = user;
return m_walkData->wtprVisitorFn(use, m_walkData);
}
Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
{
m_walkData->parent = user;
return m_walkData->wtpoVisitorFn(use, m_walkData);
}
};
// A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor.
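// A minimal sketch of a visitor (illustrative only; "domTree" is an assumed DomTreeNode*, e.g. the SSA
// dominator tree):
//
// class BlockCountingVisitor final : public DomTreeVisitor<BlockCountingVisitor>
// {
// public:
// unsigned m_count;
//
// BlockCountingVisitor(Compiler* compiler, DomTreeNode* domTree)
// : DomTreeVisitor<BlockCountingVisitor>(compiler, domTree), m_count(0)
// {
// }
//
// void PreOrderVisit(BasicBlock* block)
// {
// m_count++;
// }
// };
//
// BlockCountingVisitor visitor(compiler, domTree);
// visitor.WalkTree();
//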
template <typename TVisitor>
class DomTreeVisitor
{
protected:
Compiler* const m_compiler;
DomTreeNode* const m_domTree;
DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree)
{
}
void Begin()
{
}
void PreOrderVisit(BasicBlock* block)
{
}
void PostOrderVisit(BasicBlock* block)
{
}
void End()
{
}
public:
//------------------------------------------------------------------------
// WalkTree: Walk the dominator tree, starting from fgFirstBB.
//
// Notes:
// This performs a non-recursive, non-allocating walk of the tree by using
// DomTreeNode's firstChild and nextSibling links to locate the children of
// a node and BasicBlock's bbIDom parent link to go back up the tree when
// no more children are left.
//
// Forests are also supported, provided that all the roots are chained via
// DomTreeNode::nextSibling to fgFirstBB.
//
void WalkTree()
{
static_cast<TVisitor*>(this)->Begin();
for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next)
{
static_cast<TVisitor*>(this)->PreOrderVisit(block);
next = m_domTree[block->bbNum].firstChild;
if (next != nullptr)
{
assert(next->bbIDom == block);
continue;
}
do
{
static_cast<TVisitor*>(this)->PostOrderVisit(block);
next = m_domTree[block->bbNum].nextSibling;
if (next != nullptr)
{
assert(next->bbIDom == block->bbIDom);
break;
}
block = block->bbIDom;
} while (block != nullptr);
}
static_cast<TVisitor*>(this)->End();
}
};
// EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.:
// for (EHblkDsc* const ehDsc : EHClauses(compiler))
//
class EHClauses
{
EHblkDsc* m_begin;
EHblkDsc* m_end;
// Forward iterator for the exception handling table entries. Iteration is in table order.
//
class iterator
{
EHblkDsc* m_ehDsc;
public:
iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc)
{
}
EHblkDsc* operator*() const
{
return m_ehDsc;
}
iterator& operator++()
{
++m_ehDsc;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_ehDsc != i.m_ehDsc;
}
};
public:
EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount)
{
assert((m_begin != nullptr) || (m_begin == m_end));
}
iterator begin() const
{
return iterator(m_begin);
}
iterator end() const
{
return iterator(m_end);
}
};
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Miscellaneous Compiler stuff XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// Values used to mark the types a stack slot is used for
const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int
const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long
const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float
const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float
const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer
const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer
const unsigned TYPE_REF_STC = 0x40; // slot used as a struct
const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type
// const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken
/*****************************************************************************
*
* Variables to keep track of total code amounts.
*/
#if DISPLAY_SIZES
extern size_t grossVMsize;
extern size_t grossNCsize;
extern size_t totalNCsize;
extern unsigned genMethodICnt;
extern unsigned genMethodNCnt;
extern size_t gcHeaderISize;
extern size_t gcPtrMapISize;
extern size_t gcHeaderNSize;
extern size_t gcPtrMapNSize;
#endif // DISPLAY_SIZES
/*****************************************************************************
*
* Variables to keep track of basic block counts (more data on 1 BB methods)
*/
#if COUNT_BASIC_BLOCKS
extern Histogram bbCntTable;
extern Histogram bbOneBBSizeTable;
#endif
/*****************************************************************************
*
* Used by optFindNaturalLoops to gather statistical information such as
* - total number of natural loops
* - number of loops with 1, 2, ... exit conditions
* - number of loops that have an iterator (for like)
* - number of loops that have a constant iterator
*/
#if COUNT_LOOPS
extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops
extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has
extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent
extern unsigned totalLoopCount; // counts the total number of natural loops
extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops
extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent
extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like)
extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter <
// const)
extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like)
extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops
extern unsigned loopsThisMethod; // counts the number of loops in the current method
extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method.
extern Histogram loopCountTable; // Histogram of loop counts
extern Histogram loopExitCountTable; // Histogram of loop exit counts
#endif // COUNT_LOOPS
/*****************************************************************************
* variables to keep track of how many iterations we go in a dataflow pass
*/
#if DATAFLOW_ITER
extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow
extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow
#endif // DATAFLOW_ITER
#if MEASURE_BLOCK_SIZE
extern size_t genFlowNodeSize;
extern size_t genFlowNodeCnt;
#endif // MEASURE_BLOCK_SIZE
#if MEASURE_NODE_SIZE
struct NodeSizeStats
{
void Init()
{
genTreeNodeCnt = 0;
genTreeNodeSize = 0;
genTreeNodeActualSize = 0;
}
// Count of tree nodes allocated.
unsigned __int64 genTreeNodeCnt;
// The size we allocate.
unsigned __int64 genTreeNodeSize;
// The actual size of the node. Note that the actual size will likely be smaller
// than the allocated size, but we sometimes use SetOper()/ChangeOper() to change
// a smaller node to a larger one. TODO-Cleanup: add stats on
// SetOper()/ChangeOper() usage to quantify this.
unsigned __int64 genTreeNodeActualSize;
};
extern NodeSizeStats genNodeSizeStats; // Total node size stats
extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats
extern Histogram genTreeNcntHist;
extern Histogram genTreeNsizHist;
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
* Count fatal errors (including noway_asserts).
*/
#if MEASURE_FATAL
extern unsigned fatal_badCode;
extern unsigned fatal_noWay;
extern unsigned fatal_implLimitation;
extern unsigned fatal_NOMEM;
extern unsigned fatal_noWayAssertBody;
#ifdef DEBUG
extern unsigned fatal_noWayAssertBodyArgs;
#endif // DEBUG
extern unsigned fatal_NYI;
#endif // MEASURE_FATAL
/*****************************************************************************
* Codegen
*/
#ifdef TARGET_XARCH
const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl;
const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr;
const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar;
const instruction INS_AND = INS_and;
const instruction INS_OR = INS_or;
const instruction INS_XOR = INS_xor;
const instruction INS_NEG = INS_neg;
const instruction INS_TEST = INS_test;
const instruction INS_MUL = INS_imul;
const instruction INS_SIGNED_DIVIDE = INS_idiv;
const instruction INS_UNSIGNED_DIVIDE = INS_div;
const instruction INS_BREAKPOINT = INS_int3;
const instruction INS_ADDC = INS_adc;
const instruction INS_SUBC = INS_sbb;
const instruction INS_NOT = INS_not;
#endif // TARGET_XARCH
#ifdef TARGET_ARM
const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl;
const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr;
const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr;
const instruction INS_AND = INS_and;
const instruction INS_OR = INS_orr;
const instruction INS_XOR = INS_eor;
const instruction INS_NEG = INS_rsb;
const instruction INS_TEST = INS_tst;
const instruction INS_MUL = INS_mul;
const instruction INS_MULADD = INS_mla;
const instruction INS_SIGNED_DIVIDE = INS_sdiv;
const instruction INS_UNSIGNED_DIVIDE = INS_udiv;
const instruction INS_BREAKPOINT = INS_bkpt;
const instruction INS_ADDC = INS_adc;
const instruction INS_SUBC = INS_sbc;
const instruction INS_NOT = INS_mvn;
const instruction INS_ABS = INS_vabs;
const instruction INS_SQRT = INS_vsqrt;
#endif // TARGET_ARM
#ifdef TARGET_ARM64
const instruction INS_MULADD = INS_madd;
inline const instruction INS_BREAKPOINT_osHelper()
{
// GDB needs the encoding of brk #0
// Windbg needs the encoding of brk #F000
return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows;
}
#define INS_BREAKPOINT INS_BREAKPOINT_osHelper()
const instruction INS_ABS = INS_fabs;
const instruction INS_SQRT = INS_fsqrt;
#endif // TARGET_ARM64
/*****************************************************************************/
extern const BYTE genTypeSizes[];
extern const BYTE genTypeAlignments[];
extern const BYTE genTypeStSzs[];
extern const BYTE genActualTypes[];
/*****************************************************************************/
#ifdef DEBUG
void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars);
#endif // DEBUG
#include "compiler.hpp" // All the shared inline functions
/*****************************************************************************/
#endif //_COMPILER_H_
/*****************************************************************************/
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Compiler XX
XX XX
XX Represents the method data we are currently JIT-compiling. XX
XX An instance of this class is created for every method we JIT. XX
XX This contains all the info needed for the method. So allocating a XX
XX a new instance per method makes it thread-safe. XX
XX It should be used to do all the memory management for the compiler run. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _COMPILER_H_
#define _COMPILER_H_
/*****************************************************************************/
#include "jit.h"
#include "opcode.h"
#include "varset.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "gentree.h"
#include "debuginfo.h"
#include "lir.h"
#include "block.h"
#include "inline.h"
#include "jiteh.h"
#include "instr.h"
#include "regalloc.h"
#include "sm.h"
#include "cycletimer.h"
#include "blockset.h"
#include "arraystack.h"
#include "hashbv.h"
#include "jitexpandarray.h"
#include "tinyarray.h"
#include "valuenum.h"
#include "jittelemetry.h"
#include "namedintrinsiclist.h"
#ifdef LATE_DISASM
#include "disasm.h"
#endif
#include "codegeninterface.h"
#include "regset.h"
#include "jitgcinfo.h"
#if DUMP_GC_TABLES && defined(JIT32_GCENCODER)
#include "gcdump.h"
#endif
#include "emit.h"
#include "hwintrinsic.h"
#include "simd.h"
#include "simdashwintrinsic.h"
// This is only used locally in the JIT to indicate that
// a verification block should be inserted
#define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER
/*****************************************************************************
* Forward declarations
*/
struct InfoHdr; // defined in GCInfo.h
struct escapeMapping_t; // defined in fgdiagnostic.cpp
class emitter; // defined in emit.h
struct ShadowParamVarInfo; // defined in GSChecks.cpp
struct InitVarDscInfo; // defined in register_arg_convention.h
class FgStack; // defined in fgbasic.cpp
class Instrumentor; // defined in fgprofile.cpp
class SpanningTreeVisitor; // defined in fgprofile.cpp
class CSE_DataFlow; // defined in OptCSE.cpp
class OptBoolsDsc; // defined in optimizer.cpp
#ifdef DEBUG
struct IndentStack;
#endif
class Lowering; // defined in lower.h
// The following are defined in this file, Compiler.h
class Compiler;
/*****************************************************************************
* Unwind info
*/
#include "unwind.h"
/*****************************************************************************/
//
// Declare global operator new overloads that use the compiler's arena allocator
//
// I wanted to make the second argument optional, with default = CMK_Unknown, but that
// caused these to be ambiguous with the global placement new operators.
void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk);
void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk);
void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference);
// Requires the definitions of "operator new", so "loopcloning.h" is included after those definitions.
#include "loopcloning.h"
/*****************************************************************************/
/* This is included here and not earlier as it needs the definition of "CSE"
* which is defined in the section above */
/*****************************************************************************/
unsigned genLog2(unsigned value);
unsigned genLog2(unsigned __int64 value);
unsigned ReinterpretHexAsDecimal(unsigned in);
/*****************************************************************************/
const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC);
#ifdef DEBUG
const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs
#endif
//------------------------------------------------------------------------
// HFA info shared by LclVarDsc and fgArgTabEntry
//------------------------------------------------------------------------
inline bool IsHfa(CorInfoHFAElemType kind)
{
return kind != CORINFO_HFA_ELEM_NONE;
}
inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind)
{
switch (kind)
{
case CORINFO_HFA_ELEM_FLOAT:
return TYP_FLOAT;
case CORINFO_HFA_ELEM_DOUBLE:
return TYP_DOUBLE;
#ifdef FEATURE_SIMD
case CORINFO_HFA_ELEM_VECTOR64:
return TYP_SIMD8;
case CORINFO_HFA_ELEM_VECTOR128:
return TYP_SIMD16;
#endif
case CORINFO_HFA_ELEM_NONE:
return TYP_UNDEF;
default:
assert(!"Invalid HfaElemKind");
return TYP_UNDEF;
}
}
inline CorInfoHFAElemType HfaElemKindFromType(var_types type)
{
switch (type)
{
case TYP_FLOAT:
return CORINFO_HFA_ELEM_FLOAT;
case TYP_DOUBLE:
return CORINFO_HFA_ELEM_DOUBLE;
#ifdef FEATURE_SIMD
case TYP_SIMD8:
return CORINFO_HFA_ELEM_VECTOR64;
case TYP_SIMD16:
return CORINFO_HFA_ELEM_VECTOR128;
#endif
case TYP_UNDEF:
return CORINFO_HFA_ELEM_NONE;
default:
assert(!"Invalid HFA Type");
return CORINFO_HFA_ELEM_NONE;
}
}
// The following holds the Local var info (scope information)
typedef const char* VarName; // Actual ASCII string
struct VarScopeDsc
{
unsigned vsdVarNum; // (remapped) LclVarDsc number
unsigned vsdLVnum; // 'which' in eeGetLVinfo().
// Also, it is the index of this entry in the info.compVarScopes array,
// which is useful since the array is also accessed via the
// compEnterScopeList and compExitScopeList sorted arrays.
IL_OFFSET vsdLifeBeg; // instr offset of beg of life
IL_OFFSET vsdLifeEnd; // instr offset of end of life
#ifdef DEBUG
VarName vsdName; // name of the var
#endif
};
// This class stores information associated with a LclVar SSA definition.
class LclSsaVarDsc
{
// The basic block where the definition occurs. Definitions of uninitialized variables
// are considered to occur at the start of the first basic block (fgFirstBB).
//
// TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by
// SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to
// investigate and perhaps eliminate this rather unexpected behavior.
BasicBlock* m_block;
// The GT_ASG node that generates the definition, or nullptr for definitions
// of uninitialized variables.
GenTreeOp* m_asg;
public:
LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr)
{
}
LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg)
{
assert((asg == nullptr) || asg->OperIs(GT_ASG));
}
BasicBlock* GetBlock() const
{
return m_block;
}
void SetBlock(BasicBlock* block)
{
m_block = block;
}
GenTreeOp* GetAssignment() const
{
return m_asg;
}
void SetAssignment(GenTreeOp* asg)
{
assert((asg == nullptr) || asg->OperIs(GT_ASG));
m_asg = asg;
}
ValueNumPair m_vnPair;
};
// This class stores information associated with a memory SSA definition.
class SsaMemDef
{
public:
ValueNumPair m_vnPair;
};
//------------------------------------------------------------------------
// SsaDefArray: A resizable array of SSA definitions.
//
// Unlike an ordinary resizable array implementation, this allows only element
// addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM
// (basically it's a 1-based array). The array doesn't impose any particular
// requirements on the elements it stores and AllocSsaNum forwards its arguments
// to the array element constructor, this way the array supports both LclSsaVarDsc
// and SsaMemDef elements.
//
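// A minimal usage sketch (illustrative only; "alloc" is an assumed CompAllocator):
//
//     SsaDefArray<SsaMemDef> defs;
//     unsigned   ssaNum = defs.AllocSsaNum(alloc); // first allocation returns SsaConfig::FIRST_SSA_NUM
//     SsaMemDef* def    = defs.GetSsaDef(ssaNum);  // 1-based lookup; RESERVED_SSA_NUM is not a valid argument
//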
template <typename T>
class SsaDefArray
{
T* m_array;
unsigned m_arraySize;
unsigned m_count;
static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0);
static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1);
// Get the minimum valid SSA number.
unsigned GetMinSsaNum() const
{
return SsaConfig::FIRST_SSA_NUM;
}
// Increase (double) the size of the array.
void GrowArray(CompAllocator alloc)
{
unsigned oldSize = m_arraySize;
unsigned newSize = max(2, oldSize * 2);
T* newArray = alloc.allocate<T>(newSize);
for (unsigned i = 0; i < oldSize; i++)
{
newArray[i] = m_array[i];
}
m_array = newArray;
m_arraySize = newSize;
}
public:
// Construct an empty SsaDefArray.
SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0)
{
}
// Reset the array (used only if the SSA form is reconstructed).
void Reset()
{
m_count = 0;
}
// Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM).
template <class... Args>
unsigned AllocSsaNum(CompAllocator alloc, Args&&... args)
{
if (m_count == m_arraySize)
{
GrowArray(alloc);
}
unsigned ssaNum = GetMinSsaNum() + m_count;
m_array[m_count++] = T(std::forward<Args>(args)...);
// Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM
assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1));
return ssaNum;
}
// Get the number of SSA definitions in the array.
unsigned GetCount() const
{
return m_count;
}
// Get a pointer to the SSA definition at the specified index.
T* GetSsaDefByIndex(unsigned index)
{
assert(index < m_count);
return &m_array[index];
}
// Check if the specified SSA number is valid.
bool IsValidSsaNum(unsigned ssaNum) const
{
return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count));
}
// Get a pointer to the SSA definition associated with the specified SSA number.
T* GetSsaDef(unsigned ssaNum)
{
assert(ssaNum != SsaConfig::RESERVED_SSA_NUM);
return GetSsaDefByIndex(ssaNum - GetMinSsaNum());
}
// Get an SSA number associated with the specified SSA def (that must be in this array).
unsigned GetSsaNum(T* ssaDef)
{
assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count]));
return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]);
}
};
enum RefCountState
{
RCS_INVALID, // not valid to get/set ref counts
RCS_EARLY, // early counts for struct promotion and struct passing
RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward)
};
#ifdef DEBUG
// Reasons why we can't enregister a local.
enum class DoNotEnregisterReason
{
None,
AddrExposed, // the address of this local is exposed.
DontEnregStructs, // struct enregistration is disabled.
NotRegSizeStruct, // the struct size does not match any register size, usually the struct size is too big.
LocalField, // the local is accessed with LCL_FLD; note this can happen not only for struct locals.
VMNeedsStackAddr,
LiveInOutOfHandler, // the local is live in and out of an exception handler and is not single def.
BlockOp, // Is read or written via a block operation.
IsStructArg, // Is a struct passed as an argument in a way that requires a stack location.
DepField, // It is a field of a dependently promoted struct
NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set
MinOptsGC, // It is a GC Ref and we are compiling MinOpts
#if !defined(TARGET_64BIT)
LongParamField, // It is a decomposed field of a long parameter.
#endif
#ifdef JIT32_GCENCODER
PinningRef,
#endif
LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD.
CastTakesAddr,
StoreBlkSrc, // the local is used as STORE_BLK source.
OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregistered.
SwizzleArg, // the local is passed using LCL_FLD as another type.
BlockOpRet, // the struct is returned and it is promoted or there is a cast.
ReturnSpCheck, // the local is used to do the SP check
SimdUserForcesDep // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted
};
enum class AddressExposedReason
{
NONE,
PARENT_EXPOSED, // This is a promoted field but the parent is exposed.
TOO_CONSERVATIVE, // Was marked as exposed to be conservative; fix these places.
ESCAPE_ADDRESS, // The address is escaping, for example, passed as a call argument.
WIDE_INDIR, // We access it via an indirection with a wider type.
OSR_EXPOSED, // It was exposed in the original method; OSR has to repeat it.
STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed.
COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed.
DISPATCH_RET_BUF // Caller return buffer dispatch.
};
#endif // DEBUG
class LclVarDsc
{
public:
// The constructor. Most things can just be zero'ed.
//
// Initialize the ArgRegs to REG_STK.
// Morph will update if this local is passed in a register.
LclVarDsc()
: _lvArgReg(REG_STK)
,
#if FEATURE_MULTIREG_ARGS
_lvOtherArgReg(REG_STK)
,
#endif // FEATURE_MULTIREG_ARGS
lvClassHnd(NO_CLASS_HANDLE)
, lvRefBlks(BlockSetOps::UninitVal())
, lvPerSsaData()
{
}
// note this only packs because var_types is a typedef of unsigned char
var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF
unsigned char lvIsParam : 1; // is this a parameter?
unsigned char lvIsRegArg : 1; // is this an argument that was passed by register?
unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP)
unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame
unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the
// variable is in the same register for the entire function.
unsigned char lvTracked : 1; // is this a tracked variable?
bool lvTrackedNonStruct()
{
return lvTracked && lvType != TYP_STRUCT;
}
unsigned char lvPinned : 1; // is this a pinned variable?
unsigned char lvMustInit : 1; // must be initialized
private:
bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a
// global location, etc.
// We cannot reason reliably about the value of the variable.
public:
unsigned char lvDoNotEnregister : 1; // Do not enregister this variable.
unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects
// struct promotion.
unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must
// be on the stack (at least at those boundaries.)
unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder)
unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable.
unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local.
unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local
// stack frame.
unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local
unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local
unsigned char lvIsTemp : 1; // Short-lifetime compiler temp
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref.
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
unsigned char lvIsBoolean : 1; // set if variable is boolean
unsigned char lvSingleDef : 1; // variable has a single def
// before lvaMarkLocalVars: identifies ref type locals that can get type updates
// after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies
unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate
// Currently, this is only used to decide if an EH variable can be
// a register candidate or not.
unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variables that are disqualified from register
// candidacy
unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan)
// and is spilled making it candidate to spill right after the
// first (and only) definition.
// Note: We cannot reuse lvSingleDefRegCandidate because it is set
// in earlier phase and the information might not be appropriate
// in LSRA.
unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization
unsigned char lvVolatileHint : 1; // hint for AssertionProp
#ifndef TARGET_64BIT
unsigned char lvStructDoubleAlign : 1; // Must we double align this struct?
#endif // !TARGET_64BIT
#ifdef TARGET_64BIT
unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long
#endif
#ifdef DEBUG
unsigned char lvKeepType : 1; // Don't change the type of this variable
unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one
#endif
unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security
// checks)
unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks?
unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a
// 32-bit target. For implicit byref parameters, this gets hijacked between
// fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether
// references to the arg are being rewritten as references to a promoted shadow local.
unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local?
unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields
unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes
unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout"
unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context
unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call
#ifdef FEATURE_HFA_FIELDS_PRESENT
CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif // FEATURE_HFA_FIELDS_PRESENT
#ifdef DEBUG
// TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct
// types, and is needed because of cases where TYP_STRUCT is bashed to an integral type.
// Consider cleaning this up so this workaround is not required.
unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals.
// I.e. there is no longer any reference to the struct directly.
// In this case we can simply remove this struct local.
unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no
// reference to the fields of this struct.
#endif
unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes
#ifdef FEATURE_SIMD
// Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the
// type of an arg node is TYP_BYREF and a local node is TYP_SIMD*.
unsigned char lvSIMDType : 1; // This is a SIMD struct
unsigned char lvUsedInSIMDIntrinsic : 1; // Set if this lclVar is used by a SIMD intrinsic
unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has fewer than 32 entries
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)lvSimdBaseJitType;
}
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
assert(simdBaseJitType < (1 << 5));
lvSimdBaseJitType = (unsigned char)simdBaseJitType;
}
var_types GetSimdBaseType() const;
#endif // FEATURE_SIMD
unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct.
unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type
#ifdef DEBUG
unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness
#endif
unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc,
// eh)
unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop
unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in
// the prolog. If the local has gc pointers, there are no gc-safe points
// between the prolog and the explicit initialization.
union {
unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct
// local. For implicit byref parameters, this gets hijacked between
// fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the
// struct local created to model the parameter's struct promotion, if any.
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local).
// Valid on promoted struct local fields.
};
unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc.
unsigned char lvFldOffset;
unsigned char lvFldOrdinal;
#ifdef DEBUG
unsigned char lvSingleDefDisqualifyReason = 'H';
#endif
#if FEATURE_MULTIREG_ARGS
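// lvRegNumForSlot: return the argument register assigned to the given slot (0 or 1) of a multireg argument.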
regNumber lvRegNumForSlot(unsigned slotNum)
{
if (slotNum == 0)
{
return (regNumber)_lvArgReg;
}
else if (slotNum == 1)
{
return GetOtherArgReg();
}
else
{
assert(false && "Invalid slotNum!");
}
unreached();
}
#endif // FEATURE_MULTIREG_ARGS
CorInfoHFAElemType GetLvHfaElemKind() const
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
return _lvHfaElemKind;
#else
NOWAY_MSG("GetLvHfaElemKind");
return CORINFO_HFA_ELEM_NONE;
#endif // FEATURE_HFA_FIELDS_PRESENT
}
void SetLvHfaElemKind(CorInfoHFAElemType elemKind)
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
_lvHfaElemKind = elemKind;
#else
NOWAY_MSG("SetLvHfaElemKind");
#endif // FEATURE_HFA_FIELDS_PRESENT
}
bool lvIsHfa() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetLvHfaElemKind());
}
else
{
return false;
}
}
bool lvIsHfaRegArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return lvIsRegArg && lvIsHfa();
}
else
{
return false;
}
}
//------------------------------------------------------------------------------
// lvHfaSlots: Get the number of slots used by an HFA local
//
// Return Value:
// On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA
// On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8
//
unsigned lvHfaSlots() const
{
assert(lvIsHfa());
assert(varTypeIsStruct(lvType));
unsigned slots = 0;
#ifdef TARGET_ARM
slots = lvExactSize / sizeof(float);
assert(slots <= 8);
#elif defined(TARGET_ARM64)
switch (GetLvHfaElemKind())
{
case CORINFO_HFA_ELEM_NONE:
assert(!"lvHfaSlots called for non-HFA");
break;
case CORINFO_HFA_ELEM_FLOAT:
assert((lvExactSize % 4) == 0);
slots = lvExactSize >> 2;
break;
case CORINFO_HFA_ELEM_DOUBLE:
case CORINFO_HFA_ELEM_VECTOR64:
assert((lvExactSize % 8) == 0);
slots = lvExactSize >> 3;
break;
case CORINFO_HFA_ELEM_VECTOR128:
assert((lvExactSize % 16) == 0);
slots = lvExactSize >> 4;
break;
default:
unreached();
}
assert(slots <= 4);
#endif // TARGET_ARM64
return slots;
}
// lvIsMultiRegArgOrRet()
// returns true if this is a multireg LclVar struct used in an argument context
// or if this is a multireg LclVar struct assigned from a multireg call
bool lvIsMultiRegArgOrRet()
{
return lvIsMultiRegArg || lvIsMultiRegRet;
}
#if defined(DEBUG)
private:
DoNotEnregisterReason m_doNotEnregReason;
AddressExposedReason m_addrExposedReason;
public:
void SetDoNotEnregReason(DoNotEnregisterReason reason)
{
m_doNotEnregReason = reason;
}
DoNotEnregisterReason GetDoNotEnregReason() const
{
return m_doNotEnregReason;
}
AddressExposedReason GetAddrExposedReason() const
{
return m_addrExposedReason;
}
#endif // DEBUG
public:
void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason))
{
m_addrExposed = value;
INDEBUG(m_addrExposedReason = reason);
}
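// CleanAddressExposed: clear the address-exposed flag for this local.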
void CleanAddressExposed()
{
m_addrExposed = false;
}
bool IsAddressExposed() const
{
return m_addrExposed;
}
private:
regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a
// register pair). It is set during codegen any time the
// variable is enregistered (lvRegister is only set
// to non-zero if the variable gets the same register assignment for its entire
// lifetime).
#if !defined(TARGET_64BIT)
regNumberSmall _lvOtherReg; // Used for "upper half" of long var.
#endif // !defined(TARGET_64BIT)
regNumberSmall _lvArgReg; // The (first) register in which this argument is passed.
#if FEATURE_MULTIREG_ARGS
regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register.
// Note this is defined but not used by ARM32
#endif // FEATURE_MULTIREG_ARGS
regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
/////////////////////
regNumber GetRegNum() const
{
return (regNumber)_lvRegNum;
}
void SetRegNum(regNumber reg)
{
_lvRegNum = (regNumberSmall)reg;
assert(_lvRegNum == reg);
}
/////////////////////
#if defined(TARGET_64BIT)
regNumber GetOtherReg() const
{
assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
// "unreachable code" warnings
return REG_NA;
}
void SetOtherReg(regNumber reg)
{
assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
// "unreachable code" warnings
}
#else // !TARGET_64BIT
regNumber GetOtherReg() const
{
return (regNumber)_lvOtherReg;
}
void SetOtherReg(regNumber reg)
{
_lvOtherReg = (regNumberSmall)reg;
assert(_lvOtherReg == reg);
}
#endif // !TARGET_64BIT
/////////////////////
regNumber GetArgReg() const
{
return (regNumber)_lvArgReg;
}
void SetArgReg(regNumber reg)
{
_lvArgReg = (regNumberSmall)reg;
assert(_lvArgReg == reg);
}
#if FEATURE_MULTIREG_ARGS
regNumber GetOtherArgReg() const
{
return (regNumber)_lvOtherArgReg;
}
void SetOtherArgReg(regNumber reg)
{
_lvOtherArgReg = (regNumberSmall)reg;
assert(_lvOtherArgReg == reg);
}
#endif // FEATURE_MULTIREG_ARGS
#ifdef FEATURE_SIMD
// Is this a SIMD struct?
bool lvIsSIMDType() const
{
return lvSIMDType;
}
// Is this a SIMD struct that is used in a SIMD intrinsic?
bool lvIsUsedInSIMDIntrinsic() const
{
return lvUsedInSIMDIntrinsic;
}
#else
// If FEATURE_SIMD is not enabled, return false
bool lvIsSIMDType() const
{
return false;
}
bool lvIsUsedInSIMDIntrinsic() const
{
return false;
}
#endif
/////////////////////
regNumber GetArgInitReg() const
{
return (regNumber)_lvArgInitReg;
}
void SetArgInitReg(regNumber reg)
{
_lvArgInitReg = (regNumberSmall)reg;
assert(_lvArgInitReg == reg);
}
/////////////////////
bool lvIsRegCandidate() const
{
return lvLRACandidate != 0;
}
bool lvIsInReg() const
{
return lvIsRegCandidate() && (GetRegNum() != REG_STK);
}
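// lvRegMask: return the mask for the register currently assigned to this local, or RBM_NONE if it is not in a
// register.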
regMaskTP lvRegMask() const
{
regMaskTP regMask = RBM_NONE;
if (varTypeUsesFloatReg(TypeGet()))
{
if (GetRegNum() != REG_STK)
{
regMask = genRegMaskFloat(GetRegNum(), TypeGet());
}
}
else
{
if (GetRegNum() != REG_STK)
{
regMask = genRegMask(GetRegNum());
}
}
return regMask;
}
unsigned short lvVarIndex; // variable tracking index
private:
unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference
// parameters, this gets hijacked from fgResetImplicitByRefRefCount
// through fgMarkDemotedImplicitByRefArgs, to provide a static
// appearance count (computed during address-exposed analysis)
// that fgMakeOutgoingStructArgCopy consults during global morph
// to determine if eliding its copy is legal.
weight_t m_lvRefCntWtd; // weighted reference count
public:
unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const;
void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL);
void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL);
weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const;
void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL);
void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL);
private:
int lvStkOffs; // stack offset of home in bytes.
public:
int GetStackOffset() const
{
return lvStkOffs;
}
void SetStackOffset(int offset)
{
lvStkOffs = offset;
}
unsigned lvExactSize; // (exact) size of the type in bytes
// Is this a promoted struct?
// This method returns true only for structs (including SIMD structs), not for
// locals that are split on a 32-bit target.
// It is only necessary to use this:
// 1) if only structs are wanted, and
// 2) if Lowering has already been done.
// Otherwise lvPromoted is valid.
bool lvPromotedStruct()
{
#if !defined(TARGET_64BIT)
return (lvPromoted && !varTypeIsLong(lvType));
#else // defined(TARGET_64BIT)
return lvPromoted;
#endif // defined(TARGET_64BIT)
}
unsigned lvSize() const;
size_t lvArgStackSize() const;
unsigned lvSlotNum; // original slot # (if remapped)
typeInfo lvVerTypeInfo; // type info needed for verification
// class handle for the local or null if not known or not a class,
// for a struct handle use `GetStructHnd()`.
CORINFO_CLASS_HANDLE lvClassHnd;
// Get class handle for a struct local or implicitByRef struct local.
CORINFO_CLASS_HANDLE GetStructHnd() const
{
#ifdef FEATURE_SIMD
if (lvSIMDType && (m_layout == nullptr))
{
return NO_CLASS_HANDLE;
}
#endif
assert(m_layout != nullptr);
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF)));
#else
assert(varTypeIsStruct(TypeGet()));
#endif
CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle();
assert(structHnd != NO_CLASS_HANDLE);
return structHnd;
}
CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields
private:
ClassLayout* m_layout; // layout info for structs
public:
BlockSet lvRefBlks; // Set of blocks that contain refs
Statement* lvDefStmt; // Pointer to the statement with the single definition
void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies
var_types TypeGet() const
{
return (var_types)lvType;
}
bool lvStackAligned() const
{
assert(lvIsStructField);
return ((lvFldOffset % TARGET_POINTER_SIZE) == 0);
}
bool lvNormalizeOnLoad() const
{
return varTypeIsSmall(TypeGet()) &&
// lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
(lvIsParam || m_addrExposed || lvIsStructField);
}
bool lvNormalizeOnStore() const
{
return varTypeIsSmall(TypeGet()) &&
// lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
!(lvIsParam || m_addrExposed || lvIsStructField);
}
void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true);
var_types GetHfaType() const
{
if (GlobalJitOptions::compFeatureHfa)
{
assert(lvIsHfa());
return HfaTypeFromElemKind(GetLvHfaElemKind());
}
else
{
return TYP_UNDEF;
}
}
void SetHfaType(var_types type)
{
if (GlobalJitOptions::compFeatureHfa)
{
CorInfoHFAElemType elemKind = HfaElemKindFromType(type);
SetLvHfaElemKind(elemKind);
// Ensure we've allocated enough bits.
assert(GetLvHfaElemKind() == elemKind);
}
}
// Returns true if this variable contains GC pointers (including being a GC pointer itself).
bool HasGCPtr() const
{
return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr());
}
// Returns the layout of a struct variable.
ClassLayout* GetLayout() const
{
assert(varTypeIsStruct(lvType));
return m_layout;
}
// Sets the layout of a struct variable.
void SetLayout(ClassLayout* layout)
{
assert(varTypeIsStruct(lvType));
assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout));
m_layout = layout;
}
SsaDefArray<LclSsaVarDsc> lvPerSsaData;
// Returns the address of the per-Ssa data for the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
LclSsaVarDsc* GetPerSsaData(unsigned ssaNum)
{
return lvPerSsaData.GetSsaDef(ssaNum);
}
// Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition
// of this variable.
unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef)
{
return lvPerSsaData.GetSsaNum(ssaDef);
}
var_types GetRegisterType(const GenTreeLclVarCommon* tree) const;
var_types GetRegisterType() const;
var_types GetActualRegisterType() const;
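// IsEnregisterableType: true if this local has a register type (GetRegisterType() != TYP_UNDEF) and so could be
// kept in a register.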
bool IsEnregisterableType() const
{
return GetRegisterType() != TYP_UNDEF;
}
bool IsEnregisterableLcl() const
{
if (lvDoNotEnregister)
{
return false;
}
return IsEnregisterableType();
}
//-----------------------------------------------------------------------------
// IsAlwaysAliveInMemory: Determines if this variable's value is always
// up-to-date on stack. This is possible if this is an EH-var or
// we decided to spill after single-def.
//
bool IsAlwaysAliveInMemory() const
{
return lvLiveInOutOfHndlr || lvSpillAtSingleDef;
}
bool CanBeReplacedWithItsField(Compiler* comp) const;
#ifdef DEBUG
public:
const char* lvReason;
void PrintVarReg() const
{
printf("%s", getRegName(GetRegNum()));
}
#endif // DEBUG
}; // class LclVarDsc
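// Symbolic boundary values used by IntegralRange (below). The enumerators are ordered by the integer values they
// represent, so the relational operators that follow compare them correctly.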
enum class SymbolicIntegerValue : int32_t
{
LongMin,
IntMin,
ShortMin,
ByteMin,
Zero,
One,
ByteMax,
UByteMax,
ShortMax,
UShortMax,
IntMax,
UIntMax,
LongMax,
};
inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) > static_cast<int32_t>(right);
}
inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) >= static_cast<int32_t>(right);
}
inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) < static_cast<int32_t>(right);
}
inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) <= static_cast<int32_t>(right);
}
// Represents an integral range useful for reasoning about integral casts.
// It uses a symbolic representation for lower and upper bounds so
// that it can efficiently handle integers of all sizes on all hosts.
//
// Note that the ranges represented by this class are **always** in the
// "signed" domain. This is so that if we know the range a node produces, it
// can be trivially used to determine if a cast above the node does or does not
// overflow, which requires that the interpretation of integers be the same both
// for the "input" and "output". We choose signed interpretation here because it
// produces nice continuous ranges and because IR uses sign-extension for constants.
//
// Some examples of how ranges are computed for casts:
// 1. CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the
// same range - all casts that do not change the representation, i.e. have the same
// "actual" input and output type, have the same "input" and "output" range.
// 2. CAST_OVF(ulong <- uint): never overflows => the "input" range is [INT_MIN..INT_MAX]
// (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32
// bit integers zero-extended to 64 bits).
// 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0
// when interpreting as signed => the "input" range is [0..INT_MAX], the same range
// being the produced one as the node does not change the width of the integer.
//
class IntegralRange
{
private:
SymbolicIntegerValue m_lowerBound;
SymbolicIntegerValue m_upperBound;
public:
IntegralRange() = default;
IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound)
: m_lowerBound(lowerBound), m_upperBound(upperBound)
{
assert(lowerBound <= upperBound);
}
bool Contains(int64_t value) const;
bool Contains(IntegralRange other) const
{
return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound);
}
bool IsPositive()
{
return m_lowerBound >= SymbolicIntegerValue::Zero;
}
bool Equals(IntegralRange other) const
{
return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound);
}
static int64_t SymbolicToRealValue(SymbolicIntegerValue value);
static SymbolicIntegerValue LowerBoundForType(var_types type);
static SymbolicIntegerValue UpperBoundForType(var_types type);
static IntegralRange ForType(var_types type)
{
return {LowerBoundForType(type), UpperBoundForType(type)};
}
static IntegralRange ForNode(GenTree* node, Compiler* compiler);
static IntegralRange ForCastInput(GenTreeCast* cast);
static IntegralRange ForCastOutput(GenTreeCast* cast);
#ifdef DEBUG
static void Print(IntegralRange range);
#endif // DEBUG
};
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX TempsInfo XX
XX XX
XX The temporary lclVars allocated by the compiler for code generation XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************
*
* The following keeps track of temporaries allocated in the stack frame
* during code-generation (after register allocation). These spill-temps are
* only used if we run out of registers while evaluating a tree.
*
* These are different from the more common temps allocated by lvaGrabTemp().
*/
class TempDsc
{
public:
TempDsc* tdNext;
private:
int tdOffs;
#ifdef DEBUG
static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG
#endif // DEBUG
int tdNum;
BYTE tdSize;
var_types tdType;
public:
TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType)
{
#ifdef DEBUG
// temps must have a negative number (so they have a different number from all local variables)
assert(tdNum < 0);
tdOffs = BAD_TEMP_OFFSET;
#endif // DEBUG
if (tdNum != _tdNum)
{
IMPL_LIMITATION("too many spill temps");
}
}
#ifdef DEBUG
bool tdLegalOffset() const
{
return tdOffs != BAD_TEMP_OFFSET;
}
#endif // DEBUG
int tdTempOffs() const
{
assert(tdLegalOffset());
return tdOffs;
}
void tdSetTempOffs(int offs)
{
tdOffs = offs;
assert(tdLegalOffset());
}
void tdAdjustTempOffs(int offs)
{
tdOffs += offs;
assert(tdLegalOffset());
}
int tdTempNum() const
{
assert(tdNum < 0);
return tdNum;
}
unsigned tdTempSize() const
{
return tdSize;
}
var_types tdTempType() const
{
return tdType;
}
};
// interface to hide linearscan implementation from rest of compiler
class LinearScanInterface
{
public:
virtual void doLinearScan() = 0;
virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0;
virtual bool willEnregisterLocalVars() const = 0;
#if TRACK_LSRA_STATS
virtual void dumpLsraStatsCsv(FILE* file) = 0;
virtual void dumpLsraStatsSummary(FILE* file) = 0;
#endif // TRACK_LSRA_STATS
};
LinearScanInterface* getLinearScanAllocator(Compiler* comp);
// Information about arrays: their element type and size, and the offset of the first element.
// We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes,
// associate an array info via the map retrieved by GetArrayInfoMap(). This information is used,
// for example, in value numbering of array index expressions.
struct ArrayInfo
{
var_types m_elemType;
CORINFO_CLASS_HANDLE m_elemStructType;
unsigned m_elemSize;
unsigned m_elemOffset;
ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0)
{
}
ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType)
: m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset)
{
}
};
// This enumeration names the phases into which we divide compilation. The phases should completely
// partition a compilation.
enum Phases
{
#define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm,
#include "compphases.h"
PHASE_NUMBER_OF
};
extern const char* PhaseNames[];
extern const char* PhaseEnums[];
extern const LPCWSTR PhaseShortNames[];
// Specify which checks should be run after each phase
//
enum class PhaseChecks
{
CHECK_NONE,
CHECK_ALL
};
// Specify compiler data that a phase might modify
enum class PhaseStatus : unsigned
{
MODIFIED_NOTHING,
MODIFIED_EVERYTHING
};
// The following enum provides a simple 1:1 mapping to CLR API's
enum API_ICorJitInfo_Names
{
#define DEF_CLR_API(name) API_##name,
#include "ICorJitInfo_API_names.h"
API_COUNT
};
//---------------------------------------------------------------
// Compilation time.
//
// A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods.
// We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles
// of the compilation, as well as the cycles for each phase. We also track the number of bytecodes.
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated
// by "m_timerFailure" being true.
// If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile.
struct CompTimeInfo
{
#ifdef FEATURE_JIT_METHOD_PERF
// The string names of the phases.
static const char* PhaseNames[];
static bool PhaseHasChildren[];
static int PhaseParent[];
static bool PhaseReportsIRSize[];
unsigned m_byteCodeBytes;
unsigned __int64 m_totalCycles;
unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF];
unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF];
#if MEASURE_CLRAPI_CALLS
unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF];
unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF];
#endif
unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF];
// For better documentation, we call EndPhase on
// non-leaf phases. We should also call EndPhase on the
// last leaf subphase; obviously, the elapsed cycles between the EndPhase
// for the last leaf subphase and the EndPhase for an ancestor should be very small.
// We add all such "redundant end phase" intervals to this variable below; we print
// it out in a report, so we can verify that it is, indeed, very small. If it ever
// isn't, this means that we're doing something significant between the end of the last
// declared subphase and the end of its parent.
unsigned __int64 m_parentPhaseEndSlop;
bool m_timerFailure;
#if MEASURE_CLRAPI_CALLS
// The following measures the time spent inside each individual CLR API call.
unsigned m_allClrAPIcalls;
unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT];
unsigned __int64 m_allClrAPIcycles;
unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
#endif // MEASURE_CLRAPI_CALLS
CompTimeInfo(unsigned byteCodeBytes);
#endif
};
#ifdef FEATURE_JIT_METHOD_PERF
#if MEASURE_CLRAPI_CALLS
struct WrapICorJitInfo;
#endif
// This class summarizes the JIT time information over the course of a run: the number of methods compiled,
// and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above).
// The operation of adding a single method's timing to the summary may be performed concurrently by several
// threads, so it is protected by a lock.
// This class is intended to be used as a singleton type, with only a single instance.
class CompTimeSummaryInfo
{
// This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one).
static CritSecObject s_compTimeSummaryLock;
int m_numMethods;
int m_totMethods;
CompTimeInfo m_total;
CompTimeInfo m_maximum;
int m_numFilteredMethods;
CompTimeInfo m_filtered;
// This can use whatever data you want to determine if the value to be added
// belongs in the filtered section (it's always included in the unfiltered section)
bool IncludedInFilteredData(CompTimeInfo& info);
public:
// This is the unique CompTimeSummaryInfo object for this instance of the runtime.
static CompTimeSummaryInfo s_compTimeSummary;
CompTimeSummaryInfo()
: m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0)
{
}
// Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary.
// This is thread safe.
void AddInfo(CompTimeInfo& info, bool includePhases);
// Print the summary information to "f".
// This is not thread-safe; assumed to be called by only one thread.
void Print(FILE* f);
};
// A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation,
// and when the current phase started. This is intended to be part of a Compilation object.
//
class JitTimer
{
unsigned __int64 m_start; // Start of the compilation.
unsigned __int64 m_curPhaseStart; // Start of the current phase.
#if MEASURE_CLRAPI_CALLS
unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any).
unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far
unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far.
int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1).
static double s_cyclesPerSec; // Cached for speedier measurements
#endif
#ifdef DEBUG
Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start).
#endif
CompTimeInfo m_info; // The CompTimeInfo for this compilation.
static CritSecObject s_csvLock; // Lock to protect the time log file.
static FILE* s_csvFile; // The time log file handle.
void PrintCsvMethodStats(Compiler* comp);
private:
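// Disallow general-purpose new/delete; JitTimer instances are created via Create(), which uses the Compiler's
// allocator.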
void* operator new(size_t);
void* operator new[](size_t);
void operator delete(void*);
void operator delete[](void*);
public:
// Initializes the timer instance
JitTimer(unsigned byteCodeSize);
static JitTimer* Create(Compiler* comp, unsigned byteCodeSize)
{
return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize);
}
static void PrintCsvHeader();
// Ends the current phase (argument is for a redundant check).
void EndPhase(Compiler* compiler, Phases phase);
#if MEASURE_CLRAPI_CALLS
// Start and end a timed CLR API call.
void CLRApiCallEnter(unsigned apix);
void CLRApiCallLeave(unsigned apix);
#endif // MEASURE_CLRAPI_CALLS
// Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode,
// and adds it to "sum".
void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases);
// Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets
// *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of
// "m_info" to true.
bool GetThreadCycles(unsigned __int64* cycles)
{
bool res = CycleTimer::GetThreadCyclesS(cycles);
if (!res)
{
m_info.m_timerFailure = true;
}
return res;
}
static void Shutdown();
};
#endif // FEATURE_JIT_METHOD_PERF
//------------------- Function/Funclet info -------------------------------
enum FuncKind : BYTE
{
FUNC_ROOT, // The main/root function (always id==0)
FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler)
FUNC_FILTER, // a funclet associated with an EH filter
FUNC_COUNT
};
class emitLocation;
struct FuncInfoDsc
{
FuncKind funKind;
BYTE funFlags; // Currently unused, just here for padding
unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this
// funclet. It is only valid if the funKind field indicates this is an
// EH-related funclet: FUNC_HANDLER or FUNC_FILTER
#if defined(TARGET_AMD64)
// TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array.
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
UNWIND_INFO unwindHeader;
// Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd
// number of codes, the VM or Zapper will 4-byte align the whole thing.
BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))];
unsigned unwindCodeSlot;
#elif defined(TARGET_X86)
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
#elif defined(TARGET_ARMARCH)
UnwindInfo uwi; // Unwind information for this function/funclet's hot section
UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section
// Note: we only have a pointer here instead of the actual object,
// to save memory in the JIT case (compared to the NGEN case),
// where we don't have any cold section.
// Note 2: we currently don't support hot/cold splitting in functions
// with EH, so uwiCold will be NULL for all funclets.
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
#endif // TARGET_ARMARCH
#if defined(FEATURE_CFI_SUPPORT)
jitstd::vector<CFI_CODE>* cfiCodes;
#endif // FEATURE_CFI_SUPPORT
// Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else
// that isn't shared between the main function body and funclets.
};
struct fgArgTabEntry
{
GenTreeCall::Use* use; // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg.
GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any.
// Get the node that corresponds to this argument entry.
// This is the "real" node and not a placeholder or setup node.
GenTree* GetNode() const
{
return lateUse == nullptr ? use->GetNode() : lateUse->GetNode();
}
unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL
private:
regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for
// arguments passed on the stack
public:
unsigned numRegs; // Count of number of registers that this argument uses.
// Note that on ARM, if we have a double hfa, this reflects the number
// of DOUBLE registers.
#if defined(UNIX_AMD64_ABI)
// Unix amd64 will split floating point types and integer types in structs
// between floating point and general purpose registers. Keep track of that
// information so we do not need to recompute it later.
unsigned structIntRegs;
unsigned structFloatRegs;
#endif // UNIX_AMD64_ABI
#if defined(DEBUG_ARG_SLOTS)
// These fields were used to calculate stack size in stack slots for arguments
// but now they are replaced by precise `m_byteOffset/m_byteSize` because of
// arm64 apple abi requirements.
// A slot is a pointer sized region in the OutArg area.
unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area
unsigned numSlots; // Count of number of slots that this argument uses
#endif // DEBUG_ARG_SLOTS
// Return the number of stack slots that this argument occupies.
// TODO-Cleanup: this function does not align with the arm64 Apple ABI model;
// delete it. In most cases we just want to know whether the argument uses the stack or not,
// but in some cases we are checking if it is a multireg arg, e.g.:
// `numRegs + GetStackSlotsNumber() > 1`, which is harder to replace.
//
unsigned GetStackSlotsNumber() const
{
return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
}
private:
unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg.
public:
unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg
var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a
// struct is passed as a scalar type, this is that type.
// Note that if a struct is passed by reference, this will still be the struct type.
bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar
bool needPlace : 1; // True when we must replace this argument with a placeholder node
bool isTmp : 1; // True when we setup a temp LclVar for this argument due to size issues with the struct
bool processed : 1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs
bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of
// previous arguments.
NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced
// to be in certain registers or on the stack, regardless of where they
// appear in the arg list.
bool isStruct : 1; // True if this is a struct arg
bool _isVararg : 1; // True if the argument is in a vararg context.
bool passedByRef : 1; // True iff the argument is passed by reference.
#if FEATURE_ARG_SPLIT
bool _isSplit : 1; // True when this argument is split between the registers and OutArg area
#endif // FEATURE_ARG_SPLIT
#ifdef FEATURE_HFA_FIELDS_PRESENT
CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif
CorInfoHFAElemType GetHfaElemKind() const
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
return _hfaElemKind;
#else
NOWAY_MSG("GetHfaElemKind");
return CORINFO_HFA_ELEM_NONE;
#endif
}
void SetHfaElemKind(CorInfoHFAElemType elemKind)
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
_hfaElemKind = elemKind;
#else
NOWAY_MSG("SetHfaElemKind");
#endif
}
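// isNonStandard: returns true if this is a non-standard arg, i.e. one forced to a particular register or stack
// location (see nonStandardArgKind).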
bool isNonStandard() const
{
return nonStandardArgKind != NonStandardArgKind::None;
}
// Returns true if the IR node for this non-standard arg is added by fgInitArgInfo.
// In this case, it must be removed by GenTreeCall::ResetArgInfo.
bool isNonStandardArgAddedLate() const
{
switch (static_cast<NonStandardArgKind>(nonStandardArgKind))
{
case NonStandardArgKind::None:
case NonStandardArgKind::PInvokeFrame:
case NonStandardArgKind::ShiftLow:
case NonStandardArgKind::ShiftHigh:
case NonStandardArgKind::FixedRetBuffer:
case NonStandardArgKind::ValidateIndirectCallTarget:
return false;
case NonStandardArgKind::WrapperDelegateCell:
case NonStandardArgKind::VirtualStubCell:
case NonStandardArgKind::PInvokeCookie:
case NonStandardArgKind::PInvokeTarget:
case NonStandardArgKind::R2RIndirectionCell:
return true;
default:
unreached();
}
}
bool isLateArg() const
{
bool isLate = (_lateArgInx != UINT_MAX);
return isLate;
}
unsigned GetLateArgInx() const
{
assert(isLateArg());
return _lateArgInx;
}
void SetLateArgInx(unsigned inx)
{
_lateArgInx = inx;
}
regNumber GetRegNum() const
{
return (regNumber)regNums[0];
}
regNumber GetOtherRegNum() const
{
return (regNumber)regNums[1];
}
#if defined(UNIX_AMD64_ABI)
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif
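// setRegNum: set the i'th register used to pass this argument.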
void setRegNum(unsigned int i, regNumber regNum)
{
assert(i < MAX_ARG_REG_COUNT);
regNums[i] = (regNumberSmall)regNum;
}
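// GetRegNum: get the i'th register used to pass this argument.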
regNumber GetRegNum(unsigned int i)
{
assert(i < MAX_ARG_REG_COUNT);
return (regNumber)regNums[i];
}
bool IsSplit() const
{
#if FEATURE_ARG_SPLIT
return compFeatureArgSplit() && _isSplit;
#else // FEATURE_ARG_SPLIT
return false;
#endif
}
void SetSplit(bool value)
{
#if FEATURE_ARG_SPLIT
_isSplit = value;
#endif
}
bool IsVararg() const
{
return compFeatureVarArg() && _isVararg;
}
void SetIsVararg(bool value)
{
if (compFeatureVarArg())
{
_isVararg = value;
}
}
bool IsHfaArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetHfaElemKind());
}
else
{
return false;
}
}
bool IsHfaRegArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetHfaElemKind()) && isPassedInRegisters();
}
else
{
return false;
}
}
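// intRegCount: the number of general purpose registers used to pass this argument.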
unsigned intRegCount() const
{
#if defined(UNIX_AMD64_ABI)
if (this->isStruct)
{
return this->structIntRegs;
}
#endif // defined(UNIX_AMD64_ABI)
if (!this->isPassedInFloatRegisters())
{
return this->numRegs;
}
return 0;
}
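// floatRegCount: the number of floating point registers used to pass this argument.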
unsigned floatRegCount() const
{
#if defined(UNIX_AMD64_ABI)
if (this->isStruct)
{
return this->structFloatRegs;
}
#endif // defined(UNIX_AMD64_ABI)
if (this->isPassedInFloatRegisters())
{
return this->numRegs;
}
return 0;
}
// Get the number of bytes that this argument is occupying on the stack,
// including padding up to the target pointer size for platforms
// where a stack argument can't take less.
unsigned GetStackByteSize() const
{
if (!IsSplit() && numRegs > 0)
{
return 0;
}
assert(!IsHfaArg() || !IsSplit());
assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs);
const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs;
return stackByteSize;
}
var_types GetHfaType() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return HfaTypeFromElemKind(GetHfaElemKind());
}
else
{
return TYP_UNDEF;
}
}
void SetHfaType(var_types type, unsigned hfaSlots)
{
if (GlobalJitOptions::compFeatureHfa)
{
if (type != TYP_UNDEF)
{
// We must already have set the passing mode.
assert(numRegs != 0 || GetStackByteSize() != 0);
// We originally set numRegs according to the size of the struct, but if the size of the
// hfaType is not the same as the pointer size, we need to correct it.
// Note that hfaSlots is the number of registers we will use. For ARM, that is twice
// the number of "double registers".
unsigned numHfaRegs = hfaSlots;
#ifdef TARGET_ARM
if (type == TYP_DOUBLE)
{
// Must be an even number of registers.
assert((numRegs & 1) == 0);
numHfaRegs = hfaSlots / 2;
}
#endif // TARGET_ARM
if (!IsHfaArg())
{
// We haven't previously set this; do so now.
CorInfoHFAElemType elemKind = HfaElemKindFromType(type);
SetHfaElemKind(elemKind);
// Ensure we've allocated enough bits.
assert(GetHfaElemKind() == elemKind);
if (isPassedInRegisters())
{
numRegs = numHfaRegs;
}
}
else
{
// We've already set this; ensure that it's consistent.
if (isPassedInRegisters())
{
assert(numRegs == numHfaRegs);
}
assert(type == HfaTypeFromElemKind(GetHfaElemKind()));
}
}
}
}
#ifdef TARGET_ARM
void SetIsBackFilled(bool backFilled)
{
isBackFilled = backFilled;
}
bool IsBackFilled() const
{
return isBackFilled;
}
#else // !TARGET_ARM
void SetIsBackFilled(bool backFilled)
{
}
bool IsBackFilled() const
{
return false;
}
#endif // !TARGET_ARM
bool isPassedInRegisters() const
{
return !IsSplit() && (numRegs != 0);
}
bool isPassedInFloatRegisters() const
{
#ifdef TARGET_X86
return false;
#else
return isValidFloatArgReg(GetRegNum());
#endif
}
// Can we replace the struct type of this node with a primitive type for argument passing?
bool TryPassAsPrimitive() const
{
return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE));
}
#if defined(DEBUG_ARG_SLOTS)
// Returns the number of "slots" used, where for this purpose a
// register counts as a slot.
unsigned getSlotCount() const
{
if (isBackFilled)
{
assert(isPassedInRegisters());
assert(numRegs == 1);
}
else if (GetRegNum() == REG_STK)
{
assert(!isPassedInRegisters());
assert(numRegs == 0);
}
else
{
assert(numRegs > 0);
}
return numSlots + numRegs;
}
#endif
#if defined(DEBUG_ARG_SLOTS)
// Returns the size as a multiple of pointer-size.
// For targets without HFAs, this is the same as getSlotCount().
unsigned getSize() const
{
unsigned size = getSlotCount();
if (GlobalJitOptions::compFeatureHfa)
{
if (IsHfaRegArg())
{
#ifdef TARGET_ARM
// We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size.
if (GetHfaType() == TYP_DOUBLE)
{
assert(!IsSplit());
size <<= 1;
}
#elif defined(TARGET_ARM64)
// We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size,
// or if they are SIMD16 vector hfa regs we have to double the size.
if (GetHfaType() == TYP_FLOAT)
{
// Round up in case of odd HFA count.
size = (size + 1) >> 1;
}
#ifdef FEATURE_SIMD
else if (GetHfaType() == TYP_SIMD16)
{
size <<= 1;
}
#endif // FEATURE_SIMD
#endif // TARGET_ARM64
}
}
return size;
}
#endif // DEBUG_ARG_SLOTS
private:
unsigned m_byteOffset;
// Byte size that this argument takes, including any padding after it.
// For example, a 1-byte arg on x64 with 8-byte alignment
// will have `m_byteSize == 8`; the same arg on Apple arm64 will have `m_byteSize == 1`.
unsigned m_byteSize;
unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers).
public:
void SetByteOffset(unsigned byteOffset)
{
DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum);
m_byteOffset = byteOffset;
}
unsigned GetByteOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum);
return m_byteOffset;
}
void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa)
{
unsigned roundedByteSize;
if (compMacOsArm64Abi())
{
// Only struct types need extension or rounding to pointer size, but HFA<float> does not.
if (isStruct && !isFloatHfa)
{
roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
}
else
{
roundedByteSize = byteSize;
}
}
else
{
roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
}
#if !defined(TARGET_ARM)
// Arm32 could have a struct with 8-byte alignment
// whose rounded size % 8 is not 0.
assert(m_byteAlignment != 0);
assert(roundedByteSize % m_byteAlignment == 0);
#endif // TARGET_ARM
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi() && !isStruct)
{
assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE);
}
#endif
m_byteSize = roundedByteSize;
}
unsigned GetByteSize() const
{
return m_byteSize;
}
void SetByteAlignment(unsigned byteAlignment)
{
m_byteAlignment = byteAlignment;
}
unsigned GetByteAlignment() const
{
return m_byteAlignment;
}
// Set the register numbers for a multireg argument.
// There's nothing to do on x64/Ux because the structDesc has already been used to set the
// register numbers.
void SetMultiRegNums()
{
#if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI)
if (numRegs == 1)
{
return;
}
regNumber argReg = GetRegNum(0);
#ifdef TARGET_ARM
unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1;
#else
unsigned int regSize = 1;
#endif
if (numRegs > MAX_ARG_REG_COUNT)
NO_WAY("Multireg argument exceeds the maximum length");
for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++)
{
argReg = (regNumber)(argReg + regSize);
setRegNum(regIndex, argReg);
}
#endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI)
}
#ifdef DEBUG
// Check that the value of 'isStruct' is consistent.
// A struct arg must be one of the following:
// - A node of struct type,
// - A GT_FIELD_LIST, or
// - A node of a scalar type, passed in a single register or slot
// (or two slots in the case of a struct passed on the stack as TYP_DOUBLE).
//
void checkIsStruct() const
{
GenTree* node = GetNode();
if (isStruct)
{
if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST))
{
// This is the case where we are passing a struct as a primitive type.
// On most targets, this is always a single register or slot.
// However, on ARM this could be two slots if it is TYP_DOUBLE.
bool isPassedAsPrimitiveType =
((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE)));
#ifdef TARGET_ARM
if (!isPassedAsPrimitiveType)
{
if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2))
{
isPassedAsPrimitiveType = true;
}
}
#endif // TARGET_ARM
assert(isPassedAsPrimitiveType);
}
}
else
{
assert(!varTypeIsStruct(node));
}
}
void Dump() const;
#endif
};
//-------------------------------------------------------------------------
//
// The class fgArgInfo is used to handle the arguments
// when morphing a GT_CALL node.
//
class fgArgInfo
{
Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory
GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo
unsigned argCount; // Updatable arg count value
#if defined(DEBUG_ARG_SLOTS)
unsigned nextSlotNum; // Updatable slot count value
#endif
unsigned nextStackByteOffset;
unsigned stkLevel; // Stack depth when we make this call (for x86)
#if defined(UNIX_X86_ABI)
bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment.
unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs().
unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call.
// Computed dynamically during codegen, based on stkSizeBytes and the current
// stack level (genStackLevel) when the first stack adjustment is made for
// this call.
#endif
#if FEATURE_FIXED_OUT_ARGS
unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL
#endif
unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
bool hasRegArgs; // true if we have one or more register arguments
bool hasStackArgs; // true if we have one or more stack arguments
bool argsComplete; // marker for state
bool argsSorted; // marker for state
bool needsTemps; // one or more arguments must be copied to a temp by EvalArgsToTemps
fgArgTabEntry** argTable; // variable sized array of per-argument descriptions (i.e. argTable[argTableSize])
private:
void AddArg(fgArgTabEntry* curArgTabEntry);
public:
fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount);
fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall);
fgArgTabEntry* AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg = false);
#ifdef UNIX_AMD64_ABI
fgArgTabEntry* AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
const bool isStruct,
const bool isFloatHfa,
const bool isVararg,
const regNumber otherRegNum,
const unsigned structIntRegs,
const unsigned structFloatRegs,
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
#endif // UNIX_AMD64_ABI
fgArgTabEntry* AddStkArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
unsigned numSlots,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg = false);
void RemorphReset();
void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots);
void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode);
void ArgsComplete();
void SortArgs();
void EvalArgsToTemps();
unsigned ArgCount() const
{
return argCount;
}
fgArgTabEntry** ArgTable() const
{
return argTable;
}
#if defined(DEBUG_ARG_SLOTS)
unsigned GetNextSlotNum() const
{
return nextSlotNum;
}
#endif
unsigned GetNextSlotByteOffset() const
{
return nextStackByteOffset;
}
bool HasRegArgs() const
{
return hasRegArgs;
}
bool NeedsTemps() const
{
return needsTemps;
}
bool HasStackArgs() const
{
return hasStackArgs;
}
bool AreArgsComplete() const
{
return argsComplete;
}
#if FEATURE_FIXED_OUT_ARGS
unsigned GetOutArgSize() const
{
return outArgSize;
}
void SetOutArgSize(unsigned newVal)
{
outArgSize = newVal;
}
#endif // FEATURE_FIXED_OUT_ARGS
#if defined(UNIX_X86_ABI)
void ComputeStackAlignment(unsigned curStackLevelInBytes)
{
padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN);
}
unsigned GetStkAlign() const
{
return padStkAlign;
}
void SetStkSizeBytes(unsigned newStkSizeBytes)
{
stkSizeBytes = newStkSizeBytes;
}
unsigned GetStkSizeBytes() const
{
return stkSizeBytes;
}
bool IsStkAlignmentDone() const
{
return alignmentDone;
}
void SetStkAlignmentDone()
{
alignmentDone = true;
}
#endif // defined(UNIX_X86_ABI)
// Get the fgArgTabEntry for the arg at position argNum.
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const
{
fgArgTabEntry* curArgTabEntry = nullptr;
if (!reMorphing)
{
// The arg table has not yet been sorted.
curArgTabEntry = argTable[argNum];
assert(curArgTabEntry->argNum == argNum);
return curArgTabEntry;
}
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->argNum == argNum)
{
return curArgTabEntry;
}
}
noway_assert(!"GetArgEntry: argNum not found");
return nullptr;
}
void SetNeedsTemps()
{
needsTemps = true;
}
// Get the node for the arg at position argIndex.
// Caller must ensure that this index is a valid arg index.
GenTree* GetArgNode(unsigned argIndex) const
{
return GetArgEntry(argIndex)->GetNode();
}
void Dump(Compiler* compiler) const;
};
#ifdef DEBUG
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// We have the ability to mark source expressions with "Test Labels."
// These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions
// that should be CSE defs, and other expressions that should uses of those defs, with a shared label.
enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel.
{
TL_SsaName,
TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown).
TL_VNNorm, // Like above, but uses the non-exceptional value of the expression.
TL_CSE_Def, // This must be identified in the JIT as a CSE def
TL_CSE_Use, // This must be identified in the JIT as a CSE use
TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop.
};
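// A test label together with an associated number (e.g. a shared label number that pairs CSE defs with their uses).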
struct TestLabelAndNum
{
TestLabel m_tl;
ssize_t m_num;
TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0)
{
}
};
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap;
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif // DEBUG
//-------------------------------------------------------------------------
// LoopFlags: flags for the loop table.
//
enum LoopFlags : unsigned short
{
LPFLG_EMPTY = 0,
// LPFLG_UNUSED = 0x0001,
// LPFLG_UNUSED = 0x0002,
LPFLG_ITER = 0x0004, // loop of form: for (i = icon or lclVar; test_condition(); i++)
// LPFLG_UNUSED = 0x0008,
LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call
LPFLG_VAR_INIT = 0x0020, // iterator is initialized with a local var (var # found in lpVarInit)
LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit)
LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit)
LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit)
LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit)
LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit)
LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop
LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away)
LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop
LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed
LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet
// type are assigned to.
};
inline constexpr LoopFlags operator~(LoopFlags a)
{
return (LoopFlags)(~(unsigned short)a);
}
inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b)
{
return (LoopFlags)((unsigned short)a | (unsigned short)b);
}
inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b)
{
return (LoopFlags)((unsigned short)a & (unsigned short)b);
}
inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b)
{
return a = (LoopFlags)((unsigned short)a | (unsigned short)b);
}
inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b)
{
return a = (LoopFlags)((unsigned short)a & (unsigned short)b);
}
// The following holds information about instr offsets in terms of generated code.
enum class IPmappingDscKind
{
Prolog, // The mapping represents the start of a prolog.
Epilog, // The mapping represents the start of an epilog.
NoMapping, // This does not map to any IL offset.
Normal, // The mapping maps to an IL offset.
};
struct IPmappingDsc
{
emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset
IPmappingDscKind ipmdKind; // The kind of mapping
ILLocation ipmdLoc; // The location for normal mappings
bool ipmdIsLabel; // Can this code be a branch label?
};
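// A single native code location paired with its DebugInfo, used for precise IL-to-native mappings.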
struct PreciseIPMapping
{
emitLocation nativeLoc;
DebugInfo debugInfo;
};
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX The big guy. The sections are currently organized as : XX
XX XX
XX o GenTree and BasicBlock XX
XX o LclVarsInfo XX
XX o Importer XX
XX o FlowGraph XX
XX o Optimizer XX
XX o RegAlloc XX
XX o EEInterface XX
XX o TempsInfo XX
XX o RegSet XX
XX o GCInfo XX
XX o Instruction XX
XX o ScopeInfo XX
XX o PrologScopeInfo XX
XX o CodeGenerator XX
XX o UnwindInfo XX
XX o Compiler XX
XX o typeInfo XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
struct HWIntrinsicInfo;
class Compiler
{
friend class emitter;
friend class UnwindInfo;
friend class UnwindFragmentInfo;
friend class UnwindEpilogInfo;
friend class JitTimer;
friend class LinearScan;
friend class fgArgInfo;
friend class Rationalizer;
friend class Phase;
friend class Lowering;
friend class CSE_DataFlow;
friend class CSE_Heuristic;
friend class CodeGenInterface;
friend class CodeGen;
friend class LclVarDsc;
friend class TempDsc;
friend class LIR;
friend class ObjectAllocator;
friend class LocalAddressVisitor;
friend struct GenTree;
friend class MorphInitBlockHelper;
friend class MorphCopyBlockHelper;
#ifdef FEATURE_HW_INTRINSICS
friend struct HWIntrinsicInfo;
#endif // FEATURE_HW_INTRINSICS
#ifndef TARGET_64BIT
friend class DecomposeLongs;
#endif // !TARGET_64BIT
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Misc structs definitions XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package.
#ifdef DEBUG
bool verbose;
bool verboseTrees;
bool shouldUseVerboseTrees();
bool asciiTrees; // If true, dump trees using only ASCII characters
bool shouldDumpASCIITrees();
bool verboseSsa; // If true, produce especially verbose dump output in SSA construction.
bool shouldUseVerboseSsa();
bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id: morphNum).
int morphNum; // This counts the trees that have been morphed, allowing us to label each uniquely.
bool doExtraSuperPmiQueries;
void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.
const char* VarNameToStr(VarName name)
{
return name;
}
DWORD expensiveDebugCheckLevel;
#endif
#if FEATURE_MULTIREG_RET
GenTree* impAssignMultiRegTypeToVar(GenTree* op,
CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET
#ifdef TARGET_X86
bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86
//-------------------------------------------------------------------------
// Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
// HFAs are one to four element structs where each element is the same
// type, either all float or all double. We handle HVAs (one to four elements of
// vector types) uniformly with HFAs. HFAs are treated specially
// in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in
// floating-point registers instead of the general purpose registers.
//
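// For example (illustrative), a struct equivalent to
//     struct Vec2f { float x; float y; };
// is an HFA of two floats and is passed in floating-point registers on ARM/ARM64 rather than
// in general purpose registers.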
bool IsHfa(CORINFO_CLASS_HANDLE hClass);
bool IsHfa(GenTree* tree);
var_types GetHfaType(GenTree* tree);
unsigned GetHfaCount(GenTree* tree);
var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass);
bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);
//-------------------------------------------------------------------------
// The following is used for validating format of EH table
//
struct EHNodeDsc;
typedef struct EHNodeDsc* pEHNodeDsc;
EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
EHNodeDsc* ehnNext; // next unused node in the preallocated EHNodeDsc block; nodes are carved from here as the tree is built.
struct EHNodeDsc
{
enum EHBlockType
{
TryNode,
FilterNode,
HandlerNode,
FinallyNode,
FaultNode
};
EHBlockType ehnBlockType; // kind of EH block
IL_OFFSET ehnStartOffset; // IL offset of start of the EH block
IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
// the last IL offset, not "one past the last one", i.e., the range Start to End is
// inclusive).
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order
pEHNodeDsc ehnChild; // leftmost nested block
union {
pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node
pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node
};
pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0
pEHNodeDsc ehnEquivalent; // if blockType == TryNode, a try node whose start and end offsets are the same as this one's
void ehnSetTryNodeType()
{
ehnBlockType = TryNode;
}
void ehnSetFilterNodeType()
{
ehnBlockType = FilterNode;
}
void ehnSetHandlerNodeType()
{
ehnBlockType = HandlerNode;
}
void ehnSetFinallyNodeType()
{
ehnBlockType = FinallyNode;
}
void ehnSetFaultNodeType()
{
ehnBlockType = FaultNode;
}
bool ehnIsTryBlock()
{
return ehnBlockType == TryNode;
}
bool ehnIsFilterBlock()
{
return ehnBlockType == FilterNode;
}
bool ehnIsHandlerBlock()
{
return ehnBlockType == HandlerNode;
}
bool ehnIsFinallyBlock()
{
return ehnBlockType == FinallyNode;
}
bool ehnIsFaultBlock()
{
return ehnBlockType == FaultNode;
}
// returns true if there is any overlap between the two nodes
static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2)
{
if (node1->ehnStartOffset < node2->ehnStartOffset)
{
return (node1->ehnEndOffset >= node2->ehnStartOffset);
}
else
{
return (node1->ehnStartOffset <= node2->ehnEndOffset);
}
}
// returns true if "inner" is completely nested inside "outer"; callers raise BADCODE when it is not
static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer)
{
return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset));
}
};
//-------------------------------------------------------------------------
// Exception handling functions
//
#if !defined(FEATURE_EH_FUNCLETS)
bool ehNeedsShadowSPslots()
{
return (info.compXcptnsCount || opts.compDbgEnC);
}
// 0 for methods with no EH
// 1 for methods with non-nested EH, or where only the try blocks are nested
// 2 for a method with a catch within a catch
// etc.
unsigned ehMaxHndNestingCount;
#endif // !FEATURE_EH_FUNCLETS
static bool jitIsBetween(unsigned value, unsigned start, unsigned end);
static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end);
bool bbInCatchHandlerILRange(BasicBlock* blk);
bool bbInFilterILRange(BasicBlock* blk);
bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk);
unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo);
unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex);
unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex);
// Returns true if "block" is the start of a try region.
bool bbIsTryBeg(BasicBlock* block);
// Returns true if "block" is the start of a handler or filter region.
bool bbIsHandlerBeg(BasicBlock* block);
// Returns true iff "block" is where control flows if an exception is raised in the
// try region, and sets "*regionIndex" to the index of the try for the handler.
// Differs from "IsHandlerBeg" in the case of filters, where this is true for the first
// block of the filter, but not for the filter's handler.
bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex);
bool ehHasCallableHandlers();
// Return the EH descriptor for the given region index.
EHblkDsc* ehGetDsc(unsigned regionIndex);
// Return the EH index given a region descriptor.
unsigned ehGetIndex(EHblkDsc* ehDsc);
// Return the EH descriptor index of the enclosing try, for the given region index.
unsigned ehGetEnclosingTryIndex(unsigned regionIndex);
// Return the EH descriptor index of the enclosing handler, for the given region index.
unsigned ehGetEnclosingHndIndex(unsigned regionIndex);
// Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this
// block is not in a 'try' region).
EHblkDsc* ehGetBlockTryDsc(BasicBlock* block);
// Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr
// if this block is not in a filter or handler region).
EHblkDsc* ehGetBlockHndDsc(BasicBlock* block);
// Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or
// nullptr if this block's exceptions propagate to caller).
EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block);
EHblkDsc* ehIsBlockTryLast(BasicBlock* block);
EHblkDsc* ehIsBlockHndLast(BasicBlock* block);
bool ehIsBlockEHLast(BasicBlock* block);
bool ehBlockHasExnFlowDsc(BasicBlock* block);
// Return the region index of the most nested EH region this block is in.
unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion);
// Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check.
unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex);
// Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX
// if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion'
// is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler.
// (It can never be a filter.)
unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion);
// A block has been deleted. Update the EH table appropriately.
void ehUpdateForDeletedBlock(BasicBlock* block);
// Determine whether a block can be deleted while preserving the EH normalization rules.
bool ehCanDeleteEmptyBlock(BasicBlock* block);
// Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region.
void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast);
// For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler,
// or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index
// is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the
// BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function
// body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the
// BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if lives in the handler region. (It never
// lives in a filter.)
unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion);
// Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex' region's
// handler. Set begBlk to the first block, and endBlk to the block after the last block of the range
// (nullptr if the last block is the last block in the program).
// Precondition: 'finallyIndex' is the EH region of a try/finally clause.
void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk);
#ifdef DEBUG
// Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return
// 'true' if the BBJ_CALLFINALLY is in the correct EH region.
bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex);
#endif // DEBUG
#if defined(FEATURE_EH_FUNCLETS)
// Do we need a PSPSym in the main function? For codegen purposes, we only need one
// if there is a filter that protects a region with a nested EH clause (such as a
// try/catch nested in the 'try' body of a try/filter/filter-handler). See
// genFuncletProlog() for more details. However, the VM seems to use it for more
// purposes, maybe including debugging. Until we are sure otherwise, always create
// a PSPSym for functions with any EH.
bool ehNeedsPSPSym() const
{
#ifdef TARGET_X86
return false;
#else // TARGET_X86
return compHndBBtabCount > 0;
#endif // TARGET_X86
}
bool ehAnyFunclets(); // Are there any funclets in this function?
unsigned ehFuncletCount(); // Return the count of funclets in the function
unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks
#else // !FEATURE_EH_FUNCLETS
bool ehAnyFunclets()
{
return false;
}
unsigned ehFuncletCount()
{
return 0;
}
unsigned bbThrowIndex(BasicBlock* blk)
{
return blk->bbTryIndex;
} // Get the index to use as the cache key for sharing throw blocks
#endif // !FEATURE_EH_FUNCLETS
// Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of
// "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the first
// first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor,
// for example, we want to consider that the immediate dominator of the catch clause start block, so it's
// convenient to also consider it a predecessor.)
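// For example (illustrative): if BB10 is the first block of a catch handler whose corresponding
// try region begins at BB03, and BB02 is the sole predecessor of BB03, then BB02 is also treated
// as an "EH predecessor" of BB10.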
flowList* BlockPredsWithEH(BasicBlock* blk);
// This table is useful for memoization of the method above.
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap;
BlockToFlowListMap* m_blockToEHPreds;
BlockToFlowListMap* GetBlockToEHPreds()
{
if (m_blockToEHPreds == nullptr)
{
m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator());
}
return m_blockToEHPreds;
}
void* ehEmitCookie(BasicBlock* block);
UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);
EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter);
EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd);
EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter);
EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast);
void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg);
void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast);
void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast);
void fgSkipRmvdBlocks(EHblkDsc* handlerTab);
void fgAllocEHTable();
void fgRemoveEHTableEntry(unsigned XTnum);
#if defined(FEATURE_EH_FUNCLETS)
EHblkDsc* fgAddEHTableEntry(unsigned XTnum);
#endif // FEATURE_EH_FUNCLETS
#if !FEATURE_EH
void fgRemoveEH();
#endif // !FEATURE_EH
void fgSortEHTable();
// Causes the EH table to obey some well-formedness conditions, by inserting
// empty BB's when necessary:
// * No block is both the first block of a handler and the first block of a try.
// * No block is the first block of multiple 'try' regions.
// * No block is the last block of multiple EH regions.
void fgNormalizeEH();
bool fgNormalizeEHCase1();
bool fgNormalizeEHCase2();
bool fgNormalizeEHCase3();
void fgCheckForLoopsInHandlers();
#ifdef DEBUG
void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
void fgVerifyHandlerTab();
void fgDispHandlerTab();
#endif // DEBUG
bool fgNeedToSortEHTable;
void verInitEHTree(unsigned numEHClauses);
void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab);
void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node);
void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node);
void verCheckNestingLevel(EHNodeDsc* initRoot);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree and BasicBlock XX
XX XX
XX Functions to allocate and display the GenTrees and BasicBlocks XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// Functions to create nodes
Statement* gtNewStmt(GenTree* expr = nullptr);
Statement* gtNewStmt(GenTree* expr, const DebugInfo& di);
// For unary opers.
GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE);
// For binary opers.
GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2);
GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon);
GenTree* gtNewLargeOperNode(genTreeOps oper,
var_types type = TYP_I_IMPL,
GenTree* op1 = nullptr,
GenTree* op2 = nullptr);
GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT);
GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq);
GenTree* gtNewPhysRegNode(regNumber reg, var_types type);
GenTree* gtNewJmpTableNode();
GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant);
GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr);
GenTreeFlags gtTokenToIconFlags(unsigned token);
GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle);
GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd);
GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd);
GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd);
GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd);
GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue);
GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node);
GenTree* gtNewLconNode(__int64 value);
GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE);
GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle);
GenTree* gtNewZeroConNode(var_types type);
GenTree* gtNewOneConNode(var_types type);
GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src);
#ifdef FEATURE_SIMD
GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize);
#endif
GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock);
GenTree* gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg);
GenTree* gtNewBitCastNode(var_types type, GenTree* arg);
protected:
void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile);
public:
GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr);
void gtSetObjGcInfo(GenTreeObj* objNode);
GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr);
GenTree* gtNewBlockVal(GenTree* addr, unsigned size);
GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile);
GenTreeCall::Use* gtNewCallArgs(GenTree* node);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4);
GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args);
GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after);
GenTreeCall* gtNewCallNode(gtCallTypes callType,
CORINFO_METHOD_HANDLE handle,
var_types type,
GenTreeCall::Use* args,
const DebugInfo& di = DebugInfo());
GenTreeCall* gtNewIndCallNode(GenTree* addr,
var_types type,
GenTreeCall::Use* args,
const DebugInfo& di = DebugInfo());
GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr);
GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup,
GenTree* ctxTree,
void* compileTimeHandle);
GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET));
GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET));
GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL);
GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum,
unsigned lclOffs,
FieldSeqNode* fieldSeq,
var_types type = TYP_I_IMPL);
#ifdef FEATURE_SIMD
GenTreeSIMD* gtNewSIMDNode(
var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize);
GenTreeSIMD* gtNewSIMDNode(var_types type,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize);
void SetOpLclRelatedToSIMDIntrinsic(GenTree* op);
#endif
#ifdef FEATURE_HW_INTRINSICS
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree** operands,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(
var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
GenTree* gtNewSimdAbsNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdBinOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCeilNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpAllNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCndSelNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCreateBroadcastNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdDotProdNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdFloorNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdGetElementNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdMaxNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdMinNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdNarrowNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdSqrtNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdSumNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdUnOpNode(genTreeOps op,
var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWidenLowerNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWidenUpperNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWithElementNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdZeroNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID);
CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType);
CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType);
#endif // FEATURE_HW_INTRINSICS
GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd);
GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset);
GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags);
GenTree* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0);
GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp);
GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block);
GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr);
GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock);
var_types gtTypeForNullCheck(GenTree* tree);
void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block);
static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum);
static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node);
fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx);
static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx);
GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src);
GenTree* gtNewTempAssign(unsigned tmp,
GenTree* val,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* gtNewRefCOMfield(GenTree* objPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp,
CORINFO_CLASS_HANDLE structType,
GenTree* assg);
GenTree* gtNewNothingNode();
GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd);
GenTree* gtUnusedValNode(GenTree* expr);
GenTree* gtNewKeepAliveNode(GenTree* op);
GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType);
GenTreeCast* gtNewCastNodeL(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType);
GenTreeAllocObj* gtNewAllocObjNode(
unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1);
GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent);
GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree);
GenTreeIndir* gtNewMethodTableLookup(GenTree* obj);
//------------------------------------------------------------------------
// Other GenTree functions
GenTree* gtClone(GenTree* tree, bool complexOK = false);
// If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise,
// create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with
// IntCnses with value `deepVarVal`.
GenTree* gtCloneExpr(
GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal);
// Create a copy of `tree`, optionally adding specified flags, and optionally mapping uses of local
// `varNum` to int constants with value `varVal`.
GenTree* gtCloneExpr(GenTree* tree,
GenTreeFlags addFlags = GTF_EMPTY,
unsigned varNum = BAD_VAR_NUM,
int varVal = 0)
{
return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal);
}
Statement* gtCloneStmt(Statement* stmt)
{
GenTree* exprClone = gtCloneExpr(stmt->GetRootNode());
return gtNewStmt(exprClone, stmt->GetDebugInfo());
}
// Internal helper for cloning a call
GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call,
GenTreeFlags addFlags = GTF_EMPTY,
unsigned deepVarNum = BAD_VAR_NUM,
int deepVarVal = 0);
// Create copy of an inline or guarded devirtualization candidate tree.
GenTreeCall* gtCloneCandidateCall(GenTreeCall* call);
void gtUpdateSideEffects(Statement* stmt, GenTree* tree);
void gtUpdateTreeAncestorsSideEffects(GenTree* tree);
void gtUpdateStmtSideEffects(Statement* stmt);
void gtUpdateNodeSideEffects(GenTree* tree);
void gtUpdateNodeOperSideEffects(GenTree* tree);
void gtUpdateNodeOperSideEffectsPost(GenTree* tree);
// Returns "true" iff the complexity (not formally defined, but first interpretation
// is # of nodes in subtree) of "tree" is greater than "limit".
// (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used
// before they have been set.)
bool gtComplexityExceeds(GenTree** tree, unsigned limit);
GenTree* gtReverseCond(GenTree* tree);
static bool gtHasRef(GenTree* tree, ssize_t lclNum);
bool gtHasLocalsWithAddrOp(GenTree* tree);
unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz);
unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp);
void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly);
#ifdef DEBUG
unsigned gtHashValue(GenTree* tree);
GenTree* gtWalkOpEffectiveVal(GenTree* op);
#endif
void gtPrepareCost(GenTree* tree);
bool gtIsLikelyRegVar(GenTree* tree);
// Returns true iff the secondNode can be swapped with firstNode.
bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode);
// Given an address expression, compute its costs and addressing mode opportunities,
// and mark addressing mode candidates as GTF_DONT_CSE.
// TODO-Throughput - Consider actually instantiating these early, to avoid
// having to re-run the algorithm that looks for them (might also improve CQ).
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type);
unsigned gtSetEvalOrder(GenTree* tree);
void gtSetStmtInfo(Statement* stmt);
// Returns "true" iff "node" has any of the side effects in "flags".
bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags);
// Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags".
bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags);
// Appends 'expr' in front of 'list'
// 'list' will typically start off as 'nullptr'
// when 'list' is non-null a GT_COMMA node is used to insert 'expr'
GenTree* gtBuildCommaList(GenTree* list, GenTree* expr);
void gtExtractSideEffList(GenTree* expr,
GenTree** pList,
GenTreeFlags flags = GTF_SIDE_EFFECT,
bool ignoreRoot = false);
GenTree* gtGetThisArg(GenTreeCall* call);
// Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the
// static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but
// complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing
// the given "fldHnd", is such an object pointer.
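// For example (illustrative), for a static field of a user-defined struct type the static slot
// holds a TYP_REF to a boxed copy of the struct, and a load of one of its fields goes through
// that object pointer plus the field's offset within the box payload.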
bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd);
// Return true if call is a recursive call; return false otherwise.
// Note when inlining, this looks for calls back to the root method.
bool gtIsRecursiveCall(GenTreeCall* call)
{
return gtIsRecursiveCall(call->gtCallMethHnd);
}
bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle)
{
return (callMethodHandle == impInlineRoot()->info.compMethodHnd);
}
//-------------------------------------------------------------------------
GenTree* gtFoldExpr(GenTree* tree);
GenTree* gtFoldExprConst(GenTree* tree);
GenTree* gtFoldExprSpecial(GenTree* tree);
GenTree* gtFoldBoxNullable(GenTree* tree);
GenTree* gtFoldExprCompare(GenTree* tree);
GenTree* gtCreateHandleCompare(genTreeOps oper,
GenTree* op1,
GenTree* op2,
CorInfoInlineTypeCheck typeCheckInliningResult);
GenTree* gtFoldExprCall(GenTreeCall* call);
GenTree* gtFoldTypeCompare(GenTree* tree);
GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2);
// Options to control behavior of gtTryRemoveBoxUpstreamEffects
enum BoxRemovalOptions
{
BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree
BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree
BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree
BR_DONT_REMOVE, // check if removal is possible, return copy source tree
BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree
BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address
};
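// Illustrative usage: gtTryRemoveBoxUpstreamEffects(boxNode, BR_DONT_REMOVE) only checks whether
// the box could be removed and leaves the tree unchanged, while the default BR_REMOVE_AND_NARROW
// actually strips the box-related effects and returns the (possibly narrowed) source tree.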
GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW);
GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp);
//-------------------------------------------------------------------------
// Get the handle, if any.
CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree);
// Get the handle, and assert if not found.
CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree);
// Get the handle for a ref type.
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull);
// Get the class handle for a helper call
CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull);
// Get the element handle for an array of ref type.
CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array);
// Get a class handle from a helper call argument
CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array);
// Get the class handle for a field
CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull);
// Check if this tree is a gc static base helper call
bool gtIsStaticGCBaseHelperCall(GenTree* tree);
//-------------------------------------------------------------------------
// Functions to display the trees
#ifdef DEBUG
void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR);
void gtDispConst(GenTree* tree);
void gtDispLeaf(GenTree* tree, IndentStack* indentStack);
void gtDispNodeName(GenTree* tree);
#if FEATURE_MULTIREG_RET
unsigned gtDispMultiRegCount(GenTree* tree);
#endif
void gtDispRegVal(GenTree* tree);
void gtDispZeroFieldSeq(GenTree* tree);
void gtDispVN(GenTree* tree);
void gtDispCommonEndLine(GenTree* tree);
enum IndentInfo
{
IINone,
IIArc,
IIArcTop,
IIArcBottom,
IIEmbedded,
IIError,
IndentInfoCount
};
void gtDispChild(GenTree* child,
IndentStack* indentStack,
IndentInfo arcType,
_In_opt_ const char* msg = nullptr,
bool topOnly = false);
void gtDispTree(GenTree* tree,
IndentStack* indentStack = nullptr,
_In_opt_ const char* msg = nullptr,
bool topOnly = false,
bool isLIR = false);
void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut);
int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining);
char* gtGetLclVarName(unsigned lclNum);
void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true);
void gtDispLclVarStructType(unsigned lclNum);
void gtDispClassLayout(ClassLayout* layout, var_types type);
void gtDispILLocation(const ILLocation& loc);
void gtDispStmt(Statement* stmt, const char* msg = nullptr);
void gtDispBlockStmts(BasicBlock* block);
void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength);
void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength);
void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack);
void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq);
void gtDispFieldSeq(FieldSeqNode* pfsn);
void gtDispRange(LIR::ReadOnlyRange const& range);
void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree);
void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr);
#endif
// For tree walks
enum fgWalkResult
{
WALK_CONTINUE,
WALK_SKIP_SUBTREES,
WALK_ABORT
};
struct fgWalkData;
typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data);
typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data);
static fgWalkPreFn gtMarkColonCond;
static fgWalkPreFn gtClearColonCond;
struct FindLinkData
{
GenTree* nodeToFind;
GenTree** result;
GenTree* parent;
};
FindLinkData gtFindLink(Statement* stmt, GenTree* node);
bool gtHasCatchArg(GenTree* tree);
typedef ArrayStack<GenTree*> GenTreeStack;
static bool gtHasCallOnStack(GenTreeStack* parentStack);
//=========================================================================
// BasicBlock functions
#ifdef DEBUG
// This is a debug flag we will use to assert when creating block during codegen
// as this interferes with procedure splitting. If you know what you're doing, set
// it to true before creating the block. (DEBUG only)
bool fgSafeBasicBlockCreation;
#endif
BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind);
void placeLoopAlignInstructions();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX LclVarsInfo XX
XX XX
XX The variables to be used by the code generator. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
//
// For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will
// be placed in the stack frame, and its fields must be laid out sequentially.
//
// For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by
// a local variable that can be enregistered or placed in the stack frame.
// The fields do not need to be laid out sequentially.
//
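// For example (illustrative), a local of a struct type equivalent to
//     struct Point { int x; int y; };
// promoted as PROMOTION_TYPE_INDEPENDENT becomes two int field locals that can each be
// enregistered, while PROMOTION_TYPE_DEPENDENT keeps the sequential stack layout and the
// field locals remain views into the parent struct local.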
enum lvaPromotionType
{
PROMOTION_TYPE_NONE, // The struct local is not promoted
PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted,
// and its field locals are independent of its parent struct local.
PROMOTION_TYPE_DEPENDENT // The struct local is promoted,
// but its field locals depend on its parent struct local.
};
/*****************************************************************************/
enum FrameLayoutState
{
NO_FRAME_LAYOUT,
INITIAL_FRAME_LAYOUT,
PRE_REGALLOC_FRAME_LAYOUT,
REGALLOC_FRAME_LAYOUT,
TENTATIVE_FRAME_LAYOUT,
FINAL_FRAME_LAYOUT
};
public:
RefCountState lvaRefCountState; // Current local ref count state
bool lvaLocalVarRefCounted() const
{
return lvaRefCountState == RCS_NORMAL;
}
bool lvaTrackedFixed; // true: We cannot add new 'tracked' variable
unsigned lvaCount; // total number of locals, which includes function arguments,
// special arguments, IL local variables, and JIT temporary variables
LclVarDsc* lvaTable; // variable descriptor table
unsigned lvaTableCnt; // lvaTable size (>= lvaCount)
unsigned lvaTrackedCount; // actual # of locals being tracked
unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked
#ifdef DEBUG
VARSET_TP lvaTrackedVars; // set of tracked variables
#endif
#ifndef TARGET_64BIT
VARSET_TP lvaLongVars; // set of long (64-bit) variables
#endif
VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables
unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices.
// If that changes, this changes. VarSets from different epochs
// cannot be meaningfully combined.
unsigned GetCurLVEpoch()
{
return lvaCurEpoch;
}
// reverse map of tracked number to var number
unsigned lvaTrackedToVarNumSize;
unsigned* lvaTrackedToVarNum;
#if DOUBLE_ALIGN
#ifdef DEBUG
// # of procs compiled with a double-aligned stack
static unsigned s_lvaDoubleAlignedProcsCount;
#endif
#endif
// Getters and setters for address-exposed and do-not-enregister local var properties.
bool lvaVarAddrExposed(unsigned varNum) const;
void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason));
void lvaSetVarLiveInOutOfHandler(unsigned varNum);
bool lvaVarDoNotEnregister(unsigned varNum);
void lvSetMinOptsDoNotEnreg();
bool lvaEnregEHVars;
bool lvaEnregMultiRegVars;
void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason));
unsigned lvaVarargsHandleArg;
#ifdef TARGET_X86
unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack
// arguments
#endif // TARGET_X86
unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame
unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame
#if FEATURE_FIXED_OUT_ARGS
unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining.
#endif
unsigned lvaMonAcquired; // boolean variable introduced into in synchronized methods
// that tracks whether the lock has been taken
unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg.
// However, if there is a "ldarga 0" or "starg 0" in the IL,
// we will redirect all "ldarg(a) 0" and "starg 0" to this temp.
unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
// in case there are multiple BBJ_RETURN blocks in the inlinee
// or if the inlinee has GC ref locals.
#if FEATURE_FIXED_OUT_ARGS
unsigned lvaOutgoingArgSpaceVar; // dummy TYP_LCLBLK var for fixed outgoing argument space
PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space
#endif // FEATURE_FIXED_OUT_ARGS
static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding)
{
return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE);
}
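// For example, on a 64-bit target GetOutgoingArgByteSize(20) returns 24: the outgoing
// argument area is kept TARGET_POINTER_SIZE aligned.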
// Variable representing the return address. The helper-based tailcall
// mechanism passes the address of the return address to a runtime helper
// where it is used to detect tail-call chains.
unsigned lvaRetAddrVar;
#if defined(DEBUG) && defined(TARGET_XARCH)
unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return.
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#if defined(DEBUG) && defined(TARGET_X86)
unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call.
#endif // defined(DEBUG) && defined(TARGET_X86)
bool lvaGenericsContextInUse;
bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or
// CORINFO_GENERICS_CTXT_FROM_THIS?
bool lvaReportParamTypeArg(); // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG?
//-------------------------------------------------------------------------
// All these frame offsets are inter-related and must be kept in sync
#if !defined(FEATURE_EH_FUNCLETS)
// This is used for the callable handlers
unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots
#endif // !FEATURE_EH_FUNCLETS
int lvaCachedGenericContextArgOffs;
int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as
// THIS pointer
#ifdef JIT32_GCENCODER
unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc
#endif // JIT32_GCENCODER
unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper
// TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
// after the reg predict we will use a computed maxTmpSize
// which is based upon the number of spill temps predicted by reg predict
// All this is necessary because if we under-estimate the size of the spill
// temps we could fail when encoding instructions that reference stack offsets for ARM.
//
// Pre codegen max spill temp size.
static const unsigned MAX_SPILL_TEMP_SIZE = 24;
//-------------------------------------------------------------------------
unsigned lvaGetMaxSpillTempSize();
#ifdef TARGET_ARM
bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask);
#endif // TARGET_ARM
void lvaAssignFrameOffsets(FrameLayoutState curState);
void lvaFixVirtualFrameOffsets();
void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc);
void lvaUpdateArgsWithInitialReg();
void lvaAssignVirtualFrameOffsetsToArgs();
#ifdef UNIX_AMD64_ABI
int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset);
#else // !UNIX_AMD64_ABI
int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs);
#endif // !UNIX_AMD64_ABI
void lvaAssignVirtualFrameOffsetsToLocals();
int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs);
#ifdef TARGET_AMD64
// Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even.
bool lvaIsCalleeSavedIntRegCountEven();
#endif
void lvaAlignFrame();
void lvaAssignFrameOffsetsToPromotedStructs();
int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign);
#ifdef DEBUG
void lvaDumpRegLocation(unsigned lclNum);
void lvaDumpFrameLocation(unsigned lclNum);
void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6);
void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame
// layout state defined by lvaDoneFrameLayout
#endif
// Limit frames size to 1GB. The maximum is 2GB in theory - make it intentionally smaller
// to avoid bugs from borderline cases.
#define MAX_FrameSize 0x3FFFFFFF
void lvaIncrementFrameSize(unsigned size);
unsigned lvaFrameSize(FrameLayoutState curState);
// Returns the caller-SP-relative offset for the SP/FP relative offset determined by FP based.
int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const;
// Returns the caller-SP-relative offset for the local variable "varNum."
int lvaGetCallerSPRelativeOffset(unsigned varNum);
// Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc.
int lvaGetSPRelativeOffset(unsigned varNum);
int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased);
int lvaGetInitialSPRelativeOffset(unsigned varNum);
// True if this is an OSR compilation and this local is potentially
// located on the original method stack frame.
bool lvaIsOSRLocal(unsigned varNum);
//------------------------ For splitting types ----------------------------
void lvaInitTypeRef();
void lvaInitArgs(InitVarDscInfo* varDscInfo);
void lvaInitThisPtr(InitVarDscInfo* varDscInfo);
void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg);
void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs);
void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo);
void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo);
void lvaInitVarDsc(LclVarDsc* varDsc,
unsigned varNum,
CorInfoType corInfoType,
CORINFO_CLASS_HANDLE typeHnd,
CORINFO_ARG_LIST_HANDLE varList,
CORINFO_SIG_INFO* varSig);
static unsigned lvaTypeRefMask(var_types type);
var_types lvaGetActualType(unsigned lclNum);
var_types lvaGetRealType(unsigned lclNum);
//-------------------------------------------------------------------------
void lvaInit();
LclVarDsc* lvaGetDesc(unsigned lclNum)
{
assert(lclNum < lvaCount);
return &lvaTable[lclNum];
}
LclVarDsc* lvaGetDesc(unsigned lclNum) const
{
assert(lclNum < lvaCount);
return &lvaTable[lclNum];
}
LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar)
{
return lvaGetDesc(lclVar->GetLclNum());
}
unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex)
{
assert(trackedIndex < lvaTrackedCount);
unsigned lclNum = lvaTrackedToVarNum[trackedIndex];
assert(lclNum < lvaCount);
return lclNum;
}
LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex)
{
return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex));
}
unsigned lvaGetLclNum(const LclVarDsc* varDsc)
{
assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table
assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) ==
0); // varDsc better not point in the middle of a variable
unsigned varNum = (unsigned)(varDsc - lvaTable);
assert(varDsc == &lvaTable[varNum]);
return varNum;
}
unsigned lvaLclSize(unsigned varNum);
unsigned lvaLclExactSize(unsigned varNum);
bool lvaHaveManyLocals() const;
unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason));
unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason));
unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason));
void lvaSortByRefCount();
void lvaMarkLocalVars(); // Local variable ref-counting
void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers);
void lvaMarkLocalVars(BasicBlock* block, bool isRecompute);
void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar
VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt);
#ifdef DEBUG
struct lvaStressLclFldArgs
{
Compiler* m_pCompiler;
bool m_bFirstPass;
};
static fgWalkPreFn lvaStressLclFldCB;
void lvaStressLclFld();
void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars);
void lvaDispVarSet(VARSET_VALARG_TP set);
#endif
#ifdef TARGET_ARM
int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage);
#else
int lvaFrameAddress(int varNum, bool* pFPbased);
#endif
bool lvaIsParameter(unsigned varNum);
bool lvaIsRegArgument(unsigned varNum);
bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument?
bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code
// that writes to arg0
// For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference.
// For ARM64, this is structs larger than 16 bytes that are passed by reference.
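// For example (illustrative), on Windows x64 a 24-byte struct argument is passed as a pointer
// to a caller-allocated copy; the corresponding parameter local has lvIsImplicitByRef set and
// is treated as a TYP_BYREF to that copy.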
bool lvaIsImplicitByRefLocal(unsigned varNum)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
LclVarDsc* varDsc = lvaGetDesc(varNum);
if (varDsc->lvIsImplicitByRef)
{
assert(varDsc->lvIsParam);
assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF));
return true;
}
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
return false;
}
// Returns true if this local var is a multireg struct
bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg);
// If the local is a TYP_STRUCT, get/set a class handle describing it
CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum);
void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true);
void lvaSetStructUsedAsVarArg(unsigned varNum);
// If the local is TYP_REF, set or update the associated class information.
void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
#define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct
// Info about struct type fields.
struct lvaStructFieldInfo
{
CORINFO_FIELD_HANDLE fldHnd;
unsigned char fldOffset;
unsigned char fldOrdinal;
var_types fldType;
unsigned fldSize;
CORINFO_CLASS_HANDLE fldTypeHnd;
lvaStructFieldInfo()
: fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr)
{
}
};
// Info about a struct type, instances of which may be candidates for promotion.
struct lvaStructPromotionInfo
{
CORINFO_CLASS_HANDLE typeHnd;
bool canPromote;
bool containsHoles;
bool customLayout;
bool fieldsSorted;
unsigned char fieldCnt;
lvaStructFieldInfo fields[MAX_NumOfFieldsInPromotableStruct];
lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr)
: typeHnd(typeHnd)
, canPromote(false)
, containsHoles(false)
, customLayout(false)
, fieldsSorted(false)
, fieldCnt(0)
{
}
};
struct lvaFieldOffsetCmp
{
bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2);
};
// This class is responsible for checking validity and profitability of struct promotion.
// If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes
// necessary information for fgMorphStructField to use.
class StructPromotionHelper
{
public:
StructPromotionHelper(Compiler* compiler);
bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd);
bool TryPromoteStructVar(unsigned lclNum);
void Clear()
{
structPromotionInfo.typeHnd = NO_CLASS_HANDLE;
}
#ifdef DEBUG
void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType);
#endif // DEBUG
private:
bool CanPromoteStructVar(unsigned lclNum);
bool ShouldPromoteStructVar(unsigned lclNum);
void PromoteStructVar(unsigned lclNum);
void SortStructFields();
lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal);
bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo);
private:
Compiler* compiler;
lvaStructPromotionInfo structPromotionInfo;
#ifdef DEBUG
typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types>
RetypedAsScalarFieldsMap;
RetypedAsScalarFieldsMap retypedFieldsMap;
#endif // DEBUG
};
StructPromotionHelper* structPromotionHelper;
unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset);
lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetPromotionType(unsigned varNum);
lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetParentPromotionType(unsigned varNum);
bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc);
bool lvaIsGCTracked(const LclVarDsc* varDsc);
#if defined(FEATURE_SIMD)
bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc)
{
assert(varDsc->lvType == TYP_SIMD12);
assert(varDsc->lvExactSize == 12);
#if defined(TARGET_64BIT)
assert(compMacOsArm64Abi() || varDsc->lvSize() == 16);
#endif // defined(TARGET_64BIT)
// We make local variable SIMD12 types 16 bytes instead of just 12.
// lvSize() will return 16 bytes for SIMD12, even for fields.
// However, we can't do that mapping if the var is a dependently promoted struct field.
// Such a field must remain its exact size within its parent struct unless it is a single
// field *and* it is the only field in a struct of 16 bytes.
if (varDsc->lvSize() != 16)
{
return false;
}
if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl);
return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16);
}
return true;
}
#endif // defined(FEATURE_SIMD)
unsigned lvaGSSecurityCookie; // LclVar number
bool lvaTempsHaveLargerOffsetThanVars();
// Returns "true" iff local variable "lclNum" is in SSA form.
bool lvaInSsa(unsigned lclNum)
{
assert(lclNum < lvaCount);
return lvaTable[lclNum].lvInSsa;
}
unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX
#if defined(FEATURE_EH_FUNCLETS)
unsigned lvaPSPSym; // variable representing the PSPSym
#endif
InlineInfo* impInlineInfo; // Only present for inlinees
InlineStrategy* m_inlineStrategy;
InlineContext* compInlineContext; // Always present
// The Compiler* that is the root of the inlining tree of which "this" is a member.
Compiler* impInlineRoot();
#if defined(DEBUG) || defined(INLINE_DATA)
unsigned __int64 getInlineCycleCount()
{
return m_compCycles;
}
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method.
bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters of this method.
//=========================================================================
// PROTECTED
//=========================================================================
protected:
//---------------- Local variable ref-counting ----------------------------
void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute);
bool IsDominatedByExceptionalEntry(BasicBlock* block);
void SetVolatileHint(LclVarDsc* varDsc);
// Keeps the mapping from SSA #'s to VN's for the implicit memory variables.
SsaDefArray<SsaMemDef> lvMemoryPerSsaData;
public:
// Returns the address of the per-Ssa data for memory at the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum)
{
return lvMemoryPerSsaData.GetSsaDef(ssaNum);
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Importer XX
XX XX
XX Imports the given method and converts it to semantic trees XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
private:
// For prefixFlags
enum
{
PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
PREFIX_TAILCALL_IMPLICIT =
0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
PREFIX_TAILCALL_STRESS =
0x00000100, // call doesn't have the "tail" IL prefix but is treated as explicit because of tail call stress
PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS),
PREFIX_VOLATILE = 0x00001000,
PREFIX_UNALIGNED = 0x00010000,
PREFIX_CONSTRAINED = 0x00100000,
PREFIX_READONLY = 0x01000000
};
static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix);
static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp);
static bool impOpcodeIsCallOpcode(OPCODE opcode);
public:
void impInit();
void impImport();
CORINFO_CLASS_HANDLE impGetRefAnyClass();
CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle();
CORINFO_CLASS_HANDLE impGetTypeHandleClass();
CORINFO_CLASS_HANDLE impGetStringClass();
CORINFO_CLASS_HANDLE impGetObjectClass();
// Returns underlying type of handles returned by ldtoken instruction
var_types GetRuntimeHandleUnderlyingType()
{
// RuntimeTypeHandle is backed by a raw pointer on CoreRT and by an object reference on other runtimes
return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF;
}
void impDevirtualizeCall(GenTreeCall* call,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_METHOD_HANDLE* method,
unsigned* methodFlags,
CORINFO_CONTEXT_HANDLE* contextHandle,
CORINFO_CONTEXT_HANDLE* exactContextHandle,
bool isLateDevirtualization,
bool isExplicitTailCall,
IL_OFFSET ilOffset = BAD_IL_OFFSET);
//=========================================================================
// PROTECTED
//=========================================================================
protected:
//-------------------- Stack manipulation ---------------------------------
unsigned impStkSize; // Size of the full stack
#define SMALL_STACK_SIZE 16 // number of elements in impSmallStack
struct SavedStack // used to save/restore stack contents.
{
unsigned ssDepth; // number of values on stack
StackEntry* ssTrees; // saved tree values
};
bool impIsPrimitive(CorInfoType type);
bool impILConsumesAddr(const BYTE* codeAddr);
void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind);
void impPushOnStack(GenTree* tree, typeInfo ti);
void impPushNullObjRefOnStack();
StackEntry impPopStack();
StackEntry& impStackTop(unsigned n = 0);
unsigned impStackHeight();
void impSaveStackState(SavedStack* savePtr, bool copy);
void impRestoreStackState(SavedStack* savePtr);
GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const BYTE* codeAddr,
const BYTE* codeEndp,
bool makeInlineObservation = false);
void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken);
void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
bool impCanPInvokeInline();
bool impCanPInvokeInlineCallSite(BasicBlock* block);
void impCheckForPInvokeCall(
GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block);
GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo());
void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig);
void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall);
void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall);
void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall);
var_types impImportCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a
// type parameter?
GenTree* newobjThis,
int prefixFlags,
CORINFO_CALL_INFO* callInfo,
IL_OFFSET rawILOffset);
CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle);
bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv);
GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd);
GenTree* impFixupStructReturnType(GenTree* op,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension unmgdCallConv);
#ifdef DEBUG
var_types impImportJitTestLabelMark(int numArgs);
#endif // DEBUG
GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken);
GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp);
GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp);
static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr);
GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp);
GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp);
void impImportLeave(BasicBlock* block);
void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr);
GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom);
GenTree* impIntrinsic(GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef,
bool readonlyCall,
bool tailCall,
CORINFO_RESOLVED_TOKEN* pContstrainedResolvedToken,
CORINFO_THIS_TRANSFORM constraintCallThisTransform,
NamedIntrinsic* pIntrinsicName,
bool* isSpecialIntrinsic = nullptr);
GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
var_types callType,
NamedIntrinsic intrinsicName,
bool tailCall);
NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method);
GenTree* impUnsupportedNamedIntrinsic(unsigned helper,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand);
#ifdef FEATURE_HW_INTRINSICS
GenTree* impHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand);
GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
GenTree* newobjThis);
protected:
bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa);
GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
var_types retType,
CorInfoType simdBaseJitType,
unsigned simdSize,
GenTree* newobjThis);
GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize);
GenTree* getArgForHWIntrinsic(var_types argType,
CORINFO_CLASS_HANDLE argClass,
bool expectAddr = false,
GenTree* newobjThis = nullptr);
GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType);
GenTree* addRangeCheckIfNeeded(
NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound);
GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound);
#ifdef TARGET_XARCH
GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize);
GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
#endif // TARGET_XARCH
#endif // FEATURE_HW_INTRINSICS
GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
int memberRef,
bool readonlyCall,
NamedIntrinsic intrinsicName);
GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig);
GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig);
GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive);
GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
GenTree* impTransformThis(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
CORINFO_THIS_TRANSFORM transform);
//----------------- Manipulating the trees and stmts ----------------------
Statement* impStmtList; // Statements for the BB being imported.
Statement* impLastStmt; // The last statement for the current BB.
public:
enum
{
CHECK_SPILL_ALL = -1,
CHECK_SPILL_NONE = -2
};
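// (Illustrative note, not normative: these values are passed as the "chkLevel" argument to the
//  impAppendTree/impAppendStmt helpers below. CHECK_SPILL_ALL asks the helpers to check/spill
//  interference against the entire evaluation stack, while CHECK_SPILL_NONE skips the check.)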
void impBeginTreeList();
void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt);
void impEndTreeList(BasicBlock* block);
void impAppendStmtCheck(Statement* stmt, unsigned chkLevel);
void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true);
void impAppendStmt(Statement* stmt);
void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore);
Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true);
void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore);
void impAssignTempGen(unsigned tmp,
GenTree* val,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
void impAssignTempGen(unsigned tmpNum,
GenTree* val,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
Statement* impExtractLastStmt();
GenTree* impCloneExpr(GenTree* tree,
GenTree** clone,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt DEBUGARG(const char* reason));
GenTree* impAssignStruct(GenTree* dest,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* impAssignStructPtr(GenTree* dest,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref);
var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr);
GenTree* impNormStructVal(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
bool forceNormalization = false);
GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
bool* pRuntimeLookup = nullptr,
bool mustRestoreHandle = false,
bool importParent = false);
GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
bool* pRuntimeLookup = nullptr,
bool mustRestoreHandle = false)
{
return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true);
}
GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags flags,
void* compileTimeHandle);
GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind);
GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle);
GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle);
GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CorInfoHelpFunc helper,
var_types type,
GenTreeCall::Use* args = nullptr,
CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr);
bool impIsCastHelperEligibleForClassProbe(GenTree* tree);
bool impIsCastHelperMayHaveProfileData(GenTree* tree);
GenTree* impCastClassOrIsInstToTree(
GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset);
GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass);
bool VarTypeIsMultiByteAndCanEnreg(var_types type,
CORINFO_CLASS_HANDLE typeClass,
unsigned* typeSize,
bool forReturn,
bool isVarArg,
CorInfoCallConvExtension callConv);
bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName);
bool IsTargetIntrinsic(NamedIntrinsic intrinsicName);
bool IsMathIntrinsic(NamedIntrinsic intrinsicName);
bool IsMathIntrinsic(GenTree* tree);
private:
//----------------- Importing the method ----------------------------------
CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens.
#ifdef DEBUG
unsigned impCurOpcOffs;
const char* impCurOpcName;
bool impNestedStackSpill;
// For displaying instrs with generated native code (-n:B)
Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset().
void impNoteLastILoffs();
#endif
// Debug info of current statement being imported. It gets set to contain
// no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been
// set in the appended trees. Then it gets updated at IL instructions for
// which we have to report mapping info.
// It will always contain the current inline context.
DebugInfo impCurStmtDI;
DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall);
void impCurStmtOffsSet(IL_OFFSET offs);
void impNoteBranchOffs();
unsigned impInitBlockLineInfo();
bool impIsThis(GenTree* obj);
bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
bool impIsAnySTLOC(OPCODE opcode)
{
return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) ||
((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3)));
}
GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr);
bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const;
GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0);
//---------------- Spilling the importer stack ----------------------------
// The maximum number of bytes of IL processed without clean stack state.
// It allows us to limit the maximum tree size and depth.
static const unsigned MAX_TREE_SIZE = 200;
bool impCanSpillNow(OPCODE prevOpcode);
struct PendingDsc
{
PendingDsc* pdNext;
BasicBlock* pdBB;
SavedStack pdSavedStack;
ThisInitState pdThisPtrInit;
};
PendingDsc* impPendingList; // list of BBs currently waiting to be imported.
PendingDsc* impPendingFree; // Freed up dscs that can be reused
// We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation.
JitExpandArray<BYTE> impPendingBlockMembers;
// Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary.)
// Operates on the map in the top-level ancestor.
BYTE impGetPendingBlockMember(BasicBlock* blk)
{
return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd());
}
// Set the byte for "b" to "val" (allocating/extending impPendingBlockMembers if necessary.)
// Operates on the map in the top-level ancestor.
void impSetPendingBlockMember(BasicBlock* blk, BYTE val)
{
impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val);
}
bool impCanReimport;
bool impSpillStackEntry(unsigned level,
unsigned varNum
#ifdef DEBUG
,
bool bAssertOnRecursion,
const char* reason
#endif
);
void impSpillStackEnsure(bool spillLeaves = false);
void impEvalSideEffects();
void impSpillSpecialSideEff();
void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason));
void impSpillValueClasses();
void impSpillEvalStack();
static fgWalkPreFn impFindValueClasses;
void impSpillLclRefs(ssize_t lclNum);
BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter);
bool impBlockIsInALoop(BasicBlock* block);
void impImportBlockCode(BasicBlock* block);
void impReimportMarkBlock(BasicBlock* block);
void impReimportMarkSuccessors(BasicBlock* block);
void impVerifyEHBlock(BasicBlock* block, bool isTryStart);
void impImportBlockPending(BasicBlock* block);
// Similar to impImportBlockPending, but assumes that block has already been imported once and is being
// reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState
// for the block, but instead, just re-uses the block's existing EntryState.
void impReimportBlockPending(BasicBlock* block);
var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2);
void impImportBlock(BasicBlock* block);
// Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values
// on the stack to local variables (the "spill temp" variables). The successor blocks will assume that
// their incoming stack contents are in those locals. This requires "block" and its successors to agree on
// the variables that will be used -- and for all the predecessors of those successors, and the
// successors of those predecessors, etc. Call such a set of blocks closed under alternating
// successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the
// clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill
// temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series
// of local variable numbers, so we represent them with the base local variable number), returns that.
// Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of
// which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps
// chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending
// on which kind of member of the clique the block is).
unsigned impGetSpillTmpBase(BasicBlock* block);
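// Illustrative example (sketch, following the comment above): if blocks B1 and B2 both branch to
// B3 with one value left on the stack, then B1 and B2 are "predecessor" members and B3 is a
// "successor" member of the same spill clique, and all three must agree on the base spill temp
// local that carries that value across the edges.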
// Assumes that "block" is a basic block that completes with a non-empty stack. We have previously
// assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks
// will assume that their incoming stack contents are in those locals. This requires "block" and its
// successors to agree on the variables and their types that will be used. The CLI spec allows implicit
// conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can
// push an int and another can push a native int. For 64-bit we have chosen to implement this by typing
// the "spill temp" as native int, and then importing (or re-importing as needed) so that all the
// predecessors in the "spill clique" push a native int (sign-extending if needed), and all the
// successors receive a native int. Similarly float and double are unified to double.
// This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark
// blocks for re-importation as appropriate (both successors, so they get the right incoming type, and
// predecessors, so they insert an upcast if needed).
void impReimportSpillClique(BasicBlock* block);
// When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic
// block, and represent the predecessor and successor members of the clique currently being computed.
// *** Access to these will need to be locked in a parallel compiler.
JitExpandArray<BYTE> impSpillCliquePredMembers;
JitExpandArray<BYTE> impSpillCliqueSuccMembers;
enum SpillCliqueDir
{
SpillCliquePred,
SpillCliqueSucc
};
// Abstract class for receiving a callback while walking a spill clique
class SpillCliqueWalker
{
public:
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0;
};
// This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique
class SetSpillTempsBase : public SpillCliqueWalker
{
unsigned m_baseTmp;
public:
SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp)
{
}
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};
// This class is used for implementing impReimportSpillClique part on each block within the spill clique
class ReimportSpillClique : public SpillCliqueWalker
{
Compiler* m_pComp;
public:
ReimportSpillClique(Compiler* pComp) : m_pComp(pComp)
{
}
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};
// This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each
// predecessor or successor within the spill clique
void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback);
// For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the
// incoming locals. This walks that list and resets the types of the GenTrees to match the types of
// the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique).
void impRetypeEntryStateTemps(BasicBlock* blk);
BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk);
void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val);
void impPushVar(GenTree* op, typeInfo tiRetVal);
GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset));
void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal);
void impLoadVar(unsigned lclNum, IL_OFFSET offset)
{
impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo);
}
void impLoadArg(unsigned ilArgNum, IL_OFFSET offset);
void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset);
bool impReturnInstruction(int prefixFlags, OPCODE& opcode);
#ifdef TARGET_ARM
void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass);
#endif
// A free list of linked list nodes used to represent to-do stacks of basic blocks.
struct BlockListNode
{
BasicBlock* m_blk;
BlockListNode* m_next;
BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next)
{
}
void* operator new(size_t sz, Compiler* comp);
};
BlockListNode* impBlockListNodeFreeList;
void FreeBlockListNode(BlockListNode* node);
bool impIsValueType(typeInfo* pTypeInfo);
var_types mangleVarArgsType(var_types type);
regNumber getCallArgIntRegister(regNumber floatReg);
regNumber getCallArgFloatRegister(regNumber intReg);
#if defined(DEBUG)
static unsigned jitTotalMethodCompiled;
#endif
#ifdef DEBUG
static LONG jitNestingLevel;
#endif // DEBUG
static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr);
void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult);
// STATIC inlining decision based on the IL code.
void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
CORINFO_METHOD_INFO* methInfo,
bool forceInline,
InlineResult* inlineResult);
void impCheckCanInline(GenTreeCall* call,
CORINFO_METHOD_HANDLE fncHandle,
unsigned methAttr,
CORINFO_CONTEXT_HANDLE exactContextHnd,
InlineCandidateInfo** ppInlineCandidateInfo,
InlineResult* inlineResult);
void impInlineRecordArgInfo(InlineInfo* pInlineInfo,
GenTree* curArgVal,
unsigned argNum,
InlineResult* inlineResult);
void impInlineInitVars(InlineInfo* pInlineInfo);
unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason));
GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo);
bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo);
bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree,
GenTreeCall::Use* additionalCallArgs,
GenTree* dereferencedAddress,
InlArgInfo* inlArgInfo);
void impMarkInlineCandidate(GenTree* call,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo);
void impMarkInlineCandidateHelper(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo);
bool impTailCallRetTypeCompatible(bool allowWidening,
var_types callerRetType,
CORINFO_CLASS_HANDLE callerRetTypeClass,
CorInfoCallConvExtension callerCallConv,
var_types calleeRetType,
CORINFO_CLASS_HANDLE calleeRetTypeClass,
CorInfoCallConvExtension calleeCallConv);
bool impIsTailCallILPattern(
bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive);
bool impIsImplicitTailCallCandidate(
OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive);
bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd);
bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array);
CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX FlowGraph XX
XX XX
XX Info about the basic-blocks, their contents and the flow analysis XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
BasicBlock* fgFirstBB; // Beginning of the basic block list
BasicBlock* fgLastBB; // End of the basic block list
BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section
BasicBlock* fgEntryBB; // For OSR, the original method's entry point
BasicBlock* fgOSREntryBB; // For OSR, the logical entry point (~ patchpoint)
#if defined(FEATURE_EH_FUNCLETS)
BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets)
#endif
BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been
// created.
BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks
unsigned fgEdgeCount; // # of control flow edges between the BBs
unsigned fgBBcount; // # of BBs in the method
#ifdef DEBUG
unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen
#endif
unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks
unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information
BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute
// dominance. Indexed by block number. Size: fgBBNumMax + 1.
// After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute
// dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and
// postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered
// starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely
// to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array
// index). The arrays are of size fgBBNumMax + 1.
unsigned* fgDomTreePreOrder;
unsigned* fgDomTreePostOrder;
// Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree
// in order to avoid the need for SSA reconstruction and an "out of SSA" phase).
DomTreeNode* fgSsaDomTree;
bool fgBBVarSetsInited;
// Allocate array like T* a = new T[fgBBNumMax + 1];
// Using helper so we don't keep forgetting +1.
template <typename T>
T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown)
{
return getAllocator(cmk).allocate<T>(fgBBNumMax + 1);
}
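// Example usage (sketch): unsigned* perBlockCounts = fgAllocateTypeForEachBlk<unsigned>(CMK_Unknown);
// yields an array indexable by bbNum (element zero is unused, since block numbers start at one).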
// BlockSets are relative to a specific set of BasicBlock numbers. If that changes
// (if the blocks are renumbered), this changes. BlockSets from different epochs
// cannot be meaningfully combined. Note that new blocks can be created with higher
// block numbers without changing the basic block epoch. These blocks *cannot*
// participate in a block set until the blocks are all renumbered, causing the epoch
// to change. This is useful if continuing to use previous block sets is valuable.
// If the epoch is zero, then it is uninitialized, and block sets can't be used.
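// For example (illustrative): a BlockSet allocated while fgCurBBEpoch == N must not be combined
// with one allocated after the blocks are renumbered and the epoch becomes N+1; the underlying
// bitsets may differ in size, and the bit positions no longer correspond to the same blocks.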
unsigned fgCurBBEpoch;
unsigned GetCurBasicBlockEpoch()
{
return fgCurBBEpoch;
}
// The number of basic blocks in the current epoch. When the blocks are renumbered,
// this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains
// the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered.
unsigned fgCurBBEpochSize;
// The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize
// bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called.
unsigned fgBBSetCountInSizeTUnits;
void NewBasicBlockEpoch()
{
INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits);
// We have a new epoch. Compute and cache the size needed for new BlockSets.
fgCurBBEpoch++;
fgCurBBEpochSize = fgBBNumMax + 1;
fgBBSetCountInSizeTUnits =
roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);
#ifdef DEBUG
// All BlockSet objects are now invalid!
fgReachabilitySetsValid = false; // the bbReach sets are now invalid!
fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid!
if (verbose)
{
unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t));
printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)",
fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long");
if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1)))
{
// If we're not just establishing the first epoch, and the epoch array size has changed such that we're
// going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an
// array of size_t bitsets), then print that out.
printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long");
}
printf("\n");
}
#endif // DEBUG
}
void EnsureBasicBlockEpoch()
{
if (fgCurBBEpochSize != fgBBNumMax + 1)
{
NewBasicBlockEpoch();
}
}
BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind);
void fgEnsureFirstBBisScratch();
bool fgFirstBBisScratch();
bool fgBBisScratch(BasicBlock* block);
void fgExtendEHRegionBefore(BasicBlock* block);
void fgExtendEHRegionAfter(BasicBlock* block);
BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion);
BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind,
unsigned tryIndex,
unsigned hndIndex,
BasicBlock* nearBlk,
bool putInFilter = false,
bool runRarely = false,
bool insertAtEnd = false);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind,
BasicBlock* srcBlk,
bool runRarely = false,
bool insertAtEnd = false);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind);
BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind,
BasicBlock* afterBlk,
unsigned xcptnIndex,
bool putInTryRegion);
void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk);
void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk);
void fgUnlinkBlock(BasicBlock* block);
#ifdef FEATURE_JIT_METHOD_PERF
unsigned fgMeasureIR();
#endif // FEATURE_JIT_METHOD_PERF
bool fgModified; // True if the flow graph has been modified recently
bool fgComputePredsDone; // Have we computed the bbPreds list
bool fgCheapPredsValid; // Is the bbCheapPreds list valid?
bool fgDomsComputed; // Have we computed the dominator sets?
bool fgReturnBlocksComputed; // Have we computed the return blocks list?
bool fgOptimizedFinally; // Did we optimize any try-finallys?
bool fgHasSwitch; // any BBJ_SWITCH jumps?
BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler
// begin blocks.
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
BlockSet fgAlwaysBlks; // Set of blocks which are the BBJ_ALWAYS part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair that should
// never be removed due to a requirement to use the BBJ_ALWAYS for generating code and
// not have "retless" blocks.
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
#ifdef DEBUG
bool fgReachabilitySetsValid; // Are the bbReach sets valid?
bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid?
#endif // DEBUG
bool fgRemoveRestOfBlock; // true if we know that we will throw
bool fgStmtRemoved; // true if we remove statements -> need new DFA
// There are two modes for ordering of the trees.
// - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in
// each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order)
// by traversing the tree according to the order of the operands.
// - In FGOrderLinear, the dominant ordering is the linear order.
enum FlowGraphOrder
{
FGOrderTree,
FGOrderLinear
};
FlowGraphOrder fgOrder;
// The following are boolean flags that keep track of the state of internal data structures
bool fgStmtListThreaded; // true if the node list is now threaded
bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions
bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights
bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights
bool fgSlopUsedInEdgeWeights; // true if there was some slop used when computing the edge weights
bool fgRangeUsedInEdgeWeights; // true if some of the edge weights are expressed in Min..Max form
bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph
weight_t fgCalledCount; // count of the number of times this method was called
// This is derived from the profile data
// or is BB_UNITY_WEIGHT when we don't have profile data
#if defined(FEATURE_EH_FUNCLETS)
bool fgFuncletsCreated; // true if the funclet creation phase has been run
#endif // FEATURE_EH_FUNCLETS
bool fgGlobalMorph; // indicates whether we are in the global morphing phase,
// since fgMorphTree can be called from several places
bool impBoxTempInUse; // the temp below is valid and available
unsigned impBoxTemp; // a temporary that is used for boxing
#ifdef DEBUG
bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert,
// and we are trying to compile again in a "safer", minopts mode?
#endif
#if defined(DEBUG)
unsigned impInlinedCodeSize;
bool fgPrintInlinedMethods;
#endif
jitstd::vector<flowList*>* fgPredListSortVector;
//-------------------------------------------------------------------------
void fgInit();
PhaseStatus fgImport();
PhaseStatus fgTransformIndirectCalls();
PhaseStatus fgTransformPatchpoints();
PhaseStatus fgInline();
PhaseStatus fgRemoveEmptyTry();
PhaseStatus fgRemoveEmptyFinally();
PhaseStatus fgMergeFinallyChains();
PhaseStatus fgCloneFinally();
void fgCleanupContinuation(BasicBlock* continuation);
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
PhaseStatus fgUpdateFinallyTargetFlags();
void fgClearAllFinallyTargetBits();
void fgAddFinallyTargetFlags();
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
PhaseStatus fgTailMergeThrows();
void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
BasicBlock* nonCanonicalBlock,
BasicBlock* canonicalBlock,
flowList* predEdge);
void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock,
BasicBlock* nonCanonicalBlock,
BasicBlock* canonicalBlock,
flowList* predEdge);
GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType);
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
// Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals
// when this is necessary.
bool fgNeedToAddFinallyTargetBits;
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block,
BasicBlock* handler,
BlockToBlockMap& continuationMap);
GenTree* fgGetCritSectOfStaticMethod();
#if defined(FEATURE_EH_FUNCLETS)
void fgAddSyncMethodEnterExit();
GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter);
void fgConvertSyncReturnToLeave(BasicBlock* block);
#endif // FEATURE_EH_FUNCLETS
void fgAddReversePInvokeEnterExit();
bool fgMoreThanOneReturnBlock();
// The number of separate return points in the method.
unsigned fgReturnCount;
void fgAddInternal();
enum class FoldResult
{
FOLD_DID_NOTHING,
FOLD_CHANGED_CONTROL_FLOW,
FOLD_REMOVED_LAST_STMT,
FOLD_ALTERED_LAST_STMT,
};
FoldResult fgFoldConditional(BasicBlock* block);
void fgMorphStmts(BasicBlock* block);
void fgMorphBlocks();
void fgMergeBlockReturn(BasicBlock* block);
bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg));
void fgSetOptions();
#ifdef DEBUG
static fgWalkPreFn fgAssertNoQmark;
void fgPreExpandQmarkChecks(GenTree* expr);
void fgPostExpandQmarkChecks();
static void fgCheckQmarkAllowedForm(GenTree* tree);
#endif
IL_OFFSET fgFindBlockILOffset(BasicBlock* block);
void fgFixEntryFlowForOSR();
BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr);
BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr);
BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt);
BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR
BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ);
Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di);
Statement* fgNewStmtFromTree(GenTree* tree);
Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block);
Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di);
GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr);
void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt);
void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt);
void fgExpandQmarkNodes();
// Do "simple lowering." This functionality is (conceptually) part of "general"
// lowering that is distributed between fgMorph and the lowering phase of LSRA.
void fgSimpleLowering();
GenTree* fgInitThisClass();
GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper);
GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls);
bool backendRequiresLocalVarLifetimes()
{
return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars();
}
void fgLocalVarLiveness();
void fgLocalVarLivenessInit();
void fgPerNodeLocalVarLiveness(GenTree* node);
void fgPerBlockLocalVarLiveness();
VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block);
void fgLiveVarAnalysis(bool updateInternalOnly = false);
void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call);
void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node);
bool fgComputeLifeTrackedLocalDef(VARSET_TP& life,
VARSET_VALARG_TP keepAliveVars,
LclVarDsc& varDsc,
GenTreeLclVarCommon* node);
bool fgComputeLifeUntrackedLocal(VARSET_TP& life,
VARSET_VALARG_TP keepAliveVars,
LclVarDsc& varDsc,
GenTreeLclVarCommon* lclVarNode);
bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode);
void fgComputeLife(VARSET_TP& life,
GenTree* startNode,
GenTree* endNode,
VARSET_VALARG_TP volatileVars,
bool* pStmtInfoDirty DEBUGARG(bool* treeModf));
void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars);
bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange);
void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block);
bool fgRemoveDeadStore(GenTree** pTree,
LclVarDsc* varDsc,
VARSET_VALARG_TP life,
bool* doAgain,
bool* pStmtInfoDirty,
bool* pStoreRemoved DEBUGARG(bool* treeModf));
void fgInterBlockLocalVarLiveness();
// Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.:
// 1. for (BasicBlock* const block : compiler->Blocks()) ...
// 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ...
// 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ...
// In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3),
// both `startBlock` and `endBlock` must be non-null.
//
BasicBlockSimpleList Blocks() const
{
return BasicBlockSimpleList(fgFirstBB);
}
BasicBlockSimpleList Blocks(BasicBlock* startBlock) const
{
return BasicBlockSimpleList(startBlock);
}
BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const
{
return BasicBlockRangeList(startBlock, endBlock);
}
// The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name
// of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose
// whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us
// to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree.
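// For example (sketch): a store to a field of a struct local "x" that is not independently promoted
// is a partial def; the local variable node for "x" is marked GTF_VAR_USEASG, its own SSA number is
// the "use" name, and this map supplies the SSA number of the new "def".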
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap;
NodeToUnsignedMap* m_opAsgnVarDefSsaNums;
NodeToUnsignedMap* GetOpAsgnVarDefSsaNums()
{
if (m_opAsgnVarDefSsaNums == nullptr)
{
m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator());
}
return m_opAsgnVarDefSsaNums;
}
// This map tracks nodes whose value numbers explicitly or implicitly depend on memory states.
// The map provides the entry block of the most closely enclosing loop that
// defines the memory region accessed when defining the node's VN.
//
// This information should be consulted when considering hoisting a node out of a loop, as the VN
// for the node will only be valid within the indicated loop.
//
// It is not fine-grained enough to track memory dependence within loops, so it cannot be used
// for more general code motion.
//
// If a node does not have an entry in the map we currently assume the VN is not memory dependent
// and so memory does not constrain hoisting.
//
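// For example (illustrative): if a GT_IND's VN was computed under the memory state of loop L2,
// this map yields L2's entry block; hoisting may move the node out of loops nested inside L2,
// but not out of L2 itself, since that VN is only valid while L2's memory state is in effect.
//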
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap()
{
if (m_nodeToLoopMemoryBlockMap == nullptr)
{
m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator());
}
return m_nodeToLoopMemoryBlockMap;
}
void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN);
void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree);
// Requires value numbering phase to have completed. Returns the value number ("gtVN") of the
// "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the
// "use" VN. Performs a lookup into the map of (use asg tree -> def VN.) to return the "def's"
// VN.
inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree);
// Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl".
// Except: assumes that lcl is a def, and if it is
// a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def",
// rather than the "use" SSA number recorded in the tree "lcl".
inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl);
inline bool PreciseRefCountsRequired();
// Performs SSA conversion.
void fgSsaBuild();
// Reset any data structures to the state expected by "fgSsaBuild", so it can be run again.
void fgResetForSsa();
unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run.
// Returns "true" if this is a special variable that is never zero initialized in the prolog.
inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum);
// Returns "true" if the variable needs explicit zero initialization.
inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn);
// The value numbers for this compilation.
ValueNumStore* vnStore;
public:
ValueNumStore* GetValueNumStore()
{
return vnStore;
}
// Do value numbering (assign a value number to each
// tree node).
void fgValueNumber();
// Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN.
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// The 'indType' is the indirection type of the lhs of the assignment and will typically
// match the element type of the array or fldSeq. When this type doesn't match
// or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN]
//
ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
ValueNum arrVN,
ValueNum inxVN,
FieldSeqNode* fldSeq,
ValueNum rhsVN,
var_types indType);
// Requires that "tree" is a GT_IND marked as an array index, and that its address argument
// has been parsed to yield the other input arguments. If evaluation of the address
// can raise exceptions, those should be captured in the exception set "addrXvnp".
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique
// VN for the conservative VN.) Also marks the tree's argument as the address of an array element.
// The type tree->TypeGet() will typically match the element type of the array or fldSeq.
// When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN
//
ValueNum fgValueNumberArrIndexVal(GenTree* tree,
CORINFO_CLASS_HANDLE elemTypeEq,
ValueNum arrVN,
ValueNum inxVN,
ValueNumPair addrXvnp,
FieldSeqNode* fldSeq);
// Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown
// by evaluating the array index expression "tree". Returns the value number resulting from
// dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the
// "GT_IND" that does the dereference, and it is given the returned value number.
ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp);
// Compute the value number for a byref-exposed load of the given type via the given pointerVN.
ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN);
unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run.
// Utility functions for fgValueNumber.
// Perform value-numbering for the trees in "blk".
void fgValueNumberBlock(BasicBlock* blk);
// Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the
// innermost loop of which "entryBlock" is the entry. Returns the value number that should be
// assumed for the memoryKind at the start "entryBlk".
ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum);
// Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated.
// As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation.
void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg));
// Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be
// mutated.
void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg));
// For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap.
// As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store.
void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg));
// For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap.
void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg));
void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN);
// Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that
// value in that SSA #.
void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree);
// The input 'tree' is a leaf node that is a constant
// Assign the proper value number to the tree
void fgValueNumberTreeConst(GenTree* tree);
// If the VN store has been initialized, reassign the
// proper value number to the constant tree.
void fgUpdateConstTreeValueNumber(GenTree* tree);
// Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree.
// (With some exceptions: the VN of the lhs of an assignment is assigned as part of the
// assignment.)
void fgValueNumberTree(GenTree* tree);
void fgValueNumberAssignment(GenTreeOp* tree);
// Does value-numbering for a block assignment.
void fgValueNumberBlockAssignment(GenTree* tree);
bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src);
// Does value-numbering for a cast tree.
void fgValueNumberCastTree(GenTree* tree);
// Does value-numbering for an intrinsic tree.
void fgValueNumberIntrinsic(GenTree* tree);
#ifdef FEATURE_SIMD
// Does value-numbering for a GT_SIMD tree
void fgValueNumberSimd(GenTreeSIMD* tree);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
// Does value-numbering for a GT_HWINTRINSIC tree
void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree);
#endif // FEATURE_HW_INTRINSICS
// Does value-numbering for a call. We interpret some helper calls.
void fgValueNumberCall(GenTreeCall* call);
// Does value-numbering for a helper representing a cast operation.
void fgValueNumberCastHelper(GenTreeCall* call);
// Does value-numbering for a helper "call" that has a VN function symbol "vnf".
void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc);
// Requires "helpCall" to be a helper call. Assigns it a value number;
// we understand the semantics of some of the calls. Returns "true" if
// the call may modify the heap (we assume arbitrary memory side effects if so).
bool fgValueNumberHelperCall(GenTreeCall* helpCall);
// Requires that "helpFunc" is one of the pure Jit Helper methods.
// Returns the corresponding VNFunc to use for value numbering
VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc);
// Adds the exception set for the current tree node which has a memory indirection operation
void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr);
// Adds the exception sets for the current tree node which is performing a division or modulus operation
void fgValueNumberAddExceptionSetForDivision(GenTree* tree);
// Adds the exception set for the current tree node which is performing an overflow checking operation
void fgValueNumberAddExceptionSetForOverflow(GenTree* tree);
// Adds the exception set for the current tree node which is performing a bounds check operation
void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree);
// Adds the exception set for the current tree node which is performing a ckfinite operation
void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree);
// Adds the exception sets for the current tree node
void fgValueNumberAddExceptionSet(GenTree* tree);
#ifdef DEBUG
void fgDebugCheckExceptionSets();
void fgDebugCheckValueNumberedTree(GenTree* tree);
#endif
// These are the current value number for the memory implicit variables while
// doing value numbering. These are the value numbers under the "liberal" interpretation
// of memory values; the "conservative" interpretation needs no VN, since every access of
// memory yields an unknown value.
ValueNum fgCurMemoryVN[MemoryKindCount];
// Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT,
// requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit
// is 1, and the rest is an encoding of "elemTyp".
static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType)
{
if (elemStructType != nullptr)
{
assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF ||
varTypeIsIntegral(elemTyp));
assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid.
return elemStructType;
}
else
{
assert(elemTyp != TYP_STRUCT);
elemTyp = varTypeToSigned(elemTyp);
return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1);
}
}
// If "clsHnd" is the result of an "EncodePrim" call, returns true and sets "*pPrimType" to the
// var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is
// the struct type of the element).
static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd)
{
size_t clsHndVal = size_t(clsHnd);
if (clsHndVal & 0x1)
{
return var_types(clsHndVal >> 1);
}
else
{
return TYP_STRUCT;
}
}
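// Example round trip for EncodeElemType/DecodeElemType (sketch): EncodeElemType(TYP_INT, nullptr)
// yields the handle ((size_t)TYP_INT << 1) | 0x1, and DecodeElemType() of that handle returns
// TYP_INT; DecodeElemType() of a genuine (even-aligned) class handle returns TYP_STRUCT.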
// Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types
var_types getJitGCType(BYTE gcType);
// Returns true if the provided type should be treated as a primitive type
// for the unmanaged calling conventions.
bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd);
enum structPassingKind
{
SPK_Unknown, // Invalid value, never returned
SPK_PrimitiveType, // The struct is passed/returned using a primitive type.
SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that
// require a primitive type temp that is larger than the struct size.
// Currently used for structs of size 3, 5, 6, or 7 bytes.
SPK_ByValue, // The struct is passed/returned by value (using the ABI rules)
// for ARM64 and UNIX_X64 in multiple registers. (when all of the
// parameter registers are used, then the stack will be used)
// for X86 passed on the stack, for ARM32 passed in registers
// or the stack or split between registers and the stack.
SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers.
SPK_ByReference // The struct is passed/returned by reference to a copy/buffer.
};
// Get the "primitive" type that is is used when we are given a struct of size 'structSize'.
// For pointer sized structs the 'clsHnd' is used to determine if the struct contains GC ref.
// A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double
// If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned.
//
// isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
// hfa types.
//
var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg);
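// For example (illustrative, for getPrimitiveTypeForStruct): a 4-byte struct with a single int
// field is typically returned as TYP_INT, and a pointer-sized struct whose only field is an object
// reference as TYP_REF (the clsHnd GC layout distinguishes ref from TYP_I_IMPL); when no scalar
// type is suitable, TYP_UNKNOWN is returned.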
// Get the type that is used to pass values of the given struct type.
// isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
// hfa types.
//
var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
structPassingKind* wbPassStruct,
bool isVarArg,
unsigned structSize);
// Get the type that is used to return values of the given struct type.
// If the size is unknown, pass 0 and it will be determined from 'clsHnd'.
var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
CorInfoCallConvExtension callConv,
structPassingKind* wbPassStruct = nullptr,
unsigned structSize = 0);
#ifdef DEBUG
// Print a representation of "vnp" or "vn" on standard output.
// If "level" is non-zero, we also print out a partial expansion of the value.
void vnpPrint(ValueNumPair vnp, unsigned level);
void vnPrint(ValueNum vn, unsigned level);
#endif
bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2
// Dominator computation member functions
// Not exposed outside Compiler
protected:
bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2
// Compute immediate dominators, the dominator tree, and its pre/post-order traversal numbers.
void fgComputeDoms();
void fgCompDominatedByExceptionalEntryBlocks();
BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block.
// Note: this is relatively slow compared to calling fgDominate(),
// especially if all that is needed is a single block-versus-block dominance check.
void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.)
void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks.
void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'.
bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets.
void fgComputeReachability(); // Perform flow graph node reachability analysis.
BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets.
    void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be
                              // processed in topological-sort order; this function takes care of that.
void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count);
BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph.
// Returns this as a set.
INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds.
    DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph
                                   // (performed by fgComputeDoms), this procedure builds the dominance tree represented
                                   // using adjacency lists.
    // In order to speed up queries of the form 'Does A dominate B?', we can perform a DFS preorder and postorder
    // traversal of the dominance tree, and the dominance query then becomes: A dominates B iff preOrder(A) <= preOrder(B)
    // && postOrder(A) >= postOrder(B), making the computation O(1).
void fgNumberDomTree(DomTreeNode* domTree);
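    // Illustrative sketch of the resulting O(1) query (hypothetical helper; assumes 'preOrder' and
    // 'postOrder' hold the numbering that fgNumberDomTree computed for each block):
    //
    //     bool Dominates(BasicBlock* a, BasicBlock* b)
    //     {
    //         return (preOrder[a->bbNum] <= preOrder[b->bbNum]) && (postOrder[a->bbNum] >= postOrder[b->bbNum]);
    //     }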
// When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets,
// dominators, and possibly loops.
void fgUpdateChangedFlowGraph(const bool computePreds = true,
const bool computeDoms = true,
const bool computeReturnBlocks = false,
const bool computeLoops = false);
public:
// Compute the predecessors of the blocks in the control flow graph.
void fgComputePreds();
// Remove all predecessor information.
void fgRemovePreds();
// Compute the cheap flow graph predecessors lists. This is used in some early phases
// before the full predecessors lists are computed.
void fgComputeCheapPreds();
private:
void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred);
void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred);
public:
enum GCPollType
{
GCPOLL_NONE,
GCPOLL_CALL,
GCPOLL_INLINE
};
// Initialize the per-block variable sets (used for liveness analysis).
void fgInitBlockVarSets();
PhaseStatus fgInsertGCPolls();
BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block);
// Requires that "block" is a block that returns from
// a finally. Returns the number of successors (jump targets of
    // blocks in the covered "try" that did a "LEAVE".)
unsigned fgNSuccsOfFinallyRet(BasicBlock* block);
// Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from
// a finally. Returns its "i"th successor (jump targets of
    // blocks in the covered "try" that did a "LEAVE".)
// Requires that "i" < fgNSuccsOfFinallyRet(block).
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i);
private:
// Factor out common portions of the impls of the methods above.
void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres);
public:
// For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement,
// skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.)
// SwitchUniqueSuccSet contains the non-duplicated switch targets.
// (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget,
// which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already
// been computed for the switch block. If a switch block is deleted or is transformed into a non-switch,
// we leave the entry associated with the block, but it will no longer be accessed.)
struct SwitchUniqueSuccSet
{
unsigned numDistinctSuccs; // Number of distinct targets of the switch.
BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target
// successors.
// The switch block "switchBlk" just had an entry with value "from" modified to the value "to".
// Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk",
// remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation.
void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
};
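    // Illustrative usage sketch (the local names are hypothetical):
    //
    //     SwitchUniqueSuccSet succs = GetDescriptorForSwitch(switchBlk);
    //     for (unsigned i = 0; i < succs.numDistinctSuccs; i++)
    //     {
    //         BasicBlock* uniqueSucc = succs.nonDuplicates[i];
    //         // ... visit each distinct switch target exactly once ...
    //     }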
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap;
private:
// Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow
// iteration over only the distinct successors.
BlockToSwitchDescMap* m_switchDescMap;
public:
BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true)
{
if ((m_switchDescMap == nullptr) && createIfNull)
{
m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator());
}
return m_switchDescMap;
}
// Invalidate the map of unique switch block successors. For example, since the hash key of the map
// depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that
// we don't accidentally look up and return the wrong switch data.
void InvalidateUniqueSwitchSuccMap()
{
m_switchDescMap = nullptr;
}
// Requires "switchBlock" to be a block that ends in a switch. Returns
// the corresponding SwitchUniqueSuccSet.
SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk);
// The switch block "switchBlk" just had an entry with value "from" modified to the value "to".
// Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk",
// remove it from "this", and ensure that "to" is a member.
void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
// Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap.
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk);
BasicBlock* fgFirstBlockOfHandler(BasicBlock* block);
bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block);
flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred);
flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred);
flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred);
flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred);
void fgRemoveBlockAsPred(BasicBlock* block);
void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock);
void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget);
void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget);
void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred);
flowList* fgAddRefPred(BasicBlock* block,
BasicBlock* blockPred,
flowList* oldEdge = nullptr,
bool initializingPreds = false); // Only set to 'true' when we are computing preds in
// fgComputePreds()
void fgFindBasicBlocks();
bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt);
bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion);
BasicBlock* fgFindInsertPoint(unsigned regionIndex,
bool putInTryRegion,
BasicBlock* startBlk,
BasicBlock* endBlk,
BasicBlock* nearBlk,
BasicBlock* jumpBlk,
bool runRarely);
unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr);
void fgPostImportationCleanup();
void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false));
void fgUnlinkStmt(BasicBlock* block, Statement* stmt);
bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt);
void fgCreateLoopPreHeader(unsigned lnum);
void fgUnreachableBlock(BasicBlock* block);
void fgRemoveConditionalJump(BasicBlock* block);
BasicBlock* fgLastBBInMainFunction();
BasicBlock* fgEndBBAfterMainFunction();
void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd);
void fgRemoveBlock(BasicBlock* block, bool unreachable);
bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext);
void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext);
void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext);
BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst);
bool fgRenumberBlocks();
bool fgExpandRarelyRunBlocks();
bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter);
void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk);
enum FG_RELOCATE_TYPE
{
FG_RELOCATE_TRY, // relocate the 'try' region
FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary)
};
BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType);
#if defined(FEATURE_EH_FUNCLETS)
#if defined(TARGET_ARM)
void fgClearFinallyTargetBit(BasicBlock* block);
#endif // defined(TARGET_ARM)
bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block);
bool fgAnyIntraHandlerPreds(BasicBlock* block);
void fgInsertFuncletPrologBlock(BasicBlock* block);
void fgCreateFuncletPrologBlocks();
void fgCreateFunclets();
#else // !FEATURE_EH_FUNCLETS
bool fgRelocateEHRegions();
#endif // !FEATURE_EH_FUNCLETS
bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target);
bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum);
bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum);
bool fgOptimizeEmptyBlock(BasicBlock* block);
bool fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest);
bool fgOptimizeBranch(BasicBlock* bJump);
bool fgOptimizeSwitchBranches(BasicBlock* block);
bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev);
bool fgOptimizeSwitchJumps();
#ifdef DEBUG
void fgPrintEdgeWeights();
#endif
void fgComputeBlockAndEdgeWeights();
weight_t fgComputeMissingBlockWeights();
void fgComputeCalledCount(weight_t returnWeight);
void fgComputeEdgeWeights();
bool fgReorderBlocks();
PhaseStatus fgDetermineFirstColdBlock();
bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr);
bool fgUpdateFlowGraph(bool doTailDup = false);
void fgFindOperOrder();
    // Callback type that returns whether the tree should be split at this point.
typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data);
void fgSetBlockOrder();
void fgRemoveReturnBlock(BasicBlock* block);
/* Helper code that has been factored out */
inline void fgConvertBBToThrowBB(BasicBlock* block);
bool fgCastNeeded(GenTree* tree, var_types toType);
GenTree* fgDoNormalizeOnStore(GenTree* tree);
GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry);
    // The following members check for loops that don't execute calls
bool fgLoopCallMarked;
void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB);
void fgLoopCallMark();
void fgMarkLoopHead(BasicBlock* block);
unsigned fgGetCodeEstimate(BasicBlock* block);
#if DUMP_FLOWGRAPHS
enum class PhasePosition
{
PrePhase,
PostPhase
};
const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map);
static void fgDumpTree(FILE* fgxFile, GenTree* const tree);
FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type);
bool fgDumpFlowGraph(Phases phase, PhasePosition pos);
#endif // DUMP_FLOWGRAPHS
#ifdef DEBUG
void fgDispDoms();
void fgDispReach();
void fgDispBBLiveness(BasicBlock* block);
void fgDispBBLiveness();
void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0);
void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees);
void fgDispBasicBlocks(bool dumpTrees = false);
void fgDumpStmtTree(Statement* stmt, unsigned bbNum);
void fgDumpBlock(BasicBlock* block);
void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock);
static fgWalkPreFn fgStress64RsltMulCB;
void fgStress64RsltMul();
void fgDebugCheckUpdate();
void fgDebugCheckBBNumIncreasing();
void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true);
void fgDebugCheckBlockLinks();
void fgDebugCheckLinks(bool morphTrees = false);
void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees);
void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt);
void fgDebugCheckNodesUniqueness();
void fgDebugCheckLoopTable();
void fgDebugCheckFlags(GenTree* tree);
void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags);
void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags);
void fgDebugCheckTryFinallyExits();
void fgDebugCheckProfileData();
bool fgDebugCheckIncomingProfileData(BasicBlock* block);
bool fgDebugCheckOutgoingProfileData(BasicBlock* block);
#endif // DEBUG
static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2);
static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2);
static GenTree* fgGetFirstNode(GenTree* tree);
//--------------------- Walking the trees in the IR -----------------------
struct fgWalkData
{
Compiler* compiler;
fgWalkPreFn* wtprVisitorFn;
fgWalkPostFn* wtpoVisitorFn;
void* pCallbackData; // user-provided data
GenTree* parent; // parent of current node, provided to callback
GenTreeStack* parentStack; // stack of parent nodes, if asked for
bool wtprLclsOnly; // whether to only visit lclvar nodes
#ifdef DEBUG
bool printModified; // callback can use this
#endif
};
fgWalkResult fgWalkTreePre(GenTree** pTree,
fgWalkPreFn* visitor,
void* pCallBackData = nullptr,
bool lclVarsOnly = false,
bool computeStack = false);
fgWalkResult fgWalkTree(GenTree** pTree,
fgWalkPreFn* preVisitor,
fgWalkPostFn* postVisitor,
void* pCallBackData = nullptr);
void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData);
//----- Postorder
fgWalkResult fgWalkTreePost(GenTree** pTree,
fgWalkPostFn* visitor,
void* pCallBackData = nullptr,
bool computeStack = false);
// An fgWalkPreFn that looks for expressions that have inline throws in
    // minopts mode. Basically it looks for trees with gtOverflowEx() or
// GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It
// returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags
// properly propagated to parent trees). It returns WALK_CONTINUE
// otherwise.
static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data);
static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data);
static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data);
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
friend class SsaBuilder;
friend struct ValueNumberState;
//--------------------- Detect the basic blocks ---------------------------
BasicBlock** fgBBs; // Table of pointers to the BBs
void fgInitBBLookup();
BasicBlock* fgLookupBB(unsigned addr);
bool fgCanSwitchToOptimized();
void fgSwitchToOptimized(const char* reason);
bool fgMayExplicitTailCall();
void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget);
void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock);
void fgLinkBasicBlocks();
unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget);
void fgCheckBasicBlockControlFlow();
void fgControlFlowPermitted(BasicBlock* blkSrc,
BasicBlock* blkDest,
bool IsLeave = false /* is the src a leave block */);
bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling);
void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining);
void fgAdjustForAddressExposedOrWrittenThis();
unsigned fgStressBBProf()
{
#ifdef DEBUG
unsigned result = JitConfig.JitStressBBProf();
if (result == 0)
{
if (compStressCompile(STRESS_BB_PROFILE, 15))
{
result = 1;
}
}
return result;
#else
return 0;
#endif
}
bool fgHaveProfileData();
bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight);
Instrumentor* fgCountInstrumentor;
Instrumentor* fgClassInstrumentor;
PhaseStatus fgPrepareToInstrumentMethod();
PhaseStatus fgInstrumentMethod();
PhaseStatus fgIncorporateProfileData();
void fgIncorporateBlockCounts();
void fgIncorporateEdgeCounts();
CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema,
UINT32 countSchemaItems,
BYTE* pInstrumentationData,
int32_t ilOffset,
CLRRandom* random);
public:
const char* fgPgoFailReason;
bool fgPgoDisabled;
ICorJitInfo::PgoSource fgPgoSource;
ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema;
BYTE* fgPgoData;
UINT32 fgPgoSchemaCount;
HRESULT fgPgoQueryResult;
UINT32 fgNumProfileRuns;
UINT32 fgPgoBlockCounts;
UINT32 fgPgoEdgeCounts;
UINT32 fgPgoClassProfiles;
unsigned fgPgoInlineePgo;
unsigned fgPgoInlineeNoPgo;
unsigned fgPgoInlineeNoPgoSingleBlock;
void WalkSpanningTree(SpanningTreeVisitor* visitor);
void fgSetProfileWeight(BasicBlock* block, weight_t weight);
void fgApplyProfileScale();
bool fgHaveSufficientProfileData();
bool fgHaveTrustedProfileData();
// fgIsUsingProfileWeights - returns true if we have real profile data for this method
// or if we have some fake profile data for the stress mode
bool fgIsUsingProfileWeights()
{
return (fgHaveProfileData() || fgStressBBProf());
}
// fgProfileRunsCount - returns total number of scenario runs for the profile data
// or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data.
unsigned fgProfileRunsCount()
{
return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED;
}
//-------- Insert a statement at the start or end of a basic block --------
#ifdef DEBUG
public:
static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true);
#endif
public:
Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt);
Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
private:
void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt);
void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt);
void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt);
public:
void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt);
private:
Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList);
// Create a new temporary variable to hold the result of *ppTree,
// and transform the graph accordingly.
GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr);
GenTree* fgMakeMultiUse(GenTree** ppTree);
private:
// Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node.
GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree);
bool fgOperIsBitwiseRotationRoot(genTreeOps oper);
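    // For example (sketch only; the recognizer handles more shapes than shown), for an unsigned 'x'
    // of bit width N:
    //
    //     (x << c) | (x >> (N - c))   ==>   GT_ROL(x, c)
    //     (x >> c) | (x << (N - c))   ==>   GT_ROR(x, c)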
#if !defined(TARGET_64BIT)
// Recognize and morph a long multiplication with 32 bit operands.
GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul);
GenTreeOp* fgMorphLongMul(GenTreeOp* mul);
#endif
//-------- Determine the order in which the trees will be evaluated -------
unsigned fgTreeSeqNum;
GenTree* fgTreeSeqLst;
GenTree* fgTreeSeqBeg;
GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false);
void fgSetTreeSeqHelper(GenTree* tree, bool isLIR);
void fgSetTreeSeqFinish(GenTree* tree, bool isLIR);
void fgSetStmtSeq(Statement* stmt);
void fgSetBlockOrder(BasicBlock* block);
//------------------------- Morphing --------------------------------------
unsigned fgPtrArgCntMax;
public:
//------------------------------------------------------------------------
// fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method
// can push on the stack. This value is calculated during morph.
//
// Return Value:
    //    Returns fgPtrArgCntMax, which is a private field.
//
unsigned fgGetPtrArgCntMax() const
{
return fgPtrArgCntMax;
}
//------------------------------------------------------------------------
// fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method
// can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations.
//
void fgSetPtrArgCntMax(unsigned argCntMax)
{
fgPtrArgCntMax = argCntMax;
}
bool compCanEncodePtrArgCntMax();
private:
hashBv* fgOutgoingArgTemps;
hashBv* fgCurrentlyInUseArgTemps;
void fgSetRngChkTarget(GenTree* tree, bool delay = true);
BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay);
#if REARRANGE_ADDS
void fgMoveOpsLeft(GenTree* tree);
#endif
bool fgIsCommaThrow(GenTree* tree, bool forFolding = false);
bool fgIsThrow(GenTree* tree);
bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2);
bool fgIsBlockCold(BasicBlock* block);
GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper);
GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true);
GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs);
// A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address,
// it is useful to know whether the address will be immediately dereferenced, or whether the address value will
// be used, perhaps by passing it as an argument to a called method. This affects how null checking is done:
// for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we
// know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that
// all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently
// small; hence the other fields of MorphAddrContext.
enum MorphAddrContextKind
{
MACK_Ind,
MACK_Addr,
};
struct MorphAddrContext
{
MorphAddrContextKind m_kind;
bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between
// top-level indirection and here have been constants.
size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true.
// In that case, is the sum of those constant offsets.
MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0)
{
}
};
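    // Illustrative example of the distinction: when morphing "obj.f1.f2 = 0", the address of "obj.f1.f2"
    // is computed under a MACK_Ind context with small constant offsets, so the eventual indirection can
    // rely on OS page protection for the null check. If instead "&obj.f1" is passed as a call argument,
    // the context is MACK_Addr and an explicit null check may be needed. (Sketch of the intent only; see
    // fgMorphField, which takes a MorphAddrContext, for the actual rules.)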
// A MACK_CopyBlock context is immutable, so we can just make one of these and share it.
static MorphAddrContext s_CopyBlockMAC;
#ifdef FEATURE_SIMD
GenTree* getSIMDStructFromField(GenTree* tree,
CorInfoType* simdBaseJitTypeOut,
unsigned* indexOut,
unsigned* simdSizeOut,
bool ignoreUsedInSIMDIntrinsic = false);
GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree);
GenTree* fgMorphFieldToSimdGetElement(GenTree* tree);
bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt);
void impMarkContiguousSIMDFieldAssignments(Statement* stmt);
// fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking previous simd field assignment
    // in function: Compiler::impMarkContiguousSIMDFieldAssignments.
Statement* fgPreviousCandidateSIMDFieldAsgStmt;
#endif // FEATURE_SIMD
GenTree* fgMorphArrayIndex(GenTree* tree);
GenTree* fgMorphExpandCast(GenTreeCast* tree);
GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl);
void fgInitArgInfo(GenTreeCall* call);
GenTreeCall* fgMorphArgs(GenTreeCall* call);
void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass);
GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph);
public:
bool fgAddrCouldBeNull(GenTree* addr);
private:
GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac);
bool fgCanFastTailCall(GenTreeCall* call, const char** failReason);
#if FEATURE_FASTTAILCALL
bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee);
#endif
bool fgCheckStmtAfterTailCall();
GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help);
bool fgCanTailCallViaJitHelper();
void fgMorphTailCallViaJitHelper(GenTreeCall* call);
GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall,
CORINFO_METHOD_HANDLE callTargetStubHnd,
CORINFO_METHOD_HANDLE dispatcherHnd);
GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle);
GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle);
GenTree* getVirtMethodPointerTree(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_CALL_INFO* pCallInfo);
GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent);
GenTree* fgMorphPotentialTailCall(GenTreeCall* call);
GenTree* fgGetStubAddrArg(GenTreeCall* call);
unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry);
void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall);
Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg,
fgArgTabEntry* argTabEntry,
unsigned lclParamNum,
BasicBlock* block,
const DebugInfo& callDI,
Statement* tmpAssignmentInsertionPoint,
Statement* paramAssignmentInsertionPoint);
GenTree* fgMorphCall(GenTreeCall* call);
GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call);
void fgMorphCallInline(GenTreeCall* call, InlineResult* result);
void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext);
#if DEBUG
void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call);
static fgWalkPreFn fgFindNonInlineCandidate;
#endif
GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE* ExactContextHnd,
CORINFO_RESOLVED_TOKEN* ldftnToken);
GenTree* fgMorphLeaf(GenTree* tree);
void fgAssignSetVarDef(GenTree* tree);
GenTree* fgMorphOneAsgBlockOp(GenTree* tree);
GenTree* fgMorphInitBlock(GenTree* tree);
GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize);
GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false);
GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd);
GenTree* fgMorphCopyBlock(GenTree* tree);
GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree);
GenTree* fgMorphForRegisterFP(GenTree* tree);
GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr);
GenTree* fgOptimizeCast(GenTreeCast* cast);
GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp);
GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp);
#ifdef FEATURE_HW_INTRINSICS
GenTree* fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node);
#endif
GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree);
GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp);
GenTree* fgOptimizeAddition(GenTreeOp* add);
GenTree* fgOptimizeMultiply(GenTreeOp* mul);
GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp);
GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects);
GenTree* fgMorphRetInd(GenTreeUnOp* tree);
GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree);
GenTree* fgMorphSmpOpOptional(GenTreeOp* tree);
GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp);
GenTree* fgMorphConst(GenTree* tree);
bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2);
GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true);
GenTreeOp* fgMorphCommutative(GenTreeOp* tree);
GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree);
GenTree* fgMorphReduceAddOps(GenTree* tree);
public:
GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr);
private:
void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree));
void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree));
void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0));
Statement* fgMorphStmt;
    unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be
                                                         // used when morphing big offsets.
//----------------------- Liveness analysis -------------------------------
VARSET_TP fgCurUseSet; // vars used by block (before an assignment)
VARSET_TP fgCurDefSet; // vars assigned by block (before a use)
    MemoryKindSet fgCurMemoryUse;   // The set of memory kinds the current basic block uses.
    MemoryKindSet fgCurMemoryDef;   // The set of memory kinds the current basic block modifies.
    MemoryKindSet fgCurMemoryHavoc; // The set of memory kinds the current basic block is known to set to a "havoc" value.
bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points.
void fgMarkUseDef(GenTreeLclVarCommon* tree);
void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope);
void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope);
void fgExtendDbgScopes();
void fgExtendDbgLifetimes();
#ifdef DEBUG
void fgDispDebugScopes();
#endif // DEBUG
//-------------------------------------------------------------------------
//
// The following keeps track of any code we've added for things like array
// range checking or explicit calls to enable GC, and so on.
//
public:
struct AddCodeDsc
{
AddCodeDsc* acdNext;
BasicBlock* acdDstBlk; // block to which we jump
unsigned acdData;
SpecialCodeKind acdKind; // what kind of a special block is this?
#if !FEATURE_FIXED_OUT_ARGS
bool acdStkLvlInit; // has acdStkLvl value been already set?
unsigned acdStkLvl; // stack level in stack slots.
#endif // !FEATURE_FIXED_OUT_ARGS
};
private:
static unsigned acdHelper(SpecialCodeKind codeKind);
AddCodeDsc* fgAddCodeList;
bool fgAddCodeModf;
bool fgRngChkThrowAdded;
AddCodeDsc* fgExcptnTargetCache[SCK_COUNT];
BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind);
BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind);
public:
AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData);
bool fgUseThrowHelperBlocks();
AddCodeDsc* fgGetAdditionalCodeDescriptors()
{
return fgAddCodeList;
}
private:
bool fgIsCodeAdded();
bool fgIsThrowHlpBlk(BasicBlock* block);
#if !FEATURE_FIXED_OUT_ARGS
unsigned fgThrowHlpBlkStkLevel(BasicBlock* block);
#endif // !FEATURE_FIXED_OUT_ARGS
unsigned fgBigOffsetMorphingTemps[TYP_COUNT];
unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo);
void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext);
void fgInsertInlineeBlocks(InlineInfo* pInlineInfo);
Statement* fgInlinePrependStatements(InlineInfo* inlineInfo);
void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt);
#if FEATURE_MULTIREG_RET
GenTree* fgGetStructAsStructPtr(GenTree* tree);
GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd);
void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd);
#endif // FEATURE_MULTIREG_RET
static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder;
static fgWalkPostFn fgLateDevirtualization;
#ifdef DEBUG
static fgWalkPreFn fgDebugCheckInlineCandidates;
void CheckNoTransformableIndirectCallsRemain();
static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls;
#endif
void fgPromoteStructs();
void fgMorphStructField(GenTree* tree, GenTree* parent);
void fgMorphLocalField(GenTree* tree, GenTree* parent);
// Reset the refCount for implicit byrefs.
void fgResetImplicitByRefRefCount();
// Change implicit byrefs' types from struct to pointer, and for any that were
// promoted, create new promoted struct temps.
void fgRetypeImplicitByRefArgs();
// Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection).
bool fgMorphImplicitByRefArgs(GenTree* tree);
GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr);
// Clear up annotations for any struct promotion temps created for implicit byrefs.
void fgMarkDemotedImplicitByRefArgs();
void fgMarkAddressExposedLocals();
void fgMarkAddressExposedLocals(Statement* stmt);
PhaseStatus fgForwardSub();
bool fgForwardSubBlock(BasicBlock* block);
bool fgForwardSubStatement(Statement* statement);
static fgWalkPreFn fgUpdateSideEffectsPre;
static fgWalkPostFn fgUpdateSideEffectsPost;
// The given local variable, required to be a struct variable, is being assigned via
// a "lclField", to make it masquerade as an integral type in the ABI. Make sure that
// the variable is not enregistered, and is therefore not promoted independently.
void fgLclFldAssign(unsigned lclNum);
static fgWalkPreFn gtHasLocalsWithAddrOpCB;
enum TypeProducerKind
{
TPK_Unknown = 0, // May not be a RuntimeType
TPK_Handle = 1, // RuntimeType via handle
TPK_GetType = 2, // RuntimeType via Object.get_Type()
TPK_Null = 3, // Tree value is null
TPK_Other = 4 // RuntimeType via other means
};
TypeProducerKind gtGetTypeProducerKind(GenTree* tree);
bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call);
bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr);
bool gtIsActiveCSE_Candidate(GenTree* tree);
bool fgIsBigOffset(size_t offset);
bool fgNeedReturnSpillTemp();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Optimizer XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
void optInit();
GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt);
GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt);
void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt);
protected:
// Do hoisting for all loops.
void optHoistLoopCode();
// To represent sets of VN's that have already been hoisted in outer loops.
typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet;
struct LoopHoistContext
{
private:
// The set of variables hoisted in the current loop (or nullptr if there are none).
VNSet* m_pHoistedInCurLoop;
public:
// Value numbers of expressions that have been hoisted in parent loops in the loop nest.
VNSet m_hoistedInParentLoops;
// Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest.
// Previous decisions on loop-invariance of value numbers in the current loop.
VNSet m_curLoopVnInvariantCache;
VNSet* GetHoistedInCurLoop(Compiler* comp)
{
if (m_pHoistedInCurLoop == nullptr)
{
m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist());
}
return m_pHoistedInCurLoop;
}
VNSet* ExtractHoistedInCurLoop()
{
VNSet* res = m_pHoistedInCurLoop;
m_pHoistedInCurLoop = nullptr;
return res;
}
LoopHoistContext(Compiler* comp)
: m_pHoistedInCurLoop(nullptr)
, m_hoistedInParentLoops(comp->getAllocatorLoopHoist())
, m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist())
{
}
};
// Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it.
// Tracks the expressions that have been hoisted by containing loops by temporarily recording their
// value numbers in "m_hoistedInParentLoops". This set is not modified by the call.
void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt);
// Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.)
// Assumes that expressions have been hoisted in containing loops if their value numbers are in
// "m_hoistedInParentLoops".
//
void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt);
// Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable)
// outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted
// expressions to "hoistInLoop".
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext);
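    // Conceptual sketch of the effect of hoisting (simplified):
    //
    //     before:  preheader: ...
    //              loop:      ... (a + b) ...   // "a + b" is invariant in the loop
    //     after:   preheader: ... (a + b) ...   // evaluated once, outside the loop
    //              loop:      ... (a + b) ...   // later commoned up with the hoisted copy by CSE
    //
    // See optPerformHoistExpr below for the code that actually places the expression in the pre-header.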
// Return true if the tree looks profitable to hoist out of loop 'lnum'.
bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum);
    // Performs the hoisting of 'tree' into the PreHeader for loop 'lnum'
void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt);
// Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum".
// Constants and init values are always loop invariant.
// VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop.
bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs);
// If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop
// in the loop table.
bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum);
// Records the set of "side effects" of all loops: fields (object instance and static)
// written to, and SZ-array element type equivalence classes updated.
void optComputeLoopSideEffects();
#ifdef DEBUG
bool optAnyChildNotRemoved(unsigned loopNum);
#endif // DEBUG
// Mark a loop as removed.
void optMarkLoopRemoved(unsigned loopNum);
private:
// Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop,
// including all nested loops, and records the set of "side effects" of the loop: fields (object instance and
// static) written to, and SZ-array element type equivalence classes updated.
void optComputeLoopNestSideEffects(unsigned lnum);
// Given a loop number 'lnum' mark it and any nested loops as having 'memoryHavoc'
void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc);
// Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part.
// Returns false if we encounter a block that is not marked as being inside a loop.
//
bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk);
// Hoist the expression "expr" out of loop "lnum".
void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum);
public:
void optOptimizeBools();
public:
PhaseStatus optInvertLoops(); // Invert loops so they're entered at top and tested at bottom.
PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method
PhaseStatus optSetBlockWeights();
PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table
void optFindLoops();
PhaseStatus optCloneLoops();
void optCloneLoop(unsigned loopInd, LoopCloneContext* context);
void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight);
PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info)
void optRemoveRedundantZeroInits();
protected:
// This enumeration describes what is killed by a call.
enum callInterf
{
CALLINT_NONE, // no interference (most helpers)
CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ)
CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ)
CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT)
CALLINT_ALL, // kills everything (normal method call)
};
enum class FieldKindForVN
{
SimpleStatic,
WithBaseAddr
};
public:
// A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in
// bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered"
// in bbNext order; we use comparisons on the bbNum to decide order.)
// The blocks that define the body are
// top <= entry <= bottom
// The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a
// single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at
// Compiler::optFindNaturalLoops().
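    // For example (illustrative), a simple top-entered loop laid out in bbNext order looks like:
    //
    //     head:    ...                     // not part of the loop; falls through to the loop
    //     top:     test; jfalse exit       // top == entry in this example
    //     body:    ...
    //     bottom:  jump top                // the back edge
    //     exit:    ...
    //
    // Here lpTop == lpEntry, lpBottom is the source of the back edge, and lpHead is the block just
    // before lpTop.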
struct LoopDsc
{
        BasicBlock* lpHead; // HEAD of the loop (not part of the loop itself) -- has ENTRY as a successor.
BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext
// order) reachable in this loop.
BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM)
BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP)
BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM)
callInterf lpAsgCall; // "callInterf" for calls in the loop
ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked)
varRefKinds lpAsgInds : 8; // set of inds modified within the loop
LoopFlags lpFlags;
unsigned char lpExitCnt; // number of exits from the loop
unsigned char lpParent; // The index of the most-nested loop that completely contains this one,
// or else BasicBlock::NOT_IN_LOOP if no such loop exists.
unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists.
// (Actually, an "immediately" nested loop --
// no other child of this loop is a parent of lpChild.)
unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent,
// or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop
// by following "lpChild" then "lpSibling" links.
bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary
// memory side effects. If this is set, the fields below
// may not be accurate (since they become irrelevant.)
VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop
VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop
// The following counts are used for hoisting profitability checks.
int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been
// hoisted
int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop
int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop
int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been
// hoisted
int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop
int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop
typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN>
FieldHandleSet;
FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified
// in the loop.
typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet;
ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that
// arrays of that type are modified
// in the loop.
// Adds the variable liveness information for 'blk' to 'this' LoopDsc
void AddVariableLiveness(Compiler* comp, BasicBlock* blk);
inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind);
// This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles
// (shifted left, with a low-order bit set to distinguish.)
// Use the {Encode/Decode}ElemType methods to construct/destruct these.
inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd);
/* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */
GenTree* lpIterTree; // The "i = i <op> const" tree
unsigned lpIterVar() const; // iterator variable #
int lpIterConst() const; // the constant with which the iterator is incremented
genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.)
void VERIFY_lpIterTree() const;
var_types lpIterOperType() const; // For overflow instructions
// Set to the block where we found the initialization for LPFLG_CONST_INIT or LPFLG_VAR_INIT loops.
// Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block.
BasicBlock* lpInitBlock;
union {
int lpConstInit; // initial constant value of iterator
// : Valid if LPFLG_CONST_INIT
unsigned lpVarInit; // initial local var number to which we initialize the iterator
// : Valid if LPFLG_VAR_INIT
};
// The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var")
GenTree* lpTestTree; // pointer to the node containing the loop test
genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE,
// etc.)
void VERIFY_lpTestTree() const;
bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition
GenTree* lpIterator() const; // the iterator node in the loop test
GenTree* lpLimit() const; // the limit node in the loop test
// Limit constant value of iterator - loop condition is "i RELOP const"
// : Valid if LPFLG_CONST_LIMIT
int lpConstLimit() const;
// The lclVar # in the loop condition ( "i RELOP lclVar" )
// : Valid if LPFLG_VAR_LIMIT
unsigned lpVarLimit() const;
// The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" )
// : Valid if LPFLG_ARRLEN_LIMIT
bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const;
// Returns "true" iff this is a "top entry" loop.
bool lpIsTopEntry() const
{
if (lpHead->bbNext == lpEntry)
{
assert(lpHead->bbFallsThrough());
assert(lpTop == lpEntry);
return true;
}
else
{
return false;
}
}
// Returns "true" iff "*this" contains the blk.
bool lpContains(BasicBlock* blk) const
{
return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum;
}
// Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops
// to be equal, but requiring bottoms to be different.)
bool lpContains(BasicBlock* top, BasicBlock* bottom) const
{
return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum;
}
// Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring
// bottoms to be different.)
bool lpContains(const LoopDsc& lp2) const
{
return lpContains(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff "*this" is (properly) contained by the range [top, bottom]
// (allowing tops to be equal, but requiring bottoms to be different.)
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const
{
return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum;
}
// Returns "true" iff "*this" is (properly) contained by "lp2"
// (allowing tops to be equal, but requiring bottoms to be different.)
bool lpContainedBy(const LoopDsc& lp2) const
{
return lpContainedBy(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff "*this" is disjoint from the range [top, bottom].
bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const
{
return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum;
}
// Returns "true" iff "*this" is disjoint from "lp2".
bool lpDisjoint(const LoopDsc& lp2) const
{
return lpDisjoint(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff the loop is well-formed (see code for defn).
bool lpWellFormed() const
{
return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum &&
(lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum);
}
#ifdef DEBUG
void lpValidatePreHeader() const
{
// If this is called, we expect there to be a pre-header.
assert(lpFlags & LPFLG_HAS_PREHEAD);
// The pre-header must unconditionally enter the loop.
assert(lpHead->GetUniqueSucc() == lpEntry);
// The loop block must be marked as a pre-header.
assert(lpHead->bbFlags & BBF_LOOP_PREHEADER);
// The loop entry must have a single non-loop predecessor, which is the pre-header.
// We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained()
// check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`.
}
#endif // DEBUG
// LoopBlocks: convenience method for enabling range-based `for` iteration over all the
// blocks in a loop, e.g.:
// for (BasicBlock* const block : loop->LoopBlocks()) ...
// Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order
// from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered
// to be part of the loop.
//
BasicBlockRangeList LoopBlocks() const
{
return BasicBlockRangeList(lpTop, lpBottom);
}
};
protected:
bool fgMightHaveLoop(); // returns true if there are any back edges
bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability
public:
LoopDsc* optLoopTable; // loop descriptor table
unsigned char optLoopCount; // number of tracked loops
unsigned char loopAlignCandidates; // number of loops identified for alignment
// Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or
// loop table pointers from the previous epoch are invalid.
// TODO: validate this in some way?
unsigned optCurLoopEpoch;
void NewLoopEpoch()
{
++optCurLoopEpoch;
JITDUMP("New loop epoch %d\n", optCurLoopEpoch);
}
#ifdef DEBUG
unsigned char loopsAligned; // number of loops actually aligned
#endif // DEBUG
bool optRecordLoop(BasicBlock* head,
BasicBlock* top,
BasicBlock* entry,
BasicBlock* bottom,
BasicBlock* exit,
unsigned char exitCnt);
void optClearLoopIterInfo();
#ifdef DEBUG
void optPrintLoopInfo(unsigned lnum, bool printVerbose = false);
void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false);
void optPrintLoopTable();
#endif
protected:
unsigned optCallCount; // number of calls made in the method
unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method
unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method
unsigned optLoopsCloned; // number of loops cloned in the current method.
#ifdef DEBUG
void optCheckPreds();
#endif
void optResetLoopInfo();
void optFindAndScaleGeneralLoopBlocks();
// Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads.
void optMarkLoopHeads();
void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk);
void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk);
void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false);
bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt);
unsigned optIsLoopIncrTree(GenTree* incr);
bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar);
bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar);
bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar);
bool optExtractInitTestIncr(
BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr);
void optFindNaturalLoops();
void optIdentifyLoopsForAlignment();
// Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' --
// each loop has a unique "top." Returns "true" iff the flowgraph has been modified.
bool optCanonicalizeLoopNest(unsigned char loopInd);
// Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top,"
// unshared with any other loop. Returns "true" iff the flowgraph has been modified
bool optCanonicalizeLoop(unsigned char loopInd);
// Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP".
// Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP".
// Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2".
// A loop contains itself.
bool optLoopContains(unsigned l1, unsigned l2) const;
    // Updates the loop table by changing the head of loop "loopInd", which is required
    // to be "from", to instead be "to". Also performs this transformation for any
    // loop nested in "loopInd" that shares the same head as "loopInd".
void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to);
void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false);
// Marks the containsCall information to "lnum" and any parent loops.
void AddContainsCallAllContainingLoops(unsigned lnum);
// Adds the variable liveness information from 'blk' to "lnum" and any parent loops.
void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk);
// Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops.
void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind);
// Adds "elemType" to the set of modified array element types of "lnum" and any parent loops.
void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType);
// Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone
// of "from".) Copies the jump destination from "from" to "to".
void optCopyBlkDest(BasicBlock* from, BasicBlock* to);
// Returns true if 'block' is an entry block for any loop in 'optLoopTable'
bool optIsLoopEntry(BasicBlock* block) const;
// The depth of the loop described by "lnum" (an index into the loop table.) (0 == top level)
unsigned optLoopDepth(unsigned lnum)
{
assert(lnum < optLoopCount);
unsigned depth = 0;
while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP)
{
++depth;
}
return depth;
}
// Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score.
struct OptInvertCountTreeInfoType
{
int sharedStaticHelperCount;
int arrayLengthCount;
};
static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data);
bool optInvertWhileLoop(BasicBlock* block);
private:
static bool optIterSmallOverflow(int iterAtExit, var_types incrType);
static bool optIterSmallUnderflow(int iterAtExit, var_types decrType);
bool optComputeLoopRep(int constInit,
int constLimit,
int iterInc,
genTreeOps iterOper,
var_types iterType,
genTreeOps testOper,
bool unsignedTest,
bool dupCond,
unsigned* iterCount);
static fgWalkPreFn optIsVarAssgCB;
protected:
bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var);
bool optIsVarAssgLoop(unsigned lnum, unsigned var);
int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE);
bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit);
protected:
// The following is the upper limit on how many expressions we'll keep track
// of for the CSE analysis.
//
static const unsigned MAX_CSE_CNT = EXPSET_SZ;
static const int MIN_CSE_COST = 2;
// BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask.
// This BitVec uses one bit per CSE candidate
BitVecTraits* cseMaskTraits; // one bit per CSE candidate
// BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm.
// Two bits are allocated per CSE candidate to compute CSE availability
// plus an extra bit to handle the initial unvisited case.
// (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.)
//
// The two bits per CSE candidate have the following meanings:
// 11 - The CSE is available, and is also available when considering calls as killing availability.
// 10 - The CSE is available, but is not available when considering calls as killing availability.
// 00 - The CSE is not available
// 01 - An illegal combination
//
BitVecTraits* cseLivenessTraits;
//-----------------------------------------------------------------------------------------------------------------
// getCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index.
// Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate
// CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from
// GET_CSE_INDEX().
//
static unsigned genCSEnum2bit(unsigned CSEnum)
{
assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT));
return CSEnum - 1;
}
//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE.
//
static unsigned getCSEAvailBit(unsigned CSEnum)
{
return genCSEnum2bit(CSEnum) * 2;
}
//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit
// for a CSE considering calls as killing availability bit (see description above).
//
static unsigned getCSEAvailCrossCallBit(unsigned CSEnum)
{
return getCSEAvailBit(CSEnum) + 1;
}
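    // For illustration, the bit layout produced by the two functions above is:
    //
    //     CSE #1  ->  bit 0 (available),  bit 1 (available across calls)
    //     CSE #2  ->  bit 2 (available),  bit 3 (available across calls)
    //     CSE #k  ->  bits 2*(k-1) and 2*(k-1)+1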
void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true);
EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites
/* Generic list of nodes - used by the CSE logic */
struct treeLst
{
treeLst* tlNext;
GenTree* tlTree;
};
struct treeStmtLst
{
treeStmtLst* tslNext;
GenTree* tslTree; // tree node
Statement* tslStmt; // statement containing the tree
BasicBlock* tslBlock; // block containing the statement
};
// The following logic keeps track of expressions via a simple hash table.
struct CSEdsc
{
CSEdsc* csdNextInBucket; // used by the hash table
size_t csdHashKey; // the original hash key
ssize_t csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def
ValueNum csdConstDefVN; // When we CSE similar constants, this is the ValueNumber that we use for the LclVar
// assignment
unsigned csdIndex; // 1..optCSECandidateCount
bool csdIsSharedConst; // true if this CSE is a shared const
bool csdLiveAcrossCall;
unsigned short csdDefCount; // definition count
unsigned short csdUseCount; // use count (excluding the implicit uses at defs)
weight_t csdDefWtCnt; // weighted def count
weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)
GenTree* csdTree; // treenode containing the 1st occurrence
Statement* csdStmt; // stmt containing the 1st occurrence
BasicBlock* csdBlock; // block containing the 1st occurrence
treeStmtLst* csdTreeList; // list of matching tree nodes: head
treeStmtLst* csdTreeLast; // list of matching tree nodes: tail
// ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing
// and GT_IND nodes always have a valid struct handle.
//
CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE
bool csdStructHndMismatch;
ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE.
// This will be set to NoVN if we decide to abandon this CSE
ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses.
ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value
// number, this will reflect it; otherwise, NoVN.
// not used for shared const CSE's
};
static const size_t s_optCSEhashSizeInitial;
static const size_t s_optCSEhashGrowthFactor;
static const size_t s_optCSEhashBucketSize;
size_t optCSEhashSize; // The current size of hashtable
size_t optCSEhashCount; // Number of entries in hashtable
size_t optCSEhashMaxCountBeforeResize; // Number of entries before resize
CSEdsc** optCSEhash;
CSEdsc** optCSEtab;
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap;
NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be
// re-numbered with the bound to improve range check elimination
// Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found.
void optCseUpdateCheckedBoundMap(GenTree* compare);
void optCSEstop();
CSEdsc* optCSEfindDsc(unsigned index);
bool optUnmarkCSE(GenTree* tree);
// user defined callback data for the tree walk function optCSE_MaskHelper()
struct optCSE_MaskData
{
EXPSET_TP CSE_defMask;
EXPSET_TP CSE_useMask;
};
// Treewalk helper for optCSE_DefMask and optCSE_UseMask
static fgWalkPreFn optCSE_MaskHelper;
// This function walks all the nodes of a given tree
// and returns the mask of CSE definitions and uses for the tree
//
void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData);
// Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2.
bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);
struct optCSEcostCmpEx
{
bool operator()(const CSEdsc* op1, const CSEdsc* op2);
};
struct optCSEcostCmpSz
{
bool operator()(const CSEdsc* op1, const CSEdsc* op2);
};
void optCleanupCSEs();
#ifdef DEBUG
void optEnsureClearCSEInfo();
#endif // DEBUG
static bool Is_Shared_Const_CSE(size_t key)
{
return ((key & TARGET_SIGN_BIT) != 0);
}
// returns the encoded key
static size_t Encode_Shared_Const_CSE_Value(size_t key)
{
return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS);
}
// returns the original key
static size_t Decode_Shared_Const_CSE_Value(size_t enckey)
{
assert(Is_Shared_Const_CSE(enckey));
return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS;
}
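// Illustrative round trip (a sketch; the concrete CSE_CONST_SHARED_LOW_BITS value is target-dependent):
//   size_t enc = Encode_Shared_Const_CSE_Value(key); // sets TARGET_SIGN_BIT, drops the low bits of 'key'
//   size_t dec = Decode_Shared_Const_CSE_Value(enc); // == 'key' with its low CSE_CONST_SHARED_LOW_BITS bits cleared
// Constants that agree in their upper bits therefore map to the same shared-const CSE key.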
/**************************************************************************
* Value Number based CSEs
*************************************************************************/
// String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX().
#define FMT_CSE "CSE #%02u"
public:
void optOptimizeValnumCSEs();
protected:
void optValnumCSE_Init();
unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt);
bool optValnumCSE_Locate();
void optValnumCSE_InitDataFlow();
void optValnumCSE_DataFlow();
void optValnumCSE_Availablity();
void optValnumCSE_Heuristic();
bool optDoCSE; // True when we have found a duplicate CSE tree
bool optValnumCSE_phase; // True when we are executing the optOptimizeValnumCSEs() phase
unsigned optCSECandidateCount; // Count of CSE candidates
unsigned optCSEstart; // The first local variable number that is a CSE
unsigned optCSEcount; // The total count of CSEs introduced.
weight_t optCSEweight; // The weight of the current block when we are doing PerformCSE
bool optIsCSEcandidate(GenTree* tree);
// lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler
//
bool lclNumIsTrueCSE(unsigned lclNum) const
{
return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount));
}
// lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop.
//
bool lclNumIsCSE(unsigned lclNum) const
{
return lvaGetDesc(lclNum)->lvIsCSE;
}
#ifdef DEBUG
bool optConfigDisableCSE();
bool optConfigDisableCSE2();
#endif
void optOptimizeCSEs();
struct isVarAssgDsc
{
GenTree* ivaSkip;
ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars.
#ifdef DEBUG
void* ivaSelf;
#endif
unsigned ivaVar; // Variable we are interested in, or -1
varRefKinds ivaMaskInd; // What kind of indirect assignments are there?
callInterf ivaMaskCall; // What kind of calls are there?
bool ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to.
};
static callInterf optCallInterf(GenTreeCall* call);
public:
// VN based copy propagation.
// In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for.
// While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor,
// for locals which will use "definitions from uses", it will not be, so we store it
// in this class instead.
class CopyPropSsaDef
{
LclSsaVarDsc* m_ssaDef;
#ifdef DEBUG
GenTree* m_defNode;
#endif
public:
CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode)
: m_ssaDef(ssaDef)
#ifdef DEBUG
, m_defNode(defNode)
#endif
{
}
LclSsaVarDsc* GetSsaDef() const
{
return m_ssaDef;
}
#ifdef DEBUG
GenTree* GetDefNode() const
{
return m_defNode;
}
#endif
};
typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack;
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap;
// Copy propagation functions.
void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName);
void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
void optCopyPropPushDef(GenTreeOp* asg,
GenTreeLclVarCommon* lclNode,
unsigned lclNum,
LclNumToLiveDefsMap* curSsaName);
unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode);
int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2);
void optVnCopyProp();
INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName));
/**************************************************************************
* Early value propagation
*************************************************************************/
struct SSAName
{
unsigned m_lvNum;
unsigned m_ssaNum;
SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum)
{
}
static unsigned GetHashCode(SSAName ssaNm)
{
return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum);
}
static bool Equals(SSAName ssaNm1, SSAName ssaNm2)
{
return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum);
}
};
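// Hash sketch: GetHashCode packs both fields into one word, e.g. lvNum == 3 and ssaNum == 5 hash to
// (3 << 16) | 5 == 0x30005. Distinct names can only collide when ssaNum does not fit in 16 bits,
// which is harmless because Equals still compares both fields exactly.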
#define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array
#define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type.
#define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores.
#define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check.
#define OMF_HAS_FATPOINTER 0x00000010 // Method contains call, that needs fat pointer transformation.
#define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack.
#define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate
#define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary.
#define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints
#define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls
#define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT.
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints
#define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block
bool doesMethodHaveFatPointer()
{
return (optMethodFlags & OMF_HAS_FATPOINTER) != 0;
}
void setMethodHasFatPointer()
{
optMethodFlags |= OMF_HAS_FATPOINTER;
}
void clearMethodHasFatPointer()
{
optMethodFlags &= ~OMF_HAS_FATPOINTER;
}
void addFatPointerCandidate(GenTreeCall* call);
bool doesMethodHaveFrozenString() const
{
return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0;
}
void setMethodHasFrozenString()
{
optMethodFlags |= OMF_HAS_FROZEN_STRING;
}
bool doesMethodHaveGuardedDevirtualization() const
{
return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0;
}
void setMethodHasGuardedDevirtualization()
{
optMethodFlags |= OMF_HAS_GUARDEDDEVIRT;
}
void clearMethodHasGuardedDevirtualization()
{
optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT;
}
void considerGuardedDevirtualization(GenTreeCall* call,
IL_OFFSET ilOffset,
bool isInterface,
CORINFO_METHOD_HANDLE baseMethod,
CORINFO_CLASS_HANDLE baseClass,
CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass)
DEBUGARG(const char* objClassName));
void addGuardedDevirtualizationCandidate(GenTreeCall* call,
CORINFO_METHOD_HANDLE methodHandle,
CORINFO_CLASS_HANDLE classHandle,
unsigned methodAttr,
unsigned classAttr,
unsigned likelihood);
bool doesMethodHaveExpRuntimeLookup()
{
return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0;
}
void setMethodHasExpRuntimeLookup()
{
optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP;
}
void clearMethodHasExpRuntimeLookup()
{
optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP;
}
void addExpRuntimeLookupCandidate(GenTreeCall* call);
bool doesMethodHavePatchpoints()
{
return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0;
}
void setMethodHasPatchpoint()
{
optMethodFlags |= OMF_HAS_PATCHPOINT;
}
bool doesMethodHavePartialCompilationPatchpoints()
{
return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0;
}
void setMethodHasPartialCompilationPatchpoint()
{
optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT;
}
unsigned optMethodFlags;
bool doesMethodHaveNoReturnCalls()
{
return optNoReturnCallCount > 0;
}
void setMethodHasNoReturnCalls()
{
optNoReturnCallCount++;
}
unsigned optNoReturnCallCount;
// The recursion bound controls how far we can walk backwards tracking an SSA value.
// No throughput difference was found with a backward walk bound between 3 and 8.
static const int optEarlyPropRecurBound = 5;
enum class optPropKind
{
OPK_INVALID,
OPK_ARRAYLEN,
OPK_NULLCHECK
};
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap;
GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block));
GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth);
GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind);
GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
bool optDoEarlyPropForBlock(BasicBlock* block);
bool optDoEarlyPropForFunc();
void optEarlyProp();
void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
bool optIsNullCheckFoldingLegal(GenTree* tree,
GenTree* nullCheckTree,
GenTree** nullCheckParent,
Statement** nullCheckStmt);
bool optCanMoveNullCheckPastTree(GenTree* tree,
unsigned nullCheckLclNum,
bool isInsideTry,
bool checkSideEffectSummary);
#if DEBUG
void optCheckFlagsAreSet(unsigned methodFlag,
const char* methodFlagStr,
unsigned bbFlag,
const char* bbFlagStr,
GenTree* tree,
BasicBlock* basicBlock);
#endif
// Redundant branch opts
//
PhaseStatus optRedundantBranches();
bool optRedundantRelop(BasicBlock* const block);
bool optRedundantBranch(BasicBlock* const block);
bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop);
bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock);
/**************************************************************************
* Value/Assertion propagation
*************************************************************************/
public:
// Data structures for assertion prop
BitVecTraits* apTraits;
ASSERT_TP apFull;
enum optAssertionKind
{
OAK_INVALID,
OAK_EQUAL,
OAK_NOT_EQUAL,
OAK_SUBRANGE,
OAK_NO_THROW,
OAK_COUNT
};
enum optOp1Kind
{
O1K_INVALID,
O1K_LCLVAR,
O1K_ARR_BND,
O1K_BOUND_OPER_BND,
O1K_BOUND_LOOP_BND,
O1K_CONSTANT_LOOP_BND,
O1K_CONSTANT_LOOP_BND_UN,
O1K_EXACT_TYPE,
O1K_SUBTYPE,
O1K_VALUE_NUMBER,
O1K_COUNT
};
enum optOp2Kind
{
O2K_INVALID,
O2K_LCLVAR_COPY,
O2K_IND_CNS_INT,
O2K_CONST_INT,
O2K_CONST_LONG,
O2K_CONST_DOUBLE,
O2K_ZEROOBJ,
O2K_SUBRANGE,
O2K_COUNT
};
struct AssertionDsc
{
optAssertionKind assertionKind;
struct SsaVar
{
unsigned lclNum; // assigned to or property of this local var number
unsigned ssaNum;
};
struct ArrBnd
{
ValueNum vnIdx;
ValueNum vnLen;
};
struct AssertionDscOp1
{
optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype
ValueNum vn;
union {
SsaVar lcl;
ArrBnd bnd;
};
} op1;
struct AssertionDscOp2
{
optOp2Kind kind; // a const or copy assignment
ValueNum vn;
struct IntVal
{
ssize_t iconVal; // integer
#if !defined(HOST_64BIT)
unsigned padding; // unused; ensures iconFlags does not overlap lconVal
#endif
GenTreeFlags iconFlags; // gtFlags
};
union {
struct
{
SsaVar lcl;
FieldSeqNode* zeroOffsetFieldSeq;
};
IntVal u1;
__int64 lconVal;
double dconVal;
IntegralRange u2;
};
} op2;
bool IsCheckedBoundArithBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND);
}
bool IsCheckedBoundBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND);
}
bool IsConstantBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) &&
(op1.kind == O1K_CONSTANT_LOOP_BND));
}
bool IsConstantBoundUnsigned()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) &&
(op1.kind == O1K_CONSTANT_LOOP_BND_UN));
}
bool IsBoundsCheckNoThrow()
{
return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND));
}
bool IsCopyAssertion()
{
return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY));
}
bool IsConstantInt32Assertion()
{
return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT);
}
static bool SameKind(AssertionDsc* a1, AssertionDsc* a2)
{
return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind &&
a1->op2.kind == a2->op2.kind;
}
static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2)
{
if (kind == OAK_EQUAL)
{
return kind2 == OAK_NOT_EQUAL;
}
else if (kind == OAK_NOT_EQUAL)
{
return kind2 == OAK_EQUAL;
}
return false;
}
bool HasSameOp1(AssertionDsc* that, bool vnBased)
{
if (op1.kind != that->op1.kind)
{
return false;
}
else if (op1.kind == O1K_ARR_BND)
{
assert(vnBased);
return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen);
}
else
{
return ((vnBased && (op1.vn == that->op1.vn)) ||
(!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum)));
}
}
bool HasSameOp2(AssertionDsc* that, bool vnBased)
{
if (op2.kind != that->op2.kind)
{
return false;
}
switch (op2.kind)
{
case O2K_IND_CNS_INT:
case O2K_CONST_INT:
return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags));
case O2K_CONST_LONG:
return (op2.lconVal == that->op2.lconVal);
case O2K_CONST_DOUBLE:
// exact match because of positive and negative zero.
return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0);
case O2K_ZEROOBJ:
return true;
case O2K_LCLVAR_COPY:
return (op2.lcl.lclNum == that->op2.lcl.lclNum) &&
(!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) &&
(op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq);
case O2K_SUBRANGE:
return op2.u2.Equals(that->op2.u2);
case O2K_INVALID:
// we will return false
break;
default:
assert(!"Unexpected value for op2.kind in AssertionDsc.");
break;
}
return false;
}
bool Complementary(AssertionDsc* that, bool vnBased)
{
return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) &&
HasSameOp2(that, vnBased);
}
bool Equals(AssertionDsc* that, bool vnBased)
{
if (assertionKind != that->assertionKind)
{
return false;
}
else if (assertionKind == OAK_NO_THROW)
{
assert(op2.kind == O2K_INVALID);
return HasSameOp1(that, vnBased);
}
else
{
return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased);
}
}
};
protected:
static fgWalkPreFn optAddCopiesCallback;
static fgWalkPreFn optVNAssertionPropCurStmtVisitor;
unsigned optAddCopyLclNum;
GenTree* optAddCopyAsgnNode;
bool optLocalAssertionProp; // indicates that we are performing local assertion prop
bool optAssertionPropagated; // set to true if we modified the trees
bool optAssertionPropagatedCurrentStmt;
#ifdef DEBUG
GenTree* optAssertionPropCurrentTree;
#endif
AssertionIndex* optComplementaryAssertionMap;
JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions
// using the value of a local var) for each local var
AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments
AssertionIndex optAssertionCount; // total number of assertions in the assertion table
AssertionIndex optMaxAssertionCount;
public:
void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree);
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree);
GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test);
GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree);
GenTree* optExtractSideEffListFromConst(GenTree* tree);
AssertionIndex GetAssertionCount()
{
return optAssertionCount;
}
ASSERT_TP* bbJtrueAssertionOut;
typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap;
ValueNumToAssertsMap* optValueNumToAsserts;
// Assertion prop helpers.
ASSERT_TP& GetAssertionDep(unsigned lclNum);
AssertionDsc* optGetAssertion(AssertionIndex assertIndex);
void optAssertionInit(bool isLocalProp);
void optAssertionTraitsInit(AssertionIndex assertionCount);
void optAssertionReset(AssertionIndex limit);
void optAssertionRemove(AssertionIndex index);
// Assertion prop data flow functions.
void optAssertionPropMain();
Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt);
bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags);
ASSERT_TP* optInitAssertionDataflowFlags();
ASSERT_TP* optComputeAssertionGen();
// Assertion Gen functions.
void optAssertionGen(GenTree* tree);
AssertionIndex optAssertionGenCast(GenTreeCast* cast);
AssertionIndex optAssertionGenPhiDefn(GenTree* tree);
AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree);
AssertionInfo optAssertionGenJtrue(GenTree* tree);
AssertionIndex optCreateJtrueAssertions(GenTree* op1,
GenTree* op2,
Compiler::optAssertionKind assertionKind,
bool helperCallArgs = false);
AssertionIndex optFindComplementary(AssertionIndex assertionIndex);
void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index);
// Assertion creation functions.
AssertionIndex optCreateAssertion(GenTree* op1,
GenTree* op2,
optAssertionKind assertionKind,
bool helperCallArgs = false);
AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion);
bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange);
void optCreateComplementaryAssertion(AssertionIndex assertionIndex,
GenTree* op1,
GenTree* op2,
bool helperCallArgs = false);
bool optAssertionVnInvolvesNan(AssertionDsc* assertion);
AssertionIndex optAddAssertion(AssertionDsc* assertion);
void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index);
#ifdef DEBUG
void optPrintVnAssertionMapping();
#endif
ASSERT_TP optGetVnMappedAssertions(ValueNum vn);
// Used for respective assertion propagations.
AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions);
AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions);
AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased));
bool optAssertionIsNonNull(GenTree* op,
ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex));
AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2);
AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1);
AssertionIndex optLocalAssertionIsEqualOrNotEqual(
optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions);
// Assertion prop for lcl var functions.
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc);
GenTree* optCopyAssertionProp(AssertionDsc* curAssertion,
GenTreeLclVarCommon* tree,
Statement* stmt DEBUGARG(AssertionIndex index));
GenTree* optConstantAssertionProp(AssertionDsc* curAssertion,
GenTreeLclVarCommon* tree,
Statement* stmt DEBUGARG(AssertionIndex index));
bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions);
// Assertion propagation functions.
GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block);
GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt);
GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt);
GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt);
GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt);
GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt);
GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt);
GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call);
// Implied assertion functions.
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions);
void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions);
void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result);
void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result);
#ifdef DEBUG
void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0);
void optPrintAssertionIndex(AssertionIndex index);
void optPrintAssertionIndices(ASSERT_TP assertions);
void optDebugCheckAssertion(AssertionDsc* assertion);
void optDebugCheckAssertions(AssertionIndex AssertionIndex);
#endif
static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr);
static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr);
void optAddCopies();
/**************************************************************************
* Range checks
*************************************************************************/
public:
struct LoopCloneVisitorInfo
{
LoopCloneContext* context;
unsigned loopNum;
Statement* stmt;
LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt)
: context(context), loopNum(loopNum), stmt(nullptr)
{
}
};
bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum);
bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum);
bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum);
bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context);
static fgWalkPreFn optCanOptimizeByLoopCloningVisitor;
fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info);
bool optObtainLoopCloningOpts(LoopCloneContext* context);
bool optIsLoopClonable(unsigned loopInd);
bool optLoopCloningEnabled();
#ifdef DEBUG
void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore);
#endif
void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath));
bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context);
bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context);
BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context,
unsigned loopNum,
BasicBlock* slowHead,
BasicBlock* insertAfter);
protected:
ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk));
bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB);
protected:
bool optLoopsMarked;
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX RegAlloc XX
XX XX
XX Does the register allocation and puts the remaining lclVars on the stack XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc);
void raMarkStkVars();
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#if defined(TARGET_AMD64)
static bool varTypeNeedsPartialCalleeSave(var_types type)
{
assert(type != TYP_STRUCT);
return (type == TYP_SIMD32);
}
#elif defined(TARGET_ARM64)
static bool varTypeNeedsPartialCalleeSave(var_types type)
{
assert(type != TYP_STRUCT);
// The ARM64 ABI only requires the callee to save the lower 8 bytes of the FP callee-saved registers.
// For SIMD types wider than 8 bytes, the caller is responsible for saving and restoring the upper bytes.
return ((type == TYP_SIMD16) || (type == TYP_SIMD12));
}
#else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#error("Unknown target architecture for FEATURE_SIMD")
#endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
protected:
// Some things are used by both LSRA and regpredict allocators.
FrameType rpFrameType;
bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once
bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason));
private:
Lowering* m_pLowering; // Lowering; needed to Lower IR that's added or modified after Lowering.
LinearScanInterface* m_pLinearScan; // Linear Scan allocator
/* raIsVarargsStackArg is called by raMarkStkVars and by
lvaComputeRefCounts. It identifies the special case
where a varargs function has a parameter passed on the
stack, other than the special varargs handle. Such parameters
require special treatment, because they cannot be tracked
by the GC (their offsets in the stack are not known
at compile time).
*/
bool raIsVarargsStackArg(unsigned lclNum)
{
#ifdef TARGET_X86
LclVarDsc* varDsc = lvaGetDesc(lclNum);
assert(varDsc->lvIsParam);
return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg));
#else // TARGET_X86
return false;
#endif // TARGET_X86
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX EEInterface XX
XX XX
XX Get to the class and method info from the Execution Engine given XX
XX tokens for the class and method XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// Get handles
void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedToken,
CORINFO_CALLINFO_FLAGS flags,
CORINFO_CALL_INFO* pResult);
void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS flags,
CORINFO_FIELD_INFO* pResult);
// Get the flags
bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd);
bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn);
bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd);
var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr);
#if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS)
const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className);
const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd);
unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle);
bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method);
CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method);
#endif
var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned);
CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list);
CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context);
unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
static unsigned eeGetArgAlignment(var_types type, bool isFloatHfa);
// VOM info, method sigs
void eeGetSig(unsigned sigTok,
CORINFO_MODULE_HANDLE scope,
CORINFO_CONTEXT_HANDLE context,
CORINFO_SIG_INFO* retSig);
void eeGetCallSiteSig(unsigned sigTok,
CORINFO_MODULE_HANDLE scope,
CORINFO_CONTEXT_HANDLE context,
CORINFO_SIG_INFO* retSig);
void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr);
// Method entry-points, instrs
CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method);
CORINFO_EE_INFO eeInfo;
bool eeInfoInitialized;
CORINFO_EE_INFO* eeGetEEInfo();
// Gets the offset of an SDArray's first element
static unsigned eeGetArrayDataOffset();
// Get the offset of an MDArray's first element
static unsigned eeGetMDArrayDataOffset(unsigned rank);
// Get the offset of an MDArray's dimension length for a given dimension.
static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension);
// Get the offset of an MDArray's lower bound for a given dimension.
static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension);
GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig);
// Returns the page size for the target machine as reported by the EE.
target_size_t eeGetPageSize()
{
return (target_size_t)eeGetEEInfo()->osPageSize;
}
//------------------------------------------------------------------------
// VirtualStubParam: virtual stub dispatch extra parameter (slot address).
//
// It represents the ABI- and target-specific register for the parameter.
//
class VirtualStubParamInfo
{
public:
VirtualStubParamInfo(bool isCoreRTABI)
{
#if defined(TARGET_X86)
reg = REG_EAX;
regMask = RBM_EAX;
#elif defined(TARGET_AMD64)
if (isCoreRTABI)
{
reg = REG_R10;
regMask = RBM_R10;
}
else
{
reg = REG_R11;
regMask = RBM_R11;
}
#elif defined(TARGET_ARM)
if (isCoreRTABI)
{
reg = REG_R12;
regMask = RBM_R12;
}
else
{
reg = REG_R4;
regMask = RBM_R4;
}
#elif defined(TARGET_ARM64)
reg = REG_R11;
regMask = RBM_R11;
#else
#error Unsupported or unset target architecture
#endif
}
regNumber GetReg() const
{
return reg;
}
_regMask_enum GetRegMask() const
{
return regMask;
}
private:
regNumber reg;
_regMask_enum regMask;
};
VirtualStubParamInfo* virtualStubParamInfo;
bool IsTargetAbi(CORINFO_RUNTIME_ABI abi)
{
return eeGetEEInfo()->targetAbi == abi;
}
bool generateCFIUnwindCodes()
{
#if defined(FEATURE_CFI_SUPPORT)
return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI);
#else
return false;
#endif
}
// Debugging support - Line number info
void eeGetStmtOffsets();
unsigned eeBoundariesCount;
ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE
void eeSetLIcount(unsigned count);
void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc);
void eeSetLIdone();
#ifdef DEBUG
static void eeDispILOffs(IL_OFFSET offs);
static void eeDispSourceMappingOffs(uint32_t offs);
static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line);
void eeDispLineInfos();
#endif // DEBUG
// Debugging support - Local var info
void eeGetVars();
unsigned eeVarsCount;
struct VarResultInfo
{
UNATIVE_OFFSET startOffset;
UNATIVE_OFFSET endOffset;
DWORD varNumber;
CodeGenInterface::siVarLoc loc;
} * eeVars;
void eeSetLVcount(unsigned count);
void eeSetLVinfo(unsigned which,
UNATIVE_OFFSET startOffs,
UNATIVE_OFFSET length,
unsigned varNum,
const CodeGenInterface::siVarLoc& loc);
void eeSetLVdone();
#ifdef DEBUG
void eeDispVar(ICorDebugInfo::NativeVarInfo* var);
void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars);
#endif // DEBUG
// ICorJitInfo wrappers
void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize);
void eeAllocUnwindInfo(BYTE* pHotCode,
BYTE* pColdCode,
ULONG startOffset,
ULONG endOffset,
ULONG unwindSize,
BYTE* pUnwindBlock,
CorJitFuncKind funcKind);
void eeSetEHcount(unsigned cEH);
void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause);
WORD eeGetRelocTypeHint(void* target);
// ICorStaticInfo wrapper functions
bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken);
#if defined(UNIX_AMD64_ABI)
#ifdef DEBUG
static void dumpSystemVClassificationType(SystemVClassificationType ct);
#endif // DEBUG
void eeGetSystemVAmd64PassStructInRegisterDescriptor(
/*IN*/ CORINFO_CLASS_HANDLE structHnd,
/*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr);
#endif // UNIX_AMD64_ABI
template <typename ParamType>
bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param)
{
return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
}
bool eeRunWithErrorTrapImp(void (*function)(void*), void* param);
template <typename ParamType>
bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param)
{
return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
}
bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param);
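// Illustrative use of the error-trap wrappers (a sketch; 'TrapParam' and its fields are hypothetical):
//   struct TrapParam { Compiler* pThis; CORINFO_CLASS_HANDLE clsHnd; unsigned size; };
//   TrapParam param = {this, clsHnd, 0};
//   bool success = eeRunWithErrorTrap<TrapParam>(
//       [](TrapParam* p) { p->size = p->pThis->info.compCompHnd->getClassSize(p->clsHnd); }, &param);
//   // 'success' is false if the EE callback threw; 'param.size' is meaningful only when it is true.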
// Utility functions
const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr);
#if defined(DEBUG)
const WCHAR* eeGetCPString(size_t stringHandle);
#endif
const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd);
static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper);
static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method);
static bool IsSharedStaticHelper(GenTree* tree);
static bool IsGcSafePoint(GenTreeCall* call);
static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs);
// returns true/false if 'field' is a Jit Data offset
static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field);
// returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB)
static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field);
/*****************************************************************************/
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX CodeGenerator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
CodeGenInterface* codeGen;
// Record the instr offset mapping to the generated code
jitstd::list<IPmappingDsc> genIPmappings;
#ifdef DEBUG
jitstd::list<PreciseIPMapping> genPreciseIPmappings;
#endif
// Managed RetVal - A side hash table meant to record the mapping from a
// GT_CALL node to its debug info. This info is used to emit sequence points
// that can be used by debugger to determine the native offset at which the
// managed RetVal will be available.
//
// We could store the debug info directly in the GT_CALL node. That was ruled out in
// favor of a side table for two reasons: 1) we need debug info only for those
// GT_CALL nodes (created during importation) that correspond to an IL call and
// whose return type is other than TYP_VOID; 2) the GT_CALL node is a frequently used
// structure, and the IL offset is needed only when generating debuggable code, so
// it is desirable to avoid the memory size penalty in retail scenarios.
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable;
CallSiteDebugInfoTable* genCallSite2DebugInfoMap;
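// Illustrative lookup (a sketch; 'call' is a hypothetical GT_CALL node recorded during importation):
//   DebugInfo callDI;
//   if ((genCallSite2DebugInfoMap != nullptr) && genCallSite2DebugInfoMap->Lookup(call, &callDI))
//   {
//       // 'callDI' describes the IL location whose native offset marks when the managed RetVal is available.
//   }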
unsigned genReturnLocal; // Local number for the return value when applicable.
BasicBlock* genReturnBB; // jumped to when not optimizing for speed.
// The following properties are part of CodeGenContext. Getters are provided here for
// convenience and backward compatibility, but the properties can only be set by invoking
// the setter on CodeGenContext directly.
emitter* GetEmitter() const
{
return codeGen->GetEmitter();
}
bool isFramePointerUsed() const
{
return codeGen->isFramePointerUsed();
}
bool GetInterruptible()
{
return codeGen->GetInterruptible();
}
void SetInterruptible(bool value)
{
codeGen->SetInterruptible(value);
}
#if DOUBLE_ALIGN
const bool genDoubleAlign()
{
return codeGen->doDoubleAlign();
}
DWORD getCanDoubleAlign();
bool shouldDoubleAlign(unsigned refCntStk,
unsigned refCntReg,
weight_t refCntWtdReg,
unsigned refCntStkParam,
weight_t refCntWtdStkDbl);
#endif // DOUBLE_ALIGN
bool IsFullPtrRegMapRequired()
{
return codeGen->IsFullPtrRegMapRequired();
}
void SetFullPtrRegMapRequired(bool value)
{
codeGen->SetFullPtrRegMapRequired(value);
}
// Things that MAY belong either in CodeGen or CodeGenContext
#if defined(FEATURE_EH_FUNCLETS)
FuncInfoDsc* compFuncInfos;
unsigned short compCurrFuncIdx;
unsigned short compFuncInfoCount;
unsigned short compFuncCount()
{
assert(fgFuncletsCreated);
return compFuncInfoCount;
}
#else // !FEATURE_EH_FUNCLETS
// This is a no-op when there are no funclets!
void genUpdateCurrentFunclet(BasicBlock* block)
{
return;
}
FuncInfoDsc compFuncInfoRoot;
static const unsigned compCurrFuncIdx = 0;
unsigned short compFuncCount()
{
return 1;
}
#endif // !FEATURE_EH_FUNCLETS
FuncInfoDsc* funCurrentFunc();
void funSetCurrentFunc(unsigned funcIdx);
FuncInfoDsc* funGetFunc(unsigned funcIdx);
unsigned int funGetFuncIdx(BasicBlock* block);
// LIVENESS
VARSET_TP compCurLife; // current live variables
GenTree* compCurLifeTree; // node after which compCurLife has been computed
// Compare the given "newLife" with the last set of live variables and update
// the codeGen "gcInfo", siScopes, and "regSet" with the new variables' homes/liveness.
template <bool ForCodeGen>
void compChangeLife(VARSET_VALARG_TP newLife);
// Update the GC masks and register masks, and report changes to variables' homes, given a set of
// current live variables, if changes have happened since "compCurLife".
template <bool ForCodeGen>
inline void compUpdateLife(VARSET_VALARG_TP newLife);
// Gets a register mask that represents the kill set for a helper call, since
// not all JIT helper calls follow the standard ABI on the target architecture.
regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper);
#ifdef TARGET_ARM
// Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at
// "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the
// struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" --
// i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and
// a double, and we start at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask.
void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask);
#endif // TARGET_ARM
// If "tree" is a indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR
// node, else NULL.
static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree);
// This map is indexed by GT_OBJ nodes that are address of promoted struct variables, which
// have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this
// table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise,
// the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field
// vars of the promoted struct local that go dead at the given node (the set bits are the bits
// for the tracked var indices of the field vars, as in a live var set).
//
// The map is allocated on demand so all map operations should use one of the following three
// wrapper methods.
NodeToVarsetPtrMap* m_promotedStructDeathVars;
NodeToVarsetPtrMap* GetPromotedStructDeathVars()
{
if (m_promotedStructDeathVars == nullptr)
{
m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator());
}
return m_promotedStructDeathVars;
}
void ClearPromotedStructDeathVars()
{
if (m_promotedStructDeathVars != nullptr)
{
m_promotedStructDeathVars->RemoveAll();
}
}
bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits)
{
*bits = nullptr;
bool result = false;
if (m_promotedStructDeathVars != nullptr)
{
result = m_promotedStructDeathVars->Lookup(tree, bits);
}
return result;
}
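// Illustrative use of the wrappers above (a sketch; 'objNode' is a hypothetical GT_OBJ annotated with
// GTF_VAR_DEATH):
//   VARSET_TP* deathVars = nullptr;
//   if (LookupPromotedStructDeathVars(objNode, &deathVars))
//   {
//       // only the field vars whose tracked indices are set in '*deathVars' die at 'objNode'
//   }
//   else
//   {
//       // no entry: all tracked field vars of the promoted struct die at 'objNode'
//   }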
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX UnwindInfo XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#if !defined(__GNUC__)
#pragma region Unwind information
#endif
public:
//
// Infrastructure functions: start/stop/reserve/emit.
//
void unwindBegProlog();
void unwindEndProlog();
void unwindBegEpilog();
void unwindEndEpilog();
void unwindReserve();
void unwindEmit(void* pHotCode, void* pColdCode);
//
// Specific unwind information functions: called by code generation to indicate a particular
// prolog or epilog unwindable instruction has been generated.
//
void unwindPush(regNumber reg);
void unwindAllocStack(unsigned size);
void unwindSetFrameReg(regNumber reg, unsigned offset);
void unwindSaveReg(regNumber reg, unsigned offset);
#if defined(TARGET_ARM)
void unwindPushMaskInt(regMaskTP mask);
void unwindPushMaskFloat(regMaskTP mask);
void unwindPopMaskInt(regMaskTP mask);
void unwindPopMaskFloat(regMaskTP mask);
void unwindBranch16(); // The epilog terminates with a 16-bit branch (e.g., "bx lr")
void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only
// called via unwindPadding().
void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
// instruction and the current location.
#endif // TARGET_ARM
#if defined(TARGET_ARM64)
void unwindNop();
void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
// instruction and the current location.
void unwindSaveReg(regNumber reg, int offset); // str reg, [sp, #offset]
void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]!
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]
void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]!
void unwindSaveNext(); // unwind code: save_next
void unwindReturn(regNumber reg); // ret lr
#endif // defined(TARGET_ARM64)
//
// Private "helper" functions for the unwind implementation.
//
private:
#if defined(FEATURE_EH_FUNCLETS)
void unwindGetFuncLocations(FuncInfoDsc* func,
bool getHotSectionData,
/* OUT */ emitLocation** ppStartLoc,
/* OUT */ emitLocation** ppEndLoc);
#endif // FEATURE_EH_FUNCLETS
void unwindReserveFunc(FuncInfoDsc* func);
void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
#if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS))
void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode);
void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode);
#endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS)
UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func);
#if defined(TARGET_AMD64)
void unwindBegPrologWindows();
void unwindPushWindows(regNumber reg);
void unwindAllocStackWindows(unsigned size);
void unwindSetFrameRegWindows(regNumber reg, unsigned offset);
void unwindSaveRegWindows(regNumber reg, unsigned offset);
#ifdef UNIX_AMD64_ABI
void unwindSaveRegCFI(regNumber reg, unsigned offset);
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM)
void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16);
void unwindPushPopMaskFloat(regMaskTP mask);
#endif // TARGET_ARM
#if defined(FEATURE_CFI_SUPPORT)
short mapRegNumToDwarfReg(regNumber reg);
void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0);
void unwindPushPopCFI(regNumber reg);
void unwindBegPrologCFI();
void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat);
void unwindAllocStackCFI(unsigned size);
void unwindSetFrameRegCFI(regNumber reg, unsigned offset);
void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
#ifdef DEBUG
void DumpCfiInfo(bool isHotCode,
UNATIVE_OFFSET startOffset,
UNATIVE_OFFSET endOffset,
DWORD cfiCodeBytes,
const CFI_CODE* const pCfiCode);
#endif
#endif // FEATURE_CFI_SUPPORT
#if !defined(__GNUC__)
#pragma endregion // Note: region is NOT under !defined(__GNUC__)
#endif
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX SIMD XX
XX XX
XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX
XX that contains the distinguished, well-known SIMD type definitions). XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
bool IsBaselineSimdIsaSupported()
{
#ifdef FEATURE_SIMD
#if defined(TARGET_XARCH)
CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2;
#elif defined(TARGET_ARM64)
CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return compOpportunisticallyDependsOn(minimumIsa);
#else
return false;
#endif
}
#if defined(DEBUG)
bool IsBaselineSimdIsaSupportedDebugOnly()
{
#ifdef FEATURE_SIMD
#if defined(TARGET_XARCH)
CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2;
#elif defined(TARGET_ARM64)
CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return compIsaSupportedDebugOnly(minimumIsa);
#else
return false;
#endif // FEATURE_SIMD
}
#endif // DEBUG
// Get highest available level for SIMD codegen
SIMDLevel getSIMDSupportLevel()
{
#if defined(TARGET_XARCH)
if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
return SIMD_AVX2_Supported;
}
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
return SIMD_SSE4_Supported;
}
// min bar is SSE2
return SIMD_SSE2_Supported;
#else
assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch");
unreached();
return SIMD_Not_Supported;
#endif
}
bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd)
{
return info.compCompHnd->isIntrinsicType(clsHnd);
}
const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName)
{
return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName);
}
CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index)
{
return info.compCompHnd->getTypeInstantiationArgument(cls, index);
}
#ifdef FEATURE_SIMD
// Should we support SIMD intrinsics?
bool featureSIMD;
// Should we recognize SIMD types?
// We always do this on ARM64 to support HVA types.
bool supportSIMDTypes()
{
#ifdef TARGET_ARM64
return true;
#else
return featureSIMD;
#endif
}
// Have we identified any SIMD types?
// This is currently used by struct promotion to avoid querying the type information for a struct
// field (to see if it is a SIMD type) when we haven't seen any SIMD types or operations in
// the method.
bool _usesSIMDTypes;
bool usesSIMDTypes()
{
return _usesSIMDTypes;
}
void setUsesSIMDTypes(bool value)
{
_usesSIMDTypes = value;
}
// This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics
// that require indexed access to the individual fields of the vector, which is not well supported
// by the hardware. It is allocated when/if such situations are encountered during Lowering.
unsigned lvaSIMDInitTempVarNum;
struct SIMDHandlesCache
{
// SIMD Types
CORINFO_CLASS_HANDLE SIMDFloatHandle;
CORINFO_CLASS_HANDLE SIMDDoubleHandle;
CORINFO_CLASS_HANDLE SIMDIntHandle;
CORINFO_CLASS_HANDLE SIMDUShortHandle;
CORINFO_CLASS_HANDLE SIMDUByteHandle;
CORINFO_CLASS_HANDLE SIMDShortHandle;
CORINFO_CLASS_HANDLE SIMDByteHandle;
CORINFO_CLASS_HANDLE SIMDLongHandle;
CORINFO_CLASS_HANDLE SIMDUIntHandle;
CORINFO_CLASS_HANDLE SIMDULongHandle;
CORINFO_CLASS_HANDLE SIMDNIntHandle;
CORINFO_CLASS_HANDLE SIMDNUIntHandle;
CORINFO_CLASS_HANDLE SIMDVector2Handle;
CORINFO_CLASS_HANDLE SIMDVector3Handle;
CORINFO_CLASS_HANDLE SIMDVector4Handle;
CORINFO_CLASS_HANDLE SIMDVectorHandle;
#ifdef FEATURE_HW_INTRINSICS
#if defined(TARGET_ARM64)
CORINFO_CLASS_HANDLE Vector64FloatHandle;
CORINFO_CLASS_HANDLE Vector64DoubleHandle;
CORINFO_CLASS_HANDLE Vector64IntHandle;
CORINFO_CLASS_HANDLE Vector64UShortHandle;
CORINFO_CLASS_HANDLE Vector64UByteHandle;
CORINFO_CLASS_HANDLE Vector64ShortHandle;
CORINFO_CLASS_HANDLE Vector64ByteHandle;
CORINFO_CLASS_HANDLE Vector64LongHandle;
CORINFO_CLASS_HANDLE Vector64UIntHandle;
CORINFO_CLASS_HANDLE Vector64ULongHandle;
CORINFO_CLASS_HANDLE Vector64NIntHandle;
CORINFO_CLASS_HANDLE Vector64NUIntHandle;
#endif // defined(TARGET_ARM64)
CORINFO_CLASS_HANDLE Vector128FloatHandle;
CORINFO_CLASS_HANDLE Vector128DoubleHandle;
CORINFO_CLASS_HANDLE Vector128IntHandle;
CORINFO_CLASS_HANDLE Vector128UShortHandle;
CORINFO_CLASS_HANDLE Vector128UByteHandle;
CORINFO_CLASS_HANDLE Vector128ShortHandle;
CORINFO_CLASS_HANDLE Vector128ByteHandle;
CORINFO_CLASS_HANDLE Vector128LongHandle;
CORINFO_CLASS_HANDLE Vector128UIntHandle;
CORINFO_CLASS_HANDLE Vector128ULongHandle;
CORINFO_CLASS_HANDLE Vector128NIntHandle;
CORINFO_CLASS_HANDLE Vector128NUIntHandle;
#if defined(TARGET_XARCH)
CORINFO_CLASS_HANDLE Vector256FloatHandle;
CORINFO_CLASS_HANDLE Vector256DoubleHandle;
CORINFO_CLASS_HANDLE Vector256IntHandle;
CORINFO_CLASS_HANDLE Vector256UShortHandle;
CORINFO_CLASS_HANDLE Vector256UByteHandle;
CORINFO_CLASS_HANDLE Vector256ShortHandle;
CORINFO_CLASS_HANDLE Vector256ByteHandle;
CORINFO_CLASS_HANDLE Vector256LongHandle;
CORINFO_CLASS_HANDLE Vector256UIntHandle;
CORINFO_CLASS_HANDLE Vector256ULongHandle;
CORINFO_CLASS_HANDLE Vector256NIntHandle;
CORINFO_CLASS_HANDLE Vector256NUIntHandle;
#endif // defined(TARGET_XARCH)
#endif // FEATURE_HW_INTRINSICS
SIMDHandlesCache()
{
memset(this, 0, sizeof(*this));
}
};
SIMDHandlesCache* m_simdHandleCache;
// Get an appropriate "zero" for the given type and class handle.
GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle);
// Get the handle for a SIMD type.
CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType)
{
if (m_simdHandleCache == nullptr)
{
// This may happen if the JIT generates SIMD nodes on its own, without importing them.
// Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache.
return NO_CLASS_HANDLE;
}
if (simdBaseJitType == CORINFO_TYPE_FLOAT)
{
switch (simdType)
{
case TYP_SIMD8:
return m_simdHandleCache->SIMDVector2Handle;
case TYP_SIMD12:
return m_simdHandleCache->SIMDVector3Handle;
case TYP_SIMD16:
if ((getSIMDVectorType() == TYP_SIMD32) ||
(m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE))
{
return m_simdHandleCache->SIMDVector4Handle;
}
break;
case TYP_SIMD32:
break;
default:
unreached();
}
}
assert(emitTypeSize(simdType) <= largestEnregisterableStructSize());
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->SIMDFloatHandle;
case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->SIMDDoubleHandle;
case CORINFO_TYPE_INT:
return m_simdHandleCache->SIMDIntHandle;
case CORINFO_TYPE_USHORT:
return m_simdHandleCache->SIMDUShortHandle;
case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->SIMDUByteHandle;
case CORINFO_TYPE_SHORT:
return m_simdHandleCache->SIMDShortHandle;
case CORINFO_TYPE_BYTE:
return m_simdHandleCache->SIMDByteHandle;
case CORINFO_TYPE_LONG:
return m_simdHandleCache->SIMDLongHandle;
case CORINFO_TYPE_UINT:
return m_simdHandleCache->SIMDUIntHandle;
case CORINFO_TYPE_ULONG:
return m_simdHandleCache->SIMDULongHandle;
case CORINFO_TYPE_NATIVEINT:
return m_simdHandleCache->SIMDNIntHandle;
case CORINFO_TYPE_NATIVEUINT:
return m_simdHandleCache->SIMDNUIntHandle;
default:
assert(!"Didn't find a class handle for simdType");
}
return NO_CLASS_HANDLE;
}
// Returns true if this is a SIMD type that should be considered an opaque
// vector type (i.e. do not analyze or promote its fields).
// Note that all but the fixed vector types are opaque, even though they may
// actually be declared as having fields.
bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const
{
return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) &&
(structHandle != m_simdHandleCache->SIMDVector3Handle) &&
(structHandle != m_simdHandleCache->SIMDVector4Handle));
}
// Returns true if the tree corresponds to a TYP_SIMD lcl var.
// Note that both SIMD vector args and locals are marked with lvSIMDType = true, but the
// type of an arg node is TYP_BYREF while that of a local node is TYP_SIMD or TYP_STRUCT.
bool isSIMDTypeLocal(GenTree* tree)
{
return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType;
}
// Returns true if the lclVar is an opaque SIMD type.
bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const
{
if (!varDsc->lvSIMDType)
{
return false;
}
return isOpaqueSIMDType(varDsc->GetStructHnd());
}
static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
{
return (intrinsicId == SIMDIntrinsicEqual);
}
// Returns base JIT type of a TYP_SIMD local.
// Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD.
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree)
{
if (isSIMDTypeLocal(tree))
{
return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType();
}
return CORINFO_TYPE_UNDEF;
}
bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
if (isIntrinsicType(clsHnd))
{
const char* namespaceName = nullptr;
(void)getClassNameFromMetadata(clsHnd, &namespaceName);
return strcmp(namespaceName, "System.Numerics") == 0;
}
return false;
}
bool isSIMDClass(typeInfo* pTypeInfo)
{
return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass());
}
bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
#ifdef FEATURE_HW_INTRINSICS
if (isIntrinsicType(clsHnd))
{
const char* namespaceName = nullptr;
(void)getClassNameFromMetadata(clsHnd, &namespaceName);
return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0;
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
bool isHWSIMDClass(typeInfo* pTypeInfo)
{
#ifdef FEATURE_HW_INTRINSICS
return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass());
#else
return false;
#endif
}
bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd);
}
bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo)
{
return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo);
}
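    // Illustrative classification by namespace (per the helpers above): System.Numerics.Vector3 is
    // reported by isSIMDClass, System.Runtime.Intrinsics.Vector128<float> by isHWSIMDClass (when
    // FEATURE_HW_INTRINSICS is defined), and either one satisfies isSIMDorHWSIMDClass.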
// Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF
// if it is not a SIMD type or is an unsupported base JIT type.
CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr);
CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd)
{
return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr);
}
// Get SIMD Intrinsic info given the method handle.
// Also sets typeHnd, argCount, baseType and sizeBytes out params.
const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd,
CORINFO_METHOD_HANDLE methodHnd,
CORINFO_SIG_INFO* sig,
bool isNewObj,
unsigned* argCount,
CorInfoType* simdBaseJitType,
unsigned* sizeBytes);
    // Pops and returns a GenTree node from the importer's type stack.
// Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes.
GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr);
// Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain given relop result.
SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
CORINFO_CLASS_HANDLE typeHnd,
unsigned simdVectorSize,
CorInfoType* inOutBaseJitType,
GenTree** op1,
GenTree** op2);
#if defined(TARGET_XARCH)
// Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain == comparison result.
SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
unsigned simdVectorSize,
GenTree** op1,
GenTree** op2);
#endif // defined(TARGET_XARCH)
void setLclRelatedToSIMDIntrinsic(GenTree* tree);
bool areFieldsContiguous(GenTree* op1, GenTree* op2);
bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second);
bool areArrayElementsContiguous(GenTree* op1, GenTree* op2);
bool areArgumentsContiguous(GenTree* op1, GenTree* op2);
GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize);
// check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT.
GenTree* impSIMDIntrinsic(OPCODE opcode,
GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef);
GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd);
// Whether SIMD vector occupies part of SIMD register.
// SSE2: vector2f/3f are considered sub register SIMD types.
// AVX: vector2f, 3f and 4f are all considered sub register SIMD types.
bool isSubRegisterSIMDType(GenTreeSIMD* simdNode)
{
unsigned vectorRegisterByteLength;
#if defined(TARGET_XARCH)
// Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded
        // with the AOT compiler, so that it cannot change from AOT compilation time to runtime.
// This api does not require such fixing as it merely pertains to the size of the simd type
// relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here
// does not preclude the code from being used on a machine with a larger vector length.)
if (getSIMDSupportLevel() < SIMD_AVX2_Supported)
{
vectorRegisterByteLength = 16;
}
else
{
vectorRegisterByteLength = 32;
}
#else
vectorRegisterByteLength = getSIMDVectorRegisterByteLength();
#endif
return (simdNode->GetSimdSize() < vectorRegisterByteLength);
}
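    // Illustrative example (hypothetical scenario): on an SSE2-only target the vector register is
    // 16 bytes, so a Vector3 node (GetSimdSize() == 12) is a sub-register SIMD type, while a
    // 16-byte SIMD node is not; with AVX2 the threshold becomes 32 bytes.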
// Get the type for the hardware SIMD vector.
// This is the maximum SIMD type supported for this target.
var_types getSIMDVectorType()
{
#if defined(TARGET_XARCH)
if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
{
return TYP_SIMD32;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return TYP_SIMD16;
}
#elif defined(TARGET_ARM64)
return TYP_SIMD16;
#else
assert(!"getSIMDVectorType() unimplemented on target arch");
unreached();
#endif
}
// Get the size of the SIMD type in bytes
int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd)
{
unsigned sizeBytes = 0;
(void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
return sizeBytes;
}
    // Get the number of elements of baseType of a SIMD vector given by its size and baseType
    static int getSIMDVectorLength(unsigned simdSize, var_types baseType);
    // Get the number of elements of baseType of a SIMD vector given by its type handle
int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd);
// Get preferred alignment of SIMD type.
int getSIMDTypeAlignment(var_types simdType);
    // Get the number of bytes in a System.Numerics.Vector<T> for the current compilation.
    // Note - cannot be used for System.Runtime.Intrinsics
unsigned getSIMDVectorRegisterByteLength()
{
#if defined(TARGET_XARCH)
if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
{
return YMM_REGSIZE_BYTES;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return XMM_REGSIZE_BYTES;
}
#elif defined(TARGET_ARM64)
return FP_REGSIZE_BYTES;
#else
assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch");
unreached();
#endif
}
// The minimum and maximum possible number of bytes in a SIMD vector.
// maxSIMDStructBytes
    // The maximum SIMD size supported by System.Numerics.Vectors or System.Runtime.Intrinsics
// SSE: 16-byte Vector<T> and Vector128<T>
// AVX: 32-byte Vector256<T> (Vector<T> is 16-byte)
// AVX2: 32-byte Vector<T> and Vector256<T>
unsigned int maxSIMDStructBytes()
{
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
if (compOpportunisticallyDependsOn(InstructionSet_AVX))
{
return YMM_REGSIZE_BYTES;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return XMM_REGSIZE_BYTES;
}
#else
return getSIMDVectorRegisterByteLength();
#endif
}
unsigned int minSIMDStructBytes()
{
return emitTypeSize(TYP_SIMD8);
}
public:
// Returns the codegen type for a given SIMD size.
static var_types getSIMDTypeForSize(unsigned size)
{
var_types simdType = TYP_UNDEF;
if (size == 8)
{
simdType = TYP_SIMD8;
}
else if (size == 12)
{
simdType = TYP_SIMD12;
}
else if (size == 16)
{
simdType = TYP_SIMD16;
}
else if (size == 32)
{
simdType = TYP_SIMD32;
}
else
{
noway_assert(!"Unexpected size for SIMD type");
}
return simdType;
}
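    // Usage sketch (assumes the handle is already known to describe a SIMD type):
    //
    //   unsigned sizeBytes = 0;
    //   getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
    //   var_types simdType = getSIMDTypeForSize(sizeBytes); // e.g. 16 -> TYP_SIMD16, 32 -> TYP_SIMD32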
private:
unsigned getSIMDInitTempVarNum(var_types simdType);
#else // !FEATURE_SIMD
bool isOpaqueSIMDLclVar(LclVarDsc* varDsc)
{
return false;
}
#endif // FEATURE_SIMD
public:
//------------------------------------------------------------------------
    // largestEnregisterableStructSize: The size in bytes of the largest struct that can be enregistered.
//
// Notes: It is not guaranteed that the struct of this size or smaller WILL be a
// candidate for enregistration.
unsigned largestEnregisterableStructSize()
{
#ifdef FEATURE_SIMD
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
if (opts.IsReadyToRun())
{
            // Return a constant instead of maxSIMDStructBytes, as maxSIMDStructBytes performs
            // checks that are affected by the current level of instruction set support and would
            // otherwise cause the highest level of instruction set support to be reported to crossgen2.
            // This api is only ever used as an optimization or assert, so no reporting should
            // ever happen.
return YMM_REGSIZE_BYTES;
}
#endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
unsigned vectorRegSize = maxSIMDStructBytes();
assert(vectorRegSize >= TARGET_POINTER_SIZE);
return vectorRegSize;
#else // !FEATURE_SIMD
return TARGET_POINTER_SIZE;
#endif // !FEATURE_SIMD
}
// Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many
// structs will fit the criteria.
bool structSizeMightRepresentSIMDType(size_t structSize)
{
#ifdef FEATURE_SIMD
// Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT
// about the size of a struct under the assumption that the struct size needs to be recorded.
        // By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T>
        // is enregistered will not be messaged to the R2R compiler.
return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize());
#else
return false;
#endif // FEATURE_SIMD
}
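    // Illustrative example (hypothetical limits): with minSIMDStructBytes() == 8 and
    // largestEnregisterableStructSize() == 32, a 12-byte struct might be a SIMD type (Vector3 is
    // 12 bytes), while a 6-byte or 64-byte struct cannot be.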
#ifdef FEATURE_SIMD
static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID);
#endif // FEATURE_HW_INTRINSICS
private:
    // These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType()
    // is defined appropriately for both FEATURE_SIMD and !FEATURE_SIMD. Using
    // these routines also avoids the need for #ifdef FEATURE_SIMD specific code.
    // Is this var of type simd struct?
bool lclVarIsSIMDType(unsigned varNum)
{
return lvaGetDesc(varNum)->lvIsSIMDType();
}
// Is this Local node a SIMD local?
bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree)
{
return lclVarIsSIMDType(lclVarTree->GetLclNum());
}
// Returns true if the TYP_SIMD locals on stack are aligned at their
// preferred byte boundary specified by getSIMDTypeAlignment().
//
// As per the Intel manual, the preferred alignment for AVX vectors is
    // 32-bytes. It is not clear whether the additional stack space used in
    // aligning the stack is worth the benefit, so for now we use 16-byte
    // alignment for AVX 256-bit vectors with unaligned load/stores to/from
// memory. On x86, the stack frame is aligned to 4 bytes. We need to extend
// existing support for double (8-byte) alignment to 16 or 32 byte
// alignment for frames with local SIMD vars, if that is determined to be
// profitable.
//
// On Amd64 and SysV, RSP+8 is aligned on entry to the function (before
// prolog has run). This means that in RBP-based frames RBP will be 16-byte
// aligned. For RSP-based frames these are only sometimes aligned, depending
// on the frame size.
//
bool isSIMDTypeLocalAligned(unsigned varNum)
{
#if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES
if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF)
{
// TODO-Cleanup: Can't this use the lvExactSize on the varDsc?
int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType);
if (alignment <= STACK_ALIGN)
{
bool rbpBased;
int off = lvaFrameAddress(varNum, &rbpBased);
// On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the
// first instruction of a function. If our frame is RBP based
// then RBP will always be 16 bytes aligned, so we can simply
// check the offset.
if (rbpBased)
{
return (off % alignment) == 0;
}
// For RSP-based frame the alignment of RSP depends on our
// locals. rsp+8 is aligned on entry and we just subtract frame
// size so it is not hard to compute. Note that the compiler
// tries hard to make sure the frame size means RSP will be
// 16-byte aligned, but for leaf functions without locals (i.e.
// frameSize = 0) it will not be.
int frameSize = codeGen->genTotalFrameSize();
return ((8 - frameSize + off) % alignment) == 0;
}
}
#endif // FEATURE_SIMD
return false;
}
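    // Worked example of the RSP-based check above (illustrative numbers only): with
    // frameSize == 40, off == -16 and alignment == 16, the expression (8 - frameSize + off)
    // evaluates to -48, and (-48 % 16) == 0, so the local is considered 16-byte aligned.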
#ifdef DEBUG
// Answer the question: Is a particular ISA supported?
// Use this api when asking the question so that future
// ISA questions can be asked correctly or when asserting
// support/nonsupport for an instruction set
bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
return (opts.compSupportsISA & (1ULL << isa)) != 0;
#else
return false;
#endif
}
#endif // DEBUG
bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const;
// Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
// The result of this api call will exactly match the target machine
// on which the function is executed (except for CoreLib, where there are special rules)
bool compExactlyDependsOn(CORINFO_InstructionSet isa) const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
uint64_t isaBit = (1ULL << isa);
if ((opts.compSupportsISAReported & isaBit) == 0)
{
if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0))
((Compiler*)this)->opts.compSupportsISAExactly |= isaBit;
((Compiler*)this)->opts.compSupportsISAReported |= isaBit;
}
return (opts.compSupportsISAExactly & isaBit) != 0;
#else
return false;
#endif
}
// Ensure that code will not execute if an instruction set is usable. Call only
    // if the instruction set has previously been reported as unusable, but
    // that status has not yet been recorded to the AOT compiler
void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa)
{
        // use compExactlyDependsOn to capture and record the use of the isa
        bool isaUsable = compExactlyDependsOn(isa);
        // Assert that the isa is unusable; if it were usable, this function should never have been called.
assert(!isaUsable);
}
// Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
    // The result of this api call will match the target machine if the result is true.
    // If the result is false, then the target machine may still have support for the instruction set.
bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const
{
if ((opts.compSupportsISA & (1ULL << isa)) != 0)
{
return compExactlyDependsOn(isa);
}
else
{
return false;
}
}
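    // Illustrative contrast between the two queries above (a sketch, not new policy):
    //
    //   if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
    //   {
    //       // safe to emit AVX2 here; the use has been reported, so AOT code will not
    //       // run on a machine without AVX2
    //   }
    //
    // compExactlyDependsOn(InstructionSet_AVX2), by contrast, reports the query even when the ISA
    // is unsupported, recording the "not supported" answer with the VM as well (our reading of the
    // code above).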
// Answer the question: Is a particular ISA supported for explicit hardware intrinsics?
bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const
{
// Report intent to use the ISA to the EE
compExactlyDependsOn(isa);
return ((opts.compSupportsISA & (1ULL << isa)) != 0);
}
bool canUseVexEncoding() const
{
#ifdef TARGET_XARCH
return compOpportunisticallyDependsOn(InstructionSet_AVX);
#else
return false;
#endif
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Compiler XX
XX XX
XX Generic info about the compilation and the method being compiled. XX
XX It is responsible for driving the other phases. XX
XX It is also responsible for all the memory management. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
Compiler* InlineeCompiler; // The Compiler instance for the inlinee
InlineResult* compInlineResult; // The result of importing the inlinee method.
bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
bool compJmpOpUsed; // Does the method do a JMP
bool compLongUsed; // Does the method use TYP_LONG
bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE
bool compTailCallUsed; // Does the method do a tailcall
bool compTailPrefixSeen; // Does the method IL have tail. prefix
bool compLocallocSeen; // Does the method IL have localloc opcode
bool compLocallocUsed; // Does the method use localloc.
bool compLocallocOptimized; // Does the method have an optimized localloc
bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON
bool compQmarkRationalized; // Is it allowed to use a GT_QMARK/GT_COLON node.
    bool compUnsafeCastUsed; // Does the method use LDIND/STIND to cast between scalar/reference types
bool compHasBackwardJump; // Does the method (or some inlinee) have a lexically backwards jump?
bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler?
bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts
bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts
bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set
// NOTE: These values are only reliable after
// the importing is completely finished.
#ifdef DEBUG
// State information - which phases have completed?
// These are kept together for easy discoverability
bool bRangeAllowStress;
bool compCodeGenDone;
int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks
bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done?
size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`.
size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder`
#endif // DEBUG
bool fgLocalVarLivenessDone; // Note that this one is used outside of debug.
bool fgLocalVarLivenessChanged;
bool compLSRADone;
bool compRationalIRForm;
bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method.
bool compGeneratingProlog;
bool compGeneratingEpilog;
bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack.
// Insert cookie on frame and code to check the cookie, like VC++ -GS.
bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local
// copies of susceptible parameters to avoid buffer overrun attacks through locals/params
bool getNeedsGSSecurityCookie() const
{
return compNeedsGSSecurityCookie;
}
void setNeedsGSSecurityCookie()
{
compNeedsGSSecurityCookie = true;
}
FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During
// frame layout calculations, this is the level we are currently
// computing.
//---------------------------- JITing options -----------------------------
enum codeOptimize
{
BLENDED_CODE,
SMALL_CODE,
FAST_CODE,
COUNT_OPT_CODE
};
struct Options
{
JitFlags* jitFlags; // all flags passed from the EE
// The instruction sets that the compiler is allowed to emit.
uint64_t compSupportsISA;
// The instruction sets that were reported to the VM as being used by the current method. Subset of
// compSupportsISA.
uint64_t compSupportsISAReported;
// The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations.
// Subset of compSupportsISA.
// The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only
// used via explicit hardware intrinsics.
uint64_t compSupportsISAExactly;
void setSupportedISAs(CORINFO_InstructionSetFlags isas)
{
compSupportsISA = isas.GetFlagsRaw();
}
unsigned compFlags; // method attributes
unsigned instrCount;
unsigned lvRefCount;
codeOptimize compCodeOpt; // what type of code optimizations
bool compUseCMOV;
// optimize maximally and/or favor speed over size?
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000
#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000
#define DEFAULT_MIN_OPTS_BB_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000
// Maximum number of locals before turning off the inlining
#define MAX_LV_NUM_COUNT_FOR_INLINING 512
bool compMinOpts;
bool compMinOptsIsSet;
#ifdef DEBUG
mutable bool compMinOptsIsUsed;
bool MinOpts() const
{
assert(compMinOptsIsSet);
compMinOptsIsUsed = true;
return compMinOpts;
}
bool IsMinOptsSet() const
{
return compMinOptsIsSet;
}
#else // !DEBUG
bool MinOpts() const
{
return compMinOpts;
}
bool IsMinOptsSet() const
{
return compMinOptsIsSet;
}
#endif // !DEBUG
bool OptimizationDisabled() const
{
return MinOpts() || compDbgCode;
}
bool OptimizationEnabled() const
{
return !OptimizationDisabled();
}
void SetMinOpts(bool val)
{
assert(!compMinOptsIsUsed);
assert(!compMinOptsIsSet || (compMinOpts == val));
compMinOpts = val;
compMinOptsIsSet = true;
}
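        // Illustrative protocol for the setter/getter above (inferred from the asserts, not a new rule):
        //
        //   opts.SetMinOpts(true);    // ok
        //   opts.SetMinOpts(true);    // ok - the same value may be set again
        //   bool m = opts.MinOpts();  // DEBUG: marks the value as used
        //   // opts.SetMinOpts(false); // would assert - value already used and/or changed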
// true if the CLFLG_* for an optimization is set.
bool OptEnabled(unsigned optFlag) const
{
return !!(compFlags & optFlag);
}
#ifdef FEATURE_READYTORUN
bool IsReadyToRun() const
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN);
}
#else
bool IsReadyToRun() const
{
return false;
}
#endif
// Check if the compilation is control-flow guard enabled.
bool IsCFGEnabled() const
{
#if defined(TARGET_ARM64) || defined(TARGET_AMD64)
// On these platforms we assume the register that the target is
// passed in is preserved by the validator and take care to get the
// target from the register for the call (even in debug mode).
static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0);
if (JitConfig.JitForceControlFlowGuard())
return true;
return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG);
#else
// The remaining platforms are not supported and would require some
// work to support.
//
// ARM32:
// The ARM32 validator does not preserve any volatile registers
// which means we have to take special care to allocate and use a
// callee-saved register (reloading the target from memory is a
// security issue).
//
// x86:
// On x86 some VSD calls disassemble the call site and expect an
// indirect call which is fundamentally incompatible with CFG.
// This would require a different way to pass this information
// through.
//
return false;
#endif
}
#ifdef FEATURE_ON_STACK_REPLACEMENT
bool IsOSR() const
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR);
}
#else
bool IsOSR() const
{
return false;
}
#endif
// true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
// PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as
// the current logic for frame setup initializes and pushes
// the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot
// safely be pushed/popped while the thread is in a preemptive state.).
bool ShouldUsePInvokeHelpers()
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) ||
jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
}
        // true if we should insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method
// prolog/epilog
bool IsReversePInvoke()
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
}
bool compScopeInfo; // Generate the LocalVar info ?
bool compDbgCode; // Generate debugger-friendly code?
bool compDbgInfo; // Gather debugging info?
bool compDbgEnC;
#ifdef PROFILING_SUPPORTED
bool compNoPInvokeInlineCB;
#else
static const bool compNoPInvokeInlineCB;
#endif
#ifdef DEBUG
bool compGcChecks; // Check arguments and return values to ensure they are sane
#endif
#if defined(DEBUG) && defined(TARGET_XARCH)
bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct.
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#if defined(DEBUG) && defined(TARGET_X86)
bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86.
#endif // defined(DEBUG) && defined(TARGET_X86)
bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen
#ifdef DEBUG
#if defined(TARGET_XARCH)
bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible
#endif
#endif // DEBUG
#ifdef UNIX_AMD64_ABI
        // This flag indicates whether there is a need to align the frame.
        // On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for
        // FastTailCall. These slots make the frame size non-zero, so the alignment logic will be called.
        // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with a frame size
        // of 0, in which case the frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by
        // remembering that there are calls and making sure the frame alignment logic is executed.
bool compNeedToAlignFrame;
#endif // UNIX_AMD64_ABI
bool compProcedureSplitting; // Separate cold code from hot code
bool genFPorder; // Preserve FP order (operations are non-commutative)
bool genFPopt; // Can we do frame-pointer-omission optimization?
bool altJit; // True if we are an altjit and are compiling this method
#ifdef OPT_CONFIG
bool optRepeat; // Repeat optimizer phases k times
#endif
#ifdef DEBUG
bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH
bool dspCode; // Display native code generated
bool dspEHTable; // Display the EH table reported to the VM
bool dspDebugInfo; // Display the Debug info reported to the VM
bool dspInstrs; // Display the IL instructions intermixed with the native code output
bool dspLines; // Display source-code lines intermixed with native code output
bool dmpHex; // Display raw bytes in hex of native code output
bool varNames; // Display variables names in native code output
bool disAsm; // Display native code as it is generated
bool disAsmSpilled; // Display native code when any register spilling occurs
bool disasmWithGC; // Display GC info interleaved with disassembly.
bool disDiffable; // Makes the Disassembly code 'diff-able'
bool disAddr; // Display process address next to each instruction in disassembly code
bool disAlignment; // Display alignment boundaries in disassembly code
bool disAsm2; // Display native code after it is generated using external disassembler
bool dspOrder; // Display names of each of the methods that we ngen/jit
bool dspUnwind; // Display the unwind info output
bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable)
bool compLongAddress; // Force using large pseudo instructions for long address
// (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC)
bool dspGCtbls; // Display the GC tables
#endif
bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method
        // Default numbers used to perform loop alignment. All the numbers are chosen
// based on experimenting with various benchmarks.
// Default minimum loop block weight required to enable loop alignment.
#define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4
// By default a loop will be aligned at 32B address boundary to get better
// performance as per architecture manuals.
#define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20
// For non-adaptive loop alignment, by default, only align a loop whose size is
// at most 3 times the alignment block size. If the loop is bigger than that, it is most
// likely complicated enough that loop alignment will not impact performance.
#define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3
#ifdef DEBUG
// Loop alignment variables
// If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary.
bool compJitAlignLoopForJcc;
#endif
        // For non-adaptive alignment, maximum loop size (in bytes) for which alignment will be done.
unsigned short compJitAlignLoopMaxCodeSize;
// Minimum weight needed for the first block of a loop to make it a candidate for alignment.
unsigned short compJitAlignLoopMinBlockWeight;
// For non-adaptive alignment, address boundary (power of 2) at which loop alignment should
// be done. By default, 32B.
unsigned short compJitAlignLoopBoundary;
// Padding limit to align a loop.
unsigned short compJitAlignPaddingLimit;
        // If set, perform adaptive loop alignment that limits the amount of padding based on loop size.
bool compJitAlignLoopAdaptive;
// If set, tries to hide alignment instructions behind unconditional jumps.
bool compJitHideAlignBehindJmp;
#ifdef LATE_DISASM
bool doLateDisasm; // Run the late disassembler
#endif // LATE_DISASM
#if DUMP_GC_TABLES && !defined(DEBUG)
#pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
static const bool dspGCtbls = true;
#endif
#ifdef PROFILING_SUPPORTED
// Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()).
// This option helps make the JIT behave as if it is running under a profiler.
bool compJitELTHookEnabled;
#endif // PROFILING_SUPPORTED
#if FEATURE_TAILCALL_OPT
// Whether opportunistic or implicit tail call optimization is enabled.
bool compTailCallOpt;
// Whether optimization of transforming a recursive tail call into a loop is enabled.
bool compTailCallLoopOpt;
#endif
#if FEATURE_FASTTAILCALL
// Whether fast tail calls are allowed.
bool compFastTailCalls;
#endif // FEATURE_FASTTAILCALL
#if defined(TARGET_ARM64)
// Decision about whether to save FP/LR registers with callee-saved registers (see
// COMPlus_JitSaveFpLrWithCalleSavedRegisters).
int compJitSaveFpLrWithCalleeSavedRegisters;
#endif // defined(TARGET_ARM64)
#ifdef CONFIGURABLE_ARM_ABI
bool compUseSoftFP = false;
#else
#ifdef ARM_SOFTFP
static const bool compUseSoftFP = true;
#else // !ARM_SOFTFP
static const bool compUseSoftFP = false;
#endif // ARM_SOFTFP
#endif // CONFIGURABLE_ARM_ABI
} opts;
static bool s_pAltJitExcludeAssembliesListInitialized;
static AssemblyNamesList2* s_pAltJitExcludeAssembliesList;
#ifdef DEBUG
static bool s_pJitDisasmIncludeAssembliesListInitialized;
static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList;
static bool s_pJitFunctionFileInitialized;
static MethodSet* s_pJitMethodSet;
#endif // DEBUG
#ifdef DEBUG
// Silence the warning about a cast to a greater size. It is easier to silence the warning than to construct code the
// compiler is happy with, and it is safe in this case.
#pragma warning(push)
#pragma warning(disable : 4312)
template <typename T>
T dspPtr(T p)
{
return (p == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : p);
}
template <typename T>
T dspOffset(T o)
{
return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o);
}
#pragma warning(pop)
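    // Illustrative effect of dspPtr/dspOffset above (hypothetical values): when opts.dspDiffable
    // is set, any non-null pointer or non-zero offset is replaced by the fixed token 0xD1FFAB1E so
    // that two jit dumps can be diffed without spurious pointer/offset noise; when it is not set,
    // the original value is returned unchanged.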
static int dspTreeID(GenTree* tree)
{
return tree->gtTreeID;
}
static void printStmtID(Statement* stmt)
{
assert(stmt != nullptr);
printf(FMT_STMT, stmt->GetID());
}
static void printTreeID(GenTree* tree)
{
if (tree == nullptr)
{
printf("[------]");
}
else
{
printf("[%06d]", dspTreeID(tree));
}
}
const char* pgoSourceToString(ICorJitInfo::PgoSource p);
const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail);
#endif // DEBUG
// clang-format off
#define STRESS_MODES \
\
STRESS_MODE(NONE) \
\
/* "Variations" stress areas which we try to mix up with each other. */ \
/* These should not be exhaustively used as they might */ \
/* hide/trivialize other areas */ \
\
STRESS_MODE(REGS) \
STRESS_MODE(DBL_ALN) \
STRESS_MODE(LCL_FLDS) \
STRESS_MODE(UNROLL_LOOPS) \
STRESS_MODE(MAKE_CSE) \
STRESS_MODE(LEGACY_INLINE) \
STRESS_MODE(CLONE_EXPR) \
STRESS_MODE(USE_CMOV) \
STRESS_MODE(FOLD) \
STRESS_MODE(MERGED_RETURNS) \
STRESS_MODE(BB_PROFILE) \
STRESS_MODE(OPT_BOOLS_GC) \
STRESS_MODE(REMORPH_TREES) \
STRESS_MODE(64RSLT_MUL) \
STRESS_MODE(DO_WHILE_LOOPS) \
STRESS_MODE(MIN_OPTS) \
STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \
STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \
STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \
STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \
STRESS_MODE(UNSAFE_BUFFER_CHECKS) \
STRESS_MODE(NULL_OBJECT_CHECK) \
STRESS_MODE(PINVOKE_RESTORE_ESP) \
STRESS_MODE(RANDOM_INLINE) \
STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \
STRESS_MODE(GENERIC_VARN) \
STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \
STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \
STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \
STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \
\
/* After COUNT_VARN, stress level 2 does all of these all the time */ \
\
STRESS_MODE(COUNT_VARN) \
\
/* "Check" stress areas that can be exhaustively used if we */ \
/* dont care about performance at all */ \
\
STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \
STRESS_MODE(CHK_FLOW_UPDATE) \
STRESS_MODE(EMITTER) \
STRESS_MODE(CHK_REIMPORT) \
STRESS_MODE(FLATFP) \
STRESS_MODE(GENERIC_CHECK) \
STRESS_MODE(COUNT)
enum compStressArea
{
#define STRESS_MODE(mode) STRESS_##mode,
STRESS_MODES
#undef STRESS_MODE
};
// clang-format on
#ifdef DEBUG
static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1];
BYTE compActiveStressModes[STRESS_COUNT];
#endif // DEBUG
#define MAX_STRESS_WEIGHT 100
bool compStressCompile(compStressArea stressArea, unsigned weightPercentage);
bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage);
#ifdef DEBUG
bool compInlineStress()
{
return compStressCompile(STRESS_LEGACY_INLINE, 50);
}
bool compRandomInlineStress()
{
return compStressCompile(STRESS_RANDOM_INLINE, 50);
}
bool compPromoteFewerStructs(unsigned lclNum);
#endif // DEBUG
bool compTailCallStress()
{
#ifdef DEBUG
// Do not stress tailcalls in IL stubs as the runtime creates several IL
// stubs to implement the tailcall mechanism, which would then
// recursively create more IL stubs.
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) &&
(JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5));
#else
return false;
#endif
}
const char* compGetTieringName(bool wantShortName = false) const;
const char* compGetStressMessage() const;
codeOptimize compCodeOpt() const
{
#if 0
// Switching between size & speed has measurable throughput impact
// (3.5% on NGen CoreLib when measured). It used to be enabled for
        // DEBUG, but we should generate identical code between CHK & RET builds,
        // so that's not acceptable.
// TODO-Throughput: Figure out what to do about size vs. speed & throughput.
// Investigate the cause of the throughput regression.
return opts.compCodeOpt;
#else
return BLENDED_CODE;
#endif
}
//--------------------- Info about the procedure --------------------------
struct Info
{
COMP_HANDLE compCompHnd;
CORINFO_MODULE_HANDLE compScopeHnd;
CORINFO_CLASS_HANDLE compClassHnd;
CORINFO_METHOD_HANDLE compMethodHnd;
CORINFO_METHOD_INFO* compMethodInfo;
bool hasCircularClassConstraints;
bool hasCircularMethodConstraints;
#if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS
const char* compMethodName;
const char* compClassName;
const char* compFullName;
double compPerfScore;
int compMethodSuperPMIIndex; // useful when debugging under SuperPMI
#endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS
#if defined(DEBUG) || defined(INLINE_DATA)
// Method hash is logically const, but computed
// on first demand.
mutable unsigned compMethodHashPrivate;
unsigned compMethodHash() const;
#endif // defined(DEBUG) || defined(INLINE_DATA)
#ifdef PSEUDORANDOM_NOP_INSERTION
// things for pseudorandom nop insertion
unsigned compChecksum;
CLRRandom compRNG;
#endif
// The following holds the FLG_xxxx flags for the method we're compiling.
unsigned compFlags;
// The following holds the class attributes for the method we're compiling.
unsigned compClassAttr;
const BYTE* compCode;
IL_OFFSET compILCodeSize; // The IL code size
IL_OFFSET compILImportSize; // Estimated amount of IL actually imported
IL_OFFSET compILEntry; // The IL entry point (normally 0)
PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr)
UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This
// is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if:
// (1) the code is not hot/cold split, and we issued less code than we expected, or
// (2) the code is hot/cold split, and we issued less code than we expected
// in the cold section (the hot section will always be padded out to compTotalHotCodeSize).
bool compIsStatic : 1; // Is the method static (no 'this' pointer)?
bool compIsVarArgs : 1; // Does the method have varargs parameters?
bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options?
bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback
bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic
bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used.
var_types compRetType; // Return type of the method as declared in IL
var_types compRetNativeType; // Normalized return type as per target arch ABI
unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden)
unsigned compArgsCount; // Number of arguments (incl. implicit and hidden)
#if FEATURE_FASTTAILCALL
unsigned compArgStackSize; // Incoming argument stack size in bytes
#endif // FEATURE_FASTTAILCALL
unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present);
int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE)
unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var)
unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden)
unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden)
unsigned compMaxStack;
UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method
UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method
unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition.
CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method.
unsigned compLvFrameListRoot; // lclNum for the Frame root
unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL.
// You should generally use compHndBBtabCount instead: it is the
// current number of EH clauses (after additions like synchronized
// methods and funclets, and removals like unreachable code deletion).
Target::ArgOrder compArgOrder;
bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler
// and the VM expects that, or the JIT is a "self-host" compiler
// (e.g., x86 hosted targeting x86) and the VM expects that.
/* The following holds IL scope information about local variables.
*/
unsigned compVarScopesCount;
VarScopeDsc* compVarScopes;
/* The following holds information about instr offsets for
* which we need to report IP-mappings
*/
IL_OFFSET* compStmtOffsets; // sorted
unsigned compStmtOffsetsCount;
ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit;
#define CPU_X86 0x0100 // The generic X86 CPU
#define CPU_X86_PENTIUM_4 0x0110
#define CPU_X64 0x0200 // The generic x64 CPU
#define CPU_AMD_X64 0x0210 // AMD x64 CPU
#define CPU_INTEL_X64 0x0240 // Intel x64 CPU
#define CPU_ARM 0x0300 // The generic ARM CPU
#define CPU_ARM64 0x0400 // The generic ARM64 CPU
unsigned genCPU; // What CPU are we running on
// Number of class profile probes in this method
unsigned compClassProbeCount;
} info;
// Returns true if the method being compiled returns a non-void and non-struct value.
// Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a
    // single register as per target arch ABI (e.g. on Amd64 Windows structs of size 1, 2,
    // 4 or 8 get normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; on Arm, HFA structs).
// Methods returning such structs are considered to return non-struct return value and
// this method returns true in that case.
bool compMethodReturnsNativeScalarType()
{
return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType);
}
// Returns true if the method being compiled returns RetBuf addr as its return value
bool compMethodReturnsRetBufAddr()
{
// There are cases where implicit RetBuf argument should be explicitly returned in a register.
// In such cases the return type is changed to TYP_BYREF and appropriate IR is generated.
// These cases are:
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_AMD64
// 1. on x64 Windows and Unix the address of RetBuf needs to be returned by
// methods with hidden RetBufArg in RAX. In such case GT_RETURN is of TYP_BYREF,
// returning the address of RetBuf.
return (info.compRetBuffArg != BAD_VAR_NUM);
#else // TARGET_AMD64
#ifdef PROFILING_SUPPORTED
// 2. Profiler Leave callback expects the address of retbuf as return value for
// methods with hidden RetBuf argument. impReturnInstruction() when profiler
// callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for
// methods with hidden RetBufArg.
if (compIsProfilerHookNeeded())
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
#endif
// 3. Windows ARM64 native instance calling convention requires the address of RetBuff
// to be returned in x0.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM64)
if (TargetOS::IsWindows)
{
auto callConv = info.compCallConv;
if (callConvIsInstanceMethodCallConv(callConv))
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
}
#endif // TARGET_ARM64
// 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86)
if (info.compCallConv != CorInfoCallConvExtension::Managed)
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
#endif
return false;
#endif // TARGET_AMD64
}
// Returns true if the method returns a value in more than one return register
// TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs?
// TODO-ARM64: Does this apply for ARM64 too?
bool compMethodReturnsMultiRegRetType()
{
#if FEATURE_MULTIREG_RET
#if defined(TARGET_X86)
// On x86, 64-bit longs and structs are returned in multiple registers
return varTypeIsLong(info.compRetNativeType) ||
(varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM));
#else // targets: X64-UNIX, ARM64 or ARM32
// On all other targets that support multireg return values:
// Methods returning a struct in multiple registers have a return value of TYP_STRUCT.
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg
return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM);
#endif // TARGET_XXX
#else // not FEATURE_MULTIREG_RET
// For this architecture there are no multireg returns
return false;
#endif // FEATURE_MULTIREG_RET
}
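    // Illustrative cases for the logic above (not an exhaustive list):
    //  - x86: a method returning 'long' uses EDX:EAX, so this reports true.
    //  - x64-unix / arm64 / arm32: a struct returned in registers (compRetBuffArg == BAD_VAR_NUM)
    //    keeps compRetNativeType == TYP_STRUCT and this reports true.
    //  - any target: a method returning through a hidden return buffer reports false here.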
bool compEnregLocals()
{
return ((opts.compFlags & CLFLG_REGVAR) != 0);
}
bool compEnregStructLocals()
{
return (JitConfig.JitEnregStructLocals() != 0);
}
bool compObjectStackAllocation()
{
return (JitConfig.JitObjectStackAllocation() != 0);
}
// Returns true if the method returns a value in more than one return register,
// it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed.
    // The difference from the original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling:
    // this method correctly returns false for it (it is passed as an HVA), while the original returns true.
bool compMethodReturnsMultiRegRegTypeAlternate()
{
#if FEATURE_MULTIREG_RET
#if defined(TARGET_X86)
// On x86, 64-bit longs and structs are returned in multiple registers
return varTypeIsLong(info.compRetNativeType) ||
(varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM));
#else // targets: X64-UNIX, ARM64 or ARM32
#if defined(TARGET_ARM64)
// TYP_SIMD* are returned in one register.
if (varTypeIsSIMD(info.compRetNativeType))
{
return false;
}
#endif
// On all other targets that support multireg return values:
// Methods returning a struct in multiple registers have a return value of TYP_STRUCT.
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg
return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM);
#endif // TARGET_XXX
#else // not FEATURE_MULTIREG_RET
// For this architecture there are no multireg returns
return false;
#endif // FEATURE_MULTIREG_RET
}
// Returns true if the method being compiled returns a value
bool compMethodHasRetVal()
{
return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() ||
compMethodReturnsMultiRegRetType();
}
// Returns true if the method requires a PInvoke prolog and epilog
bool compMethodRequiresPInvokeFrame()
{
return (info.compUnmanagedCallCountWithGCTransition > 0);
}
// Returns true if address-exposed user variables should be poisoned with a recognizable value
bool compShouldPoisonFrame()
{
#ifdef FEATURE_ON_STACK_REPLACEMENT
if (opts.IsOSR())
return false;
#endif
return !info.compInitMem && opts.compDbgCode;
}
// Returns true if the jit supports having patchpoints in this method.
// Optionally, get the reason why not.
bool compCanHavePatchpoints(const char** reason = nullptr);
#if defined(DEBUG)
void compDispLocalVars();
#endif // DEBUG
private:
class ClassLayoutTable* m_classLayoutTable;
class ClassLayoutTable* typCreateClassLayoutTable();
class ClassLayoutTable* typGetClassLayoutTable();
public:
// Get the layout having the specified layout number.
ClassLayout* typGetLayoutByNum(unsigned layoutNum);
// Get the layout number of the specified layout.
unsigned typGetLayoutNum(ClassLayout* layout);
// Get the layout having the specified size but no class handle.
ClassLayout* typGetBlkLayout(unsigned blockSize);
// Get the number of a layout having the specified size but no class handle.
unsigned typGetBlkLayoutNum(unsigned blockSize);
// Get the layout for the specified class handle.
ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle);
// Get the number of a layout for the specified class handle.
unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle);
//-------------------------- Global Compiler Data ------------------------------------
#ifdef DEBUG
private:
static LONG s_compMethodsCount; // to produce unique label names
#endif
public:
#ifdef DEBUG
LONG compMethodID;
unsigned compGenTreeID;
unsigned compStatementID;
unsigned compBasicBlockID;
#endif
BasicBlock* compCurBB; // the current basic block in process
Statement* compCurStmt; // the current statement in process
GenTree* compCurTree; // the current tree in process
// The following is used to create the 'method JIT info' block.
size_t compInfoBlkSize;
BYTE* compInfoBlkAddr;
EHblkDsc* compHndBBtab; // array of EH data
unsigned compHndBBtabCount; // element count of used elements in EH data array
unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array
#if defined(TARGET_X86)
//-------------------------------------------------------------------------
// Tracking of region covered by the monitor in synchronized methods
void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER
void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT
#endif // TARGET_X86
Phases mostRecentlyActivePhase; // the most recently active phase
PhaseChecks activePhaseChecks; // the currently active phase checks
//-------------------------------------------------------------------------
// The following keeps track of how many bytes of local frame space we've
// grabbed so far in the current function, and how many argument bytes we
// need to pop when we return.
//
unsigned compLclFrameSize; // secObject+lclBlk+locals+temps
// Count of callee-saved regs we pushed in the prolog.
// Does not include EBP for isFramePointerUsed() and double-aligned frames.
// In case of Amd64 this doesn't include float regs saved on stack.
unsigned compCalleeRegsPushed;
#if defined(TARGET_XARCH)
// Mask of callee saved float regs on stack.
regMaskTP compCalleeFPRegsSavedMask;
#endif
#ifdef TARGET_AMD64
// Quirk for VS debug-launch scenario to work:
// Bytes of padding between save-reg area and locals.
#define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES)
unsigned compVSQuirkStackPaddingNeeded;
#endif
unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg))
unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args
unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args
unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args
#if defined(TARGET_ARM64)
struct FrameInfo
{
// Frame type (1-5)
int frameType;
// Distance from established (method body) SP to base of callee save area
int calleeSaveSpOffset;
// Amount to subtract from SP before saving (prolog) OR
// to add to SP after restoring (epilog) callee saves
int calleeSaveSpDelta;
// Distance from established SP to where caller's FP was saved
int offsetSpToSavedFp;
} compFrameInfo;
#endif
//-------------------------------------------------------------------------
static void compStartup(); // One-time initialization
static void compShutdown(); // One-time finalization
void compInit(ArenaAllocator* pAlloc,
CORINFO_METHOD_HANDLE methodHnd,
COMP_HANDLE compHnd,
CORINFO_METHOD_INFO* methodInfo,
InlineInfo* inlineInfo);
void compDone();
static void compDisplayStaticSizes(FILE* fout);
//------------ Some utility functions --------------
void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
void** ppIndirection); /* OUT */
// Several JIT/EE interface functions return a CorInfoType, and also return a
// class handle as an out parameter if the type is a value class. Returns the
// size of the type these describe.
unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd);
// Returns true if the method being compiled has a return buffer.
bool compHasRetBuffArg();
#ifdef DEBUG
// Components used by the compiler may write unit test suites, and
// have them run within this method. They will be run only once per process, and only
// in debug. (Perhaps should be under the control of a COMPlus_ flag.)
// These should fail by asserting.
void compDoComponentUnitTestsOnce();
#endif // DEBUG
int compCompile(CORINFO_MODULE_HANDLE classPtr,
void** methodCodePtr,
uint32_t* methodCodeSize,
JitFlags* compileFlags);
void compCompileFinish();
int compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
COMP_HANDLE compHnd,
CORINFO_METHOD_INFO* methodInfo,
void** methodCodePtr,
uint32_t* methodCodeSize,
JitFlags* compileFlag);
ArenaAllocator* compGetArenaAllocator();
void generatePatchpointInfo();
#if MEASURE_MEM_ALLOC
static bool s_dspMemStats; // Display per-phase memory statistics for every function
#endif // MEASURE_MEM_ALLOC
#if LOOP_HOIST_STATS
unsigned m_loopsConsidered;
bool m_curLoopHasHoistedExpression;
unsigned m_loopsWithHoistedExpressions;
unsigned m_totalHoistedExpressions;
void AddLoopHoistStats();
void PrintPerMethodLoopHoistStats();
static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below.
static unsigned s_loopsConsidered;
static unsigned s_loopsWithHoistedExpressions;
static unsigned s_totalHoistedExpressions;
static void PrintAggregateLoopHoistStats(FILE* f);
#endif // LOOP_HOIST_STATS
#if TRACK_ENREG_STATS
class EnregisterStats
{
private:
unsigned m_totalNumberOfVars;
unsigned m_totalNumberOfStructVars;
unsigned m_totalNumberOfEnregVars;
unsigned m_totalNumberOfStructEnregVars;
unsigned m_addrExposed;
unsigned m_VMNeedsStackAddr;
unsigned m_localField;
unsigned m_blockOp;
unsigned m_dontEnregStructs;
unsigned m_notRegSizeStruct;
unsigned m_structArg;
unsigned m_lclAddrNode;
unsigned m_castTakesAddr;
unsigned m_storeBlkSrc;
unsigned m_oneAsgRetyping;
unsigned m_swizzleArg;
unsigned m_blockOpRet;
unsigned m_returnSpCheck;
unsigned m_simdUserForcesDep;
unsigned m_liveInOutHndlr;
unsigned m_depField;
unsigned m_noRegVars;
unsigned m_minOptsGC;
#ifdef JIT32_GCENCODER
unsigned m_PinningRef;
#endif // JIT32_GCENCODER
#if !defined(TARGET_64BIT)
unsigned m_longParamField;
#endif // !TARGET_64BIT
unsigned m_parentExposed;
unsigned m_tooConservative;
unsigned m_escapeAddress;
unsigned m_osrExposed;
unsigned m_stressLclFld;
unsigned m_copyFldByFld;
unsigned m_dispatchRetBuf;
unsigned m_wideIndir;
public:
void RecordLocal(const LclVarDsc* varDsc);
void Dump(FILE* fout) const;
};
static EnregisterStats s_enregisterStats;
#endif // TRACK_ENREG_STATS
bool compIsForImportOnly();
bool compIsForInlining() const;
bool compDonotInline();
#ifdef DEBUG
    // Get the default fill char value; we randomize this value when JitStress is enabled.
static unsigned char compGetJitDefaultFill(Compiler* comp);
const char* compLocalVarName(unsigned varNum, unsigned offs);
VarName compVarName(regNumber reg, bool isFloatReg = false);
const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false);
const char* compRegNameForSize(regNumber reg, size_t size);
const char* compFPregVarName(unsigned fpReg, bool displayVar = false);
void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP);
void compDspSrcLinesByLineNum(unsigned line, bool seek = false);
#endif // DEBUG
//-------------------------------------------------------------------------
struct VarScopeListNode
{
VarScopeDsc* data;
VarScopeListNode* next;
static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc)
{
VarScopeListNode* node = new (alloc) VarScopeListNode;
node->data = value;
node->next = nullptr;
return node;
}
};
struct VarScopeMapInfo
{
VarScopeListNode* head;
VarScopeListNode* tail;
static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc)
{
VarScopeMapInfo* info = new (alloc) VarScopeMapInfo;
info->head = node;
info->tail = node;
return info;
}
};
// Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup.
static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32;
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap;
    // Map to keep variables' scopes, indexed by varNum, containing their scope dscs at the index.
VarNumToScopeDscMap* compVarScopeMap;
VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd);
VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs);
VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs);
void compInitVarScopeMap();
VarScopeDsc** compEnterScopeList; // List has the offsets where variables
// enter scope, sorted by instr offset
unsigned compNextEnterScope;
VarScopeDsc** compExitScopeList; // List has the offsets where variables
// go out of scope, sorted by instr offset
unsigned compNextExitScope;
void compInitScopeLists();
void compResetScopeLists();
VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false);
VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false);
void compProcessScopesUntil(unsigned offset,
VARSET_TP* inScope,
void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*),
void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*));
#ifdef DEBUG
void compDispScopeLists();
#endif // DEBUG
bool compIsProfilerHookNeeded();
//-------------------------------------------------------------------------
/* Statistical Data Gathering */
void compJitStats(); // call this function and enable
// various ifdef's below for statistical data
#if CALL_ARG_STATS
void compCallArgStats();
static void compDispCallArgStats(FILE* fout);
#endif
//-------------------------------------------------------------------------
protected:
#ifdef DEBUG
bool skipMethod();
#endif
ArenaAllocator* compArenaAllocator;
public:
void compFunctionTraceStart();
void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI);
protected:
size_t compMaxUncheckedOffsetForNullObject;
void compInitOptions(JitFlags* compileFlags);
void compSetProcessor();
void compInitDebuggingInfo();
void compSetOptimizationLevel();
#ifdef TARGET_ARMARCH
bool compRsvdRegCheck(FrameLayoutState curState);
#endif
void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags);
// Clear annotations produced during optimizations; to be used between iterations when repeating opts.
void ResetOptAnnotations();
// Regenerate loop descriptors; to be used between iterations when repeating opts.
void RecomputeLoopInfo();
#ifdef PROFILING_SUPPORTED
// Data required for generating profiler Enter/Leave/TailCall hooks
bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method
void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks
bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle
#endif
public:
// Assumes called as part of process shutdown; does any compiler-specific work associated with that.
static void ProcessShutdownWork(ICorStaticInfo* statInfo);
CompAllocator getAllocator(CompMemKind cmk = CMK_Generic)
{
return CompAllocator(compArenaAllocator, cmk);
}
CompAllocator getAllocatorGC()
{
return getAllocator(CMK_GC);
}
CompAllocator getAllocatorLoopHoist()
{
return getAllocator(CMK_LoopHoist);
}
#ifdef DEBUG
CompAllocator getAllocatorDebugOnly()
{
return getAllocator(CMK_DebugOnly);
}
#endif // DEBUG
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX typeInfo XX
XX XX
XX Checks for type compatibility and merges types XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// Returns true if child is equal to or a subtype of parent for merge purposes
// This support is necessary to support attributes that are not described in,
// for example, signatures. For example, the permanent home byref (a byref that
// points to the GC heap) isn't a property of method signatures; therefore,
// it is safe to have mismatches here (that tiCompatibleWith will not flag),
// but when deciding if we need to reimport a block, we need to take these
// into account.
bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;
// Returns true if child is equal to or a subtype of parent.
// normalisedForStack indicates that both types are normalised for the stack
bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;
// Merges pDest and pSrc. Returns false if merge is undefined.
// *pDest is modified to represent the merged type. Sets "*changed" to true
// if this changes "*pDest".
bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const;
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX IL verification stuff XX
XX XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// The following is used to track liveness of local variables, initialization
// of valueclass constructors, and type safe use of IL instructions.
// dynamic state info needed for verification
EntryState verCurrentState;
// The this ptr of object type .ctors is considered initialized only after
// the base class ctor is called, or an alternate ctor is called.
// An uninited this ptr can be used to access fields, but cannot
// be used to call a member function.
bool verTrackObjCtorInitState;
void verInitBBEntryState(BasicBlock* block, EntryState* currentState);
// Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state.
void verSetThisInit(BasicBlock* block, ThisInitState tis);
void verInitCurrentState();
void verResetCurrentState(BasicBlock* block, EntryState* currentState);
// Merges the current verification state into the entry state of "block", returning false if that merge fails,
// true if it succeeds. Further sets "*changed" to true if this changes the entry state of "block".
bool verMergeEntryStates(BasicBlock* block, bool* changed);
void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg));
void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg));
typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd,
bool bashStructToRef = false); // converts from jit type representation to typeInfo
typeInfo verMakeTypeInfo(CorInfoType ciType,
CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo
bool verIsSDArray(const typeInfo& ti);
typeInfo verGetArrayElemType(const typeInfo& ti);
typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args);
bool verIsByRefLike(const typeInfo& ti);
bool verIsSafeToReturnByRef(const typeInfo& ti);
// generic type variables range over types that satisfy IsBoxable
bool verIsBoxable(const typeInfo& ti);
void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file)
DEBUGARG(unsigned line));
void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file)
DEBUGARG(unsigned line));
bool verCheckTailCallConstraint(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call
// on a type parameter?
bool speculative // If true, won't throw if verification fails. Instead it will
// return false to the caller.
// If false, it will throw.
);
bool verIsBoxedValueType(const typeInfo& ti);
void verVerifyCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
bool tailCall,
bool readonlyCall, // is this a "readonly." call?
const BYTE* delegateCreateStart,
const BYTE* codeAddr,
CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName));
bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef);
typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType);
typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType);
void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const CORINFO_FIELD_INFO& fieldInfo,
const typeInfo* tiThis,
bool mutator,
bool allowPlainStructAsThis = false);
void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode);
void verVerifyThisPtrInitialised();
bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target);
#ifdef DEBUG
// One line log function. Default level is 0. Increasing it gives you
// more log information
// levels are currently unused: #define JITDUMP(level,...) ();
void JitLogEE(unsigned level, const char* fmt, ...);
bool compDebugBreak;
bool compJitHaltMethod();
#endif
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GS Security checks for unsafe buffers XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
struct ShadowParamVarInfo
{
FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other
unsigned shadowCopy; // Lcl var num, if not valid set to BAD_VAR_NUM
static bool mayNeedShadowCopy(LclVarDsc* varDsc)
{
#if defined(TARGET_AMD64)
// GS cookie logic to create shadow slots, create trees to copy reg args to shadow
// slots and update all trees to refer to shadow slots is done immediately after
// fgMorph(). LSRA could potentially mark a param as DoNotEnregister after the JIT determines
// not to shadow a parameter. Also, LSRA could potentially spill a param which is passed
// in register. Therefore, conservatively all params may need a shadow copy. Note that
// GS cookie logic further checks whether the param is a ptr or an unsafe buffer before
// creating a shadow slot even though this routine returns true.
//
// TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than
// required. There are two cases under which a reg arg could potentially be used from its
// home location:
// a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates())
// b) LSRA spills it
//
// Possible solution to address case (a)
// - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked
//   in this routine. Note that checking "live out of an exception handler" is something we may not
//   be able to do here, since GS cookie logic is invoked ahead of liveness computation.
//   Therefore, for methods that have exception handling and need the GS cookie check, we might have
//   to take the conservative approach.
//
// Possible solution to address case (b)
// - Whenever a parameter passed in an argument register needs to be spilled by LSRA, we
// create a new spill temp if the method needs GS cookie check.
return varDsc->lvIsParam;
#else // !defined(TARGET_AMD64)
return varDsc->lvIsParam && !varDsc->lvIsRegArg;
#endif
}
#ifdef DEBUG
void Print()
{
printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy);
}
#endif
};
GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks
GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL
ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code
void gsGSChecksInitCookie(); // Grabs cookie variable
void gsCopyShadowParams(); // Identify vulnerable params and create shadow copies
bool gsFindVulnerableParams(); // Shadow param analysis code
void gsParamsToShadows(); // Insert copy code and replace param uses with shadows
static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk
static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk
#define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined.
// This can be overwritten by setting complus_JITInlineSize env variable.
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined
#define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers
private:
#ifdef FEATURE_JIT_METHOD_PERF
JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation.
static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run.
static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD.
static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to.
#endif
void BeginPhase(Phases phase); // Indicate the start of the given phase.
void EndPhase(Phases phase); // Indicate the end of the given phase.
#if MEASURE_CLRAPI_CALLS
// Thin wrappers that call into JitTimer (if present).
inline void CLRApiCallEnter(unsigned apix);
inline void CLRApiCallLeave(unsigned apix);
public:
inline void CLR_API_Enter(API_ICorJitInfo_Names ename);
inline void CLR_API_Leave(API_ICorJitInfo_Names ename);
private:
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// These variables are associated with maintaining SQM data about compile time.
unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase
// in the current compilation.
unsigned __int64 m_compCycles; // Net cycle count for current compilation
DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of
// the inlining phase in the current compilation.
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete.
// (We do this after inlining because this marks the last point at which the JIT is likely to cause
// type-loading and class initialization).
void RecordStateAtEndOfInlining();
// Assumes being called at the end of compilation. Update the SQM state.
void RecordStateAtEndOfCompilation();
public:
#if FUNC_INFO_LOGGING
static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the
// filename to write it to.
static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to.
#endif // FUNC_INFO_LOGGING
Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers.
#if MEASURE_NOWAY
void RecordNowayAssert(const char* filename, unsigned line, const char* condStr);
#endif // MEASURE_NOWAY
#ifndef FEATURE_TRACELOGGING
// Should we actually fire the noway assert body and the exception handler?
bool compShouldThrowOnNoway();
#else // FEATURE_TRACELOGGING
// Should we actually fire the noway assert body and the exception handler?
bool compShouldThrowOnNoway(const char* filename, unsigned line);
// Telemetry instance to use per method compilation.
JitTelemetry compJitTelemetry;
// Get common parameters that have to be logged with most telemetry data.
void compGetTelemetryDefaults(const char** assemblyName,
const char** scopeName,
const char** methodName,
unsigned* methodHash);
#endif // !FEATURE_TRACELOGGING
#ifdef DEBUG
private:
NodeToTestDataMap* m_nodeTestData;
static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000;
unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we
// label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS.
// Currently kept in this compiler object.
public:
NodeToTestDataMap* GetNodeTestData()
{
Compiler* compRoot = impInlineRoot();
if (compRoot->m_nodeTestData == nullptr)
{
compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly());
}
return compRoot->m_nodeTestData;
}
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap;
// Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and
// currently occur in the AST graph.
NodeToIntMap* FindReachableNodesInNodeTestData();
// Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated
// test data, associate that data with "to".
void TransferTestDataToNode(GenTree* from, GenTree* to);
// These are the methods that test that the various conditions implied by the
// test attributes are satisfied.
void JitTestCheckSSA(); // SSA builder tests.
void JitTestCheckVN(); // Value numbering tests.
#endif // DEBUG
// The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for
// operations.
FieldSeqStore* m_fieldSeqStore;
FieldSeqStore* GetFieldSeqStore()
{
Compiler* compRoot = impInlineRoot();
if (compRoot->m_fieldSeqStore == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation.
CompAllocator ialloc(getAllocator(CMK_FieldSeqStore));
compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc);
}
return compRoot->m_fieldSeqStore;
}
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap;
// Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since
// the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant
// that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to
// attach the field sequence directly to the address node.
NodeToFieldSeqMap* m_zeroOffsetFieldMap;
NodeToFieldSeqMap* GetZeroOffsetFieldMap()
{
// Don't need to worry about inlining here
if (m_zeroOffsetFieldMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for
// allocation.
CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap));
m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
}
return m_zeroOffsetFieldMap;
}
// Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in
// "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on
// "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has
// a field sequence as a member; otherwise, it may be the addition of a byref and a constant, where the const
// has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we
// record the field sequence using the ZeroOffsetFieldMap described above.
//
// One exception to the above is when "op1" is a node of type "TYP_REF" and is a GT_LCL_VAR.
// This happens when the System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in
// CoreRT. Such a case is handled the same as the default case.
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq);
typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap;
NodeToArrayInfoMap* m_arrayInfoMap;
NodeToArrayInfoMap* GetArrayInfoMap()
{
Compiler* compRoot = impInlineRoot();
if (compRoot->m_arrayInfoMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation.
CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap));
compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc);
}
return compRoot->m_arrayInfoMap;
}
//-----------------------------------------------------------------------------------------------------------------
// Compiler::TryGetArrayInfo:
// Given an indirection node, checks to see whether or not that indirection represents an array access, and
// if so returns information about the array.
//
// Arguments:
// indir - The `GT_IND` node.
// arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise.
//
// Returns:
// True if the `GT_IND` node represents an array access; false otherwise.
bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo)
{
if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
return false;
}
if (indir->gtOp1->OperIs(GT_INDEX_ADDR))
{
GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr();
*arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset,
indexAddr->gtStructElemClass);
return true;
}
bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo);
assert(found);
return true;
}
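// Illustrative usage sketch (not part of the original sources): a caller holding an indirection
// node could query the recorded array information roughly like this; "indir" and "elemInfo" are
// hypothetical locals.
//
//     ArrayInfo elemInfo;
//     if (TryGetArrayInfo(indir, &elemInfo))
//     {
//         // "elemInfo" now describes the element type/size/offset of the accessed array.
//     }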
NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount];
// In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory
// states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory
// state, all the possible memory states are possible initial states of the corresponding catch block(s).)
NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind)
{
if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates)
{
// Use the same map for GCHeap and ByrefExposed when their states match.
memoryKind = ByrefExposed;
}
assert(memoryKind < MemoryKindCount);
Compiler* compRoot = impInlineRoot();
if (compRoot->m_memorySsaMap[memoryKind] == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation.
CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap));
compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc);
}
return compRoot->m_memorySsaMap[memoryKind];
}
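// Illustrative usage sketch (not part of the original sources): a phase that wants to know which
// SSA number was recorded for the memory state defined by a given node might do something like
// the following; "tree" and "ssaNum" are hypothetical locals.
//
//     unsigned ssaNum;
//     if (GetMemorySsaMap(GcHeap)->Lookup(tree, &ssaNum))
//     {
//         // "tree" defines GC heap SSA number "ssaNum".
//     }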
// The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields.
CORINFO_CLASS_HANDLE m_refAnyClass;
CORINFO_FIELD_HANDLE GetRefanyDataField()
{
if (m_refAnyClass == nullptr)
{
m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
}
return info.compCompHnd->getFieldInClass(m_refAnyClass, 0);
}
CORINFO_FIELD_HANDLE GetRefanyTypeField()
{
if (m_refAnyClass == nullptr)
{
m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
}
return info.compCompHnd->getFieldInClass(m_refAnyClass, 1);
}
#if VARSET_COUNTOPS
static BitSetSupport::BitSetOpCounter m_varsetOpCounter;
#endif
#if ALLVARSET_COUNTOPS
static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter;
#endif
static HelperCallProperties s_helperCallProperties;
#ifdef UNIX_AMD64_ABI
static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size);
static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
unsigned slotNum);
static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
var_types* type0,
var_types* type1,
unsigned __int8* offset0,
unsigned __int8* offset1);
void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd,
var_types* type0,
var_types* type1,
unsigned __int8* offset0,
unsigned __int8* offset1);
#endif // defined(UNIX_AMD64_ABI)
void fgMorphMultiregStructArgs(GenTreeCall* call);
GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr);
bool killGCRefs(GenTree* tree);
}; // end of class Compiler
//---------------------------------------------------------------------------------------------------------------------
// GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern.
//
// This class implements a configurable walker for IR trees. There are five configuration options (defaults values are
// shown in parentheses):
//
// - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit
// of a misnomer, as the first entry will always be the current node.
//
// - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an
// argument before visiting the node's operands.
//
// - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an
// argument after visiting the node's operands.
//
// - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes.
// `DoPreOrder` must be true if this option is true.
//
// - UseExecutionOrder (false): when true, the walker will visit a node's operands in execution order (e.g. if a
// binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be
// visited before the first).
//
// At least one of `DoPreOrder` and `DoPostOrder` must be specified.
//
// A simple pre-order visitor might look something like the following:
//
// class CountingVisitor final : public GenTreeVisitor<CountingVisitor>
// {
// public:
// enum
// {
// DoPreOrder = true
// };
//
// unsigned m_count;
//
// CountingVisitor(Compiler* compiler)
// : GenTreeVisitor<CountingVisitor>(compiler), m_count(0)
// {
// }
//
//     Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
//     {
//         m_count++;
//         return fgWalkResult::WALK_CONTINUE;
//     }
// };
//
// This visitor would then be used like so:
//
// CountingVisitor countingVisitor(compiler);
// countingVisitor.WalkTree(root);
//
template <typename TVisitor>
class GenTreeVisitor
{
protected:
typedef Compiler::fgWalkResult fgWalkResult;
enum
{
ComputeStack = false,
DoPreOrder = false,
DoPostOrder = false,
DoLclVarsOnly = false,
UseExecutionOrder = false,
};
Compiler* m_compiler;
ArrayStack<GenTree*> m_ancestors;
GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack))
{
assert(compiler != nullptr);
static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder);
static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder);
}
fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
return fgWalkResult::WALK_CONTINUE;
}
fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
{
return fgWalkResult::WALK_CONTINUE;
}
public:
fgWalkResult WalkTree(GenTree** use, GenTree* user)
{
assert(use != nullptr);
GenTree* node = *use;
if (TVisitor::ComputeStack)
{
m_ancestors.Push(node);
}
fgWalkResult result = fgWalkResult::WALK_CONTINUE;
if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
node = *use;
if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES))
{
goto DONE;
}
}
switch (node->OperGet())
{
// Leaf lclVars
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
if (TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
FALLTHROUGH;
// Leaf nodes
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
break;
// Lclvar unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
if (TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
FALLTHROUGH;
// Standard unary operators
case GT_NOT:
case GT_NEG:
case GT_BSWAP:
case GT_BSWAP16:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_FIELD:
case GT_RETURN:
case GT_RETFILT:
case GT_RUNTIMELOOKUP:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
{
GenTreeUnOp* const unOp = node->AsUnOp();
if (unOp->gtOp1 != nullptr)
{
result = WalkTree(&unOp->gtOp1, unOp);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
// Special nodes
case GT_PHI:
for (GenTreePhi::Use& use : node->AsPhi()->Uses())
{
result = WalkTree(&use.NodeRef(), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses())
{
result = WalkTree(&use.NodeRef(), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
case GT_CMPXCHG:
{
GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg();
result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&cmpXchg->gtOpValue, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_ARR_ELEM:
{
GenTreeArrElem* const arrElem = node->AsArrElem();
result = WalkTree(&arrElem->gtArrObj, arrElem);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
const unsigned rank = arrElem->gtArrRank;
for (unsigned dim = 0; dim < rank; dim++)
{
result = WalkTree(&arrElem->gtArrInds[dim], arrElem);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
case GT_ARR_OFFSET:
{
GenTreeArrOffs* const arrOffs = node->AsArrOffs();
result = WalkTree(&arrOffs->gtOffset, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&arrOffs->gtIndex, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&arrOffs->gtArrObj, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_STORE_DYN_BLK:
{
GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk();
GenTree** op1Use = &dynBlock->gtOp1;
GenTree** op2Use = &dynBlock->gtOp2;
GenTree** op3Use = &dynBlock->gtDynamicSize;
result = WalkTree(op1Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(op2Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(op3Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_CALL:
{
GenTreeCall* const call = node->AsCall();
if (call->gtCallThisArg != nullptr)
{
result = WalkTree(&call->gtCallThisArg->NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
for (GenTreeCall::Use& use : call->Args())
{
result = WalkTree(&use.NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
for (GenTreeCall::Use& use : call->LateArgs())
{
result = WalkTree(&use.NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (call->gtCallType == CT_INDIRECT)
{
if (call->gtCallCookie != nullptr)
{
result = WalkTree(&call->gtCallCookie, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
result = WalkTree(&call->gtCallAddr, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (call->gtControlExpr != nullptr)
{
result = WalkTree(&call->gtControlExpr, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
if (TVisitor::UseExecutionOrder && node->IsReverseOp())
{
assert(node->AsMultiOp()->GetOperandCount() == 2);
result = WalkTree(&node->AsMultiOp()->Op(2), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&node->AsMultiOp()->Op(1), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
else
{
for (GenTree** use : node->AsMultiOp()->UseEdges())
{
result = WalkTree(use, node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// Binary nodes
default:
{
assert(node->OperIsBinary());
GenTreeOp* const op = node->AsOp();
GenTree** op1Use = &op->gtOp1;
GenTree** op2Use = &op->gtOp2;
if (TVisitor::UseExecutionOrder && node->IsReverseOp())
{
std::swap(op1Use, op2Use);
}
if (*op1Use != nullptr)
{
result = WalkTree(op1Use, op);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (*op2Use != nullptr)
{
result = WalkTree(op2Use, op);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
}
DONE:
// Finally, visit the current node
if (TVisitor::DoPostOrder)
{
result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user);
}
if (TVisitor::ComputeStack)
{
m_ancestors.Pop();
}
return result;
}
};
template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder>
class GenericTreeWalker final
: public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>
{
public:
enum
{
ComputeStack = computeStack,
DoPreOrder = doPreOrder,
DoPostOrder = doPostOrder,
DoLclVarsOnly = doLclVarsOnly,
UseExecutionOrder = useExecutionOrder,
};
private:
Compiler::fgWalkData* m_walkData;
public:
GenericTreeWalker(Compiler::fgWalkData* walkData)
: GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>(
walkData->compiler)
, m_walkData(walkData)
{
assert(walkData != nullptr);
if (computeStack)
{
walkData->parentStack = &this->m_ancestors;
}
}
Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
m_walkData->parent = user;
return m_walkData->wtprVisitorFn(use, m_walkData);
}
Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
{
m_walkData->parent = user;
return m_walkData->wtpoVisitorFn(use, m_walkData);
}
};
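// Illustrative usage sketch (not part of the original sources): callers normally reach this walker
// through the Compiler::fgWalkTree* helpers, which fill in an fgWalkData for them, but a direct use
// would look roughly like the following. The template arguments follow the parameter order
// <computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>; "MyPreVisitor",
// "walkData" and "tree" are hypothetical names.
//
//     Compiler::fgWalkData walkData = {};
//     walkData.compiler      = compiler;
//     walkData.wtprVisitorFn = MyPreVisitor; // Compiler::fgWalkResult MyPreVisitor(GenTree** use, Compiler::fgWalkData* data)
//     GenericTreeWalker<false, true, false, false, false> walker(&walkData);
//     walker.WalkTree(&tree, nullptr);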
// A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor.
template <typename TVisitor>
class DomTreeVisitor
{
protected:
Compiler* const m_compiler;
DomTreeNode* const m_domTree;
DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree)
{
}
void Begin()
{
}
void PreOrderVisit(BasicBlock* block)
{
}
void PostOrderVisit(BasicBlock* block)
{
}
void End()
{
}
public:
//------------------------------------------------------------------------
// WalkTree: Walk the dominator tree, starting from fgFirstBB.
//
// Notes:
// This performs a non-recursive, non-allocating walk of the tree by using
// DomTreeNode's firstChild and nextSibling links to locate the children of
// a node and BasicBlock's bbIDom parent link to go back up the tree when
// no more children are left.
//
// Forests are also supported, provided that all the roots are chained via
// DomTreeNode::nextSibling to fgFirstBB.
//
void WalkTree()
{
static_cast<TVisitor*>(this)->Begin();
for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next)
{
static_cast<TVisitor*>(this)->PreOrderVisit(block);
next = m_domTree[block->bbNum].firstChild;
if (next != nullptr)
{
assert(next->bbIDom == block);
continue;
}
do
{
static_cast<TVisitor*>(this)->PostOrderVisit(block);
next = m_domTree[block->bbNum].nextSibling;
if (next != nullptr)
{
assert(next->bbIDom == block->bbIDom);
break;
}
block = block->bbIDom;
} while (block != nullptr);
}
static_cast<TVisitor*>(this)->End();
}
};
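// Illustrative usage sketch (not part of the original sources): a minimal visitor that counts the
// blocks reachable through the dominator tree. "BlockCounter" is a hypothetical name and "domTree"
// is assumed to be a dominator tree array such as the one produced during SSA construction.
//
//     class BlockCounter final : public DomTreeVisitor<BlockCounter>
//     {
//     public:
//         unsigned m_count = 0;
//
//         BlockCounter(Compiler* compiler, DomTreeNode* domTree) : DomTreeVisitor<BlockCounter>(compiler, domTree)
//         {
//         }
//
//         void PreOrderVisit(BasicBlock* block)
//         {
//             m_count++;
//         }
//     };
//
//     BlockCounter counter(compiler, domTree);
//     counter.WalkTree();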
// EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.:
// for (EHblkDsc* const ehDsc : EHClauses(compiler))
//
class EHClauses
{
EHblkDsc* m_begin;
EHblkDsc* m_end;
// Forward iterator for the exception handling table entries. Iteration is in table order.
//
class iterator
{
EHblkDsc* m_ehDsc;
public:
iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc)
{
}
EHblkDsc* operator*() const
{
return m_ehDsc;
}
iterator& operator++()
{
++m_ehDsc;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_ehDsc != i.m_ehDsc;
}
};
public:
EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount)
{
assert((m_begin != nullptr) || (m_begin == m_end));
}
iterator begin() const
{
return iterator(m_begin);
}
iterator end() const
{
return iterator(m_end);
}
};
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Miscellaneous Compiler stuff XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// Values used to mark the types a stack slot is used for
const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int
const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long
const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float
const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float
const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer
const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer
const unsigned TYPE_REF_STC = 0x40; // slot used as a struct
const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type
// const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken
/*****************************************************************************
*
* Variables to keep track of total code amounts.
*/
#if DISPLAY_SIZES
extern size_t grossVMsize;
extern size_t grossNCsize;
extern size_t totalNCsize;
extern unsigned genMethodICnt;
extern unsigned genMethodNCnt;
extern size_t gcHeaderISize;
extern size_t gcPtrMapISize;
extern size_t gcHeaderNSize;
extern size_t gcPtrMapNSize;
#endif // DISPLAY_SIZES
/*****************************************************************************
*
* Variables to keep track of basic block counts (more data on 1 BB methods)
*/
#if COUNT_BASIC_BLOCKS
extern Histogram bbCntTable;
extern Histogram bbOneBBSizeTable;
#endif
/*****************************************************************************
*
* Used by optFindNaturalLoops to gather statistical information such as
* - total number of natural loops
* - number of loops with 1, 2, ... exit conditions
* - number of loops that have an iterator (for like)
* - number of loops that have a constant iterator
*/
#if COUNT_LOOPS
extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops
extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has
extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent
extern unsigned totalLoopCount; // counts the total number of natural loops
extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops
extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent
extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like)
extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter <
// const)
extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like)
extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops
extern unsigned loopsThisMethod; // counts the number of loops in the current method
extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method.
extern Histogram loopCountTable; // Histogram of loop counts
extern Histogram loopExitCountTable; // Histogram of loop exit counts
#endif // COUNT_LOOPS
/*****************************************************************************
* variables to keep track of how many iterations we go in a dataflow pass
*/
#if DATAFLOW_ITER
extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow
extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow
#endif // DATAFLOW_ITER
#if MEASURE_BLOCK_SIZE
extern size_t genFlowNodeSize;
extern size_t genFlowNodeCnt;
#endif // MEASURE_BLOCK_SIZE
#if MEASURE_NODE_SIZE
struct NodeSizeStats
{
void Init()
{
genTreeNodeCnt = 0;
genTreeNodeSize = 0;
genTreeNodeActualSize = 0;
}
// Count of tree nodes allocated.
unsigned __int64 genTreeNodeCnt;
// The size we allocate.
unsigned __int64 genTreeNodeSize;
// The actual size of the node. Note that the actual size will likely be smaller
// than the allocated size, but we sometimes use SetOper()/ChangeOper() to change
// a smaller node to a larger one. TODO-Cleanup: add stats on
// SetOper()/ChangeOper() usage to quantify this.
unsigned __int64 genTreeNodeActualSize;
};
extern NodeSizeStats genNodeSizeStats; // Total node size stats
extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats
extern Histogram genTreeNcntHist;
extern Histogram genTreeNsizHist;
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
* Count fatal errors (including noway_asserts).
*/
#if MEASURE_FATAL
extern unsigned fatal_badCode;
extern unsigned fatal_noWay;
extern unsigned fatal_implLimitation;
extern unsigned fatal_NOMEM;
extern unsigned fatal_noWayAssertBody;
#ifdef DEBUG
extern unsigned fatal_noWayAssertBodyArgs;
#endif // DEBUG
extern unsigned fatal_NYI;
#endif // MEASURE_FATAL
/*****************************************************************************
* Codegen
*/
#ifdef TARGET_XARCH
const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl;
const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr;
const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar;
const instruction INS_AND = INS_and;
const instruction INS_OR = INS_or;
const instruction INS_XOR = INS_xor;
const instruction INS_NEG = INS_neg;
const instruction INS_TEST = INS_test;
const instruction INS_MUL = INS_imul;
const instruction INS_SIGNED_DIVIDE = INS_idiv;
const instruction INS_UNSIGNED_DIVIDE = INS_div;
const instruction INS_BREAKPOINT = INS_int3;
const instruction INS_ADDC = INS_adc;
const instruction INS_SUBC = INS_sbb;
const instruction INS_NOT = INS_not;
#endif // TARGET_XARCH
#ifdef TARGET_ARM
const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl;
const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr;
const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr;
const instruction INS_AND = INS_and;
const instruction INS_OR = INS_orr;
const instruction INS_XOR = INS_eor;
const instruction INS_NEG = INS_rsb;
const instruction INS_TEST = INS_tst;
const instruction INS_MUL = INS_mul;
const instruction INS_MULADD = INS_mla;
const instruction INS_SIGNED_DIVIDE = INS_sdiv;
const instruction INS_UNSIGNED_DIVIDE = INS_udiv;
const instruction INS_BREAKPOINT = INS_bkpt;
const instruction INS_ADDC = INS_adc;
const instruction INS_SUBC = INS_sbc;
const instruction INS_NOT = INS_mvn;
const instruction INS_ABS = INS_vabs;
const instruction INS_SQRT = INS_vsqrt;
#endif // TARGET_ARM
#ifdef TARGET_ARM64
const instruction INS_MULADD = INS_madd;
inline const instruction INS_BREAKPOINT_osHelper()
{
// GDB needs the encoding of brk #0
// Windbg needs the encoding of brk #F000
return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows;
}
#define INS_BREAKPOINT INS_BREAKPOINT_osHelper()
const instruction INS_ABS = INS_fabs;
const instruction INS_SQRT = INS_fsqrt;
#endif // TARGET_ARM64
/*****************************************************************************/
extern const BYTE genTypeSizes[];
extern const BYTE genTypeAlignments[];
extern const BYTE genTypeStSzs[];
extern const BYTE genActualTypes[];
/*****************************************************************************/
#ifdef DEBUG
void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars);
#endif // DEBUG
#include "compiler.hpp" // All the shared inline functions
/*****************************************************************************/
#endif //_COMPILER_H_
/*****************************************************************************/
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/gentree.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#include "hwintrinsic.h"
#include "simd.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
/*****************************************************************************/
const unsigned char GenTree::gtOperKindTable[] = {
#define GTNODE(en, st, cm, ok) ((ok)&GTK_MASK) + GTK_COMMUTE *cm,
#include "gtlist.h"
};
#ifdef DEBUG
const GenTreeDebugOperKind GenTree::gtDebugOperKindTable[] = {
#define GTNODE(en, st, cm, ok) static_cast<GenTreeDebugOperKind>((ok)&DBK_MASK),
#include "gtlist.h"
};
#endif // DEBUG
/*****************************************************************************
*
* The types of different GenTree nodes
*/
#ifdef DEBUG
#define INDENT_SIZE 3
//--------------------------------------------
//
// IndentStack: This struct is used, along with its related enums and strings,
// to control both the indentation and the printing of arcs.
//
// Notes:
// The mode of printing is set in the Constructor, using its 'compiler' argument.
// Currently it only prints arcs when fgOrder == fgOrderLinear.
// The type of arc to print is specified by the IndentInfo enum, and is controlled
// by the caller of the Push() method.
enum IndentChars
{
ICVertical,
ICBottom,
ICTop,
ICMiddle,
ICDash,
ICTerminal,
ICError,
IndentCharCount
};
// clang-format off
// Sets of strings for different dumping options vert bot top mid dash embedded terminal error
static const char* emptyIndents[IndentCharCount] = { " ", " ", " ", " ", " ", "", "?" };
static const char* asciiIndents[IndentCharCount] = { "|", "\\", "/", "+", "-", "*", "?" };
static const char* unicodeIndents[IndentCharCount] = { "\xe2\x94\x82", "\xe2\x94\x94", "\xe2\x94\x8c", "\xe2\x94\x9c", "\xe2\x94\x80", "\xe2\x96\x8c", "?" };
// clang-format on
typedef ArrayStack<Compiler::IndentInfo> IndentInfoStack;
struct IndentStack
{
IndentInfoStack stack;
const char** indents;
// Constructor for IndentStack. Uses 'compiler' to determine the mode of printing.
IndentStack(Compiler* compiler) : stack(compiler->getAllocator(CMK_DebugOnly))
{
if (compiler->asciiTrees)
{
indents = asciiIndents;
}
else
{
indents = unicodeIndents;
}
}
// Return the depth of the current indentation.
unsigned Depth()
{
return stack.Height();
}
// Push a new indentation onto the stack, of the given type.
void Push(Compiler::IndentInfo info)
{
stack.Push(info);
}
// Pop the most recent indentation type off the stack.
Compiler::IndentInfo Pop()
{
return stack.Pop();
}
// Print the current indentation and arcs.
void print()
{
unsigned indentCount = Depth();
for (unsigned i = 0; i < indentCount; i++)
{
unsigned index = indentCount - 1 - i;
switch (stack.Top(index))
{
case Compiler::IndentInfo::IINone:
printf(" ");
break;
case Compiler::IndentInfo::IIArc:
if (index == 0)
{
printf("%s%s%s", indents[ICMiddle], indents[ICDash], indents[ICDash]);
}
else
{
printf("%s ", indents[ICVertical]);
}
break;
case Compiler::IndentInfo::IIArcBottom:
printf("%s%s%s", indents[ICBottom], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIArcTop:
printf("%s%s%s", indents[ICTop], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIError:
printf("%s%s%s", indents[ICError], indents[ICDash], indents[ICDash]);
break;
default:
unreached();
}
}
printf("%s", indents[ICTerminal]);
}
};
//------------------------------------------------------------------------
// printIndent: This is a static method which simply invokes the 'print'
// method on its 'indentStack' argument.
//
// Arguments:
// indentStack - specifies the information for the indentation & arcs to be printed
//
// Notes:
// This method exists to localize the checking for the case where indentStack is null.
static void printIndent(IndentStack* indentStack)
{
if (indentStack == nullptr)
{
return;
}
indentStack->print();
}
#endif
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* opNames[] = {
#define GTNODE(en, st, cm, ok) #en,
#include "gtlist.h"
};
const char* GenTree::OpName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opNames));
return opNames[op];
}
#endif
#if MEASURE_NODE_SIZE
static const char* opStructNames[] = {
#define GTNODE(en, st, cm, ok) #st,
#include "gtlist.h"
};
const char* GenTree::OpStructName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opStructNames));
return opStructNames[op];
}
#endif
//
// We allocate tree nodes in 2 different sizes:
// - TREE_NODE_SZ_SMALL for most nodes
// - TREE_NODE_SZ_LARGE for the few nodes (such as calls) that have
// more fields and take up a lot more space.
//
/* GT_COUNT'th oper is overloaded as 'undefined oper', so allocate storage for GT_COUNT'th oper also */
/* static */
unsigned char GenTree::s_gtNodeSizes[GT_COUNT + 1];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
unsigned char GenTree::s_gtTrueSizes[GT_COUNT + 1]{
#define GTNODE(en, st, cm, ok) sizeof(st),
#include "gtlist.h"
};
#endif // NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
#if COUNT_AST_OPERS
unsigned GenTree::s_gtNodeCounts[GT_COUNT + 1] = {0};
#endif // COUNT_AST_OPERS
/* static */
void GenTree::InitNodeSize()
{
/* Set all sizes to 'small' first */
for (unsigned op = 0; op <= GT_COUNT; op++)
{
GenTree::s_gtNodeSizes[op] = TREE_NODE_SZ_SMALL;
}
// Now set all of the appropriate entries to 'large'
CLANG_FORMAT_COMMENT_ANCHOR;
// clang-format off
if (GlobalJitOptions::compFeatureHfa
#if defined(UNIX_AMD64_ABI)
|| true
#endif // defined(UNIX_AMD64_ABI)
)
{
// On ARM32, ARM64 and System V for struct returning
// there is code that does GT_ASG-tree.CopyObj call.
// CopyObj is a large node and the GT_ASG is small, which triggers an exception.
GenTree::s_gtNodeSizes[GT_ASG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RETURN] = TREE_NODE_SZ_LARGE;
}
GenTree::s_gtNodeSizes[GT_CALL] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CAST] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FTN_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOUNDS_CHECK] = TREE_NODE_SZ_SMALL;
GenTree::s_gtNodeSizes[GT_ARR_ELEM] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_OFFSET] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RET_EXPR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FIELD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CMPXCHG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_QMARK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_STORE_DYN_BLK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INTRINSIC] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ALLOCOBJ] = TREE_NODE_SZ_LARGE;
#if USE_HELPERS_FOR_INT_DIV
GenTree::s_gtNodeSizes[GT_DIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UDIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_MOD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UMOD] = TREE_NODE_SZ_LARGE;
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
GenTree::s_gtNodeSizes[GT_PUTARG_STK] = TREE_NODE_SZ_LARGE;
#if FEATURE_ARG_SPLIT
GenTree::s_gtNodeSizes[GT_PUTARG_SPLIT] = TREE_NODE_SZ_LARGE;
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
assert(GenTree::s_gtNodeSizes[GT_RETURN] == GenTree::s_gtNodeSizes[GT_ASG]);
// This list of assertions should come to contain all GenTree subtypes that are declared
// "small".
assert(sizeof(GenTreeLclFld) <= GenTree::s_gtNodeSizes[GT_LCL_FLD]);
assert(sizeof(GenTreeLclVar) <= GenTree::s_gtNodeSizes[GT_LCL_VAR]);
static_assert_no_msg(sizeof(GenTree) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeUnOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeVal) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntConCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhysReg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLngCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeDblCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStrCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVarCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclFld) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCC) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCast) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBox) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeField) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFieldList) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeColon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCall) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeCmpXchg) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFptrVal) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeQmark) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIntrinsic) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndexAddr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrLen) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBoundsChk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArrElem) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrOffs) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndir) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreInd) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAddrMode) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeBlk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreDynBlk) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeILOffset) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeClsVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArgPlace) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhiArg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAllocObj) <= TREE_NODE_SZ_LARGE); // *** large node
#ifndef FEATURE_PUT_STRUCT_ARG_STK
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_SMALL);
#else // FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_LARGE);
#if FEATURE_ARG_SPLIT
static_assert_no_msg(sizeof(GenTreePutArgSplit) <= TREE_NODE_SZ_LARGE);
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
#ifdef FEATURE_SIMD
static_assert_no_msg(sizeof(GenTreeSIMD) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
static_assert_no_msg(sizeof(GenTreeHWIntrinsic) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_HW_INTRINSICS
// clang-format on
}
size_t GenTree::GetNodeSize() const
{
return GenTree::s_gtNodeSizes[gtOper];
}
#ifdef DEBUG
bool GenTree::IsNodeProperlySized() const
{
size_t size;
if (gtDebugFlags & GTF_DEBUG_NODE_SMALL)
{
size = TREE_NODE_SZ_SMALL;
}
else
{
assert(gtDebugFlags & GTF_DEBUG_NODE_LARGE);
size = TREE_NODE_SZ_LARGE;
}
return GenTree::s_gtNodeSizes[gtOper] <= size;
}
#endif
//------------------------------------------------------------------------
// ReplaceWith: replace this with the src node. The source must be an isolated node
// and cannot be used after the replacement.
//
// Arguments:
// src - source tree, that replaces this.
// comp - the compiler instance to transfer annotations for arrays.
//
void GenTree::ReplaceWith(GenTree* src, Compiler* comp)
{
// The source may be big only if the target is also a big node
assert((gtDebugFlags & GTF_DEBUG_NODE_LARGE) || GenTree::s_gtNodeSizes[src->gtOper] == TREE_NODE_SZ_SMALL);
// The check is effective only if nodes have been already threaded.
assert((src->gtPrev == nullptr) && (src->gtNext == nullptr));
RecordOperBashing(OperGet(), src->OperGet()); // nop unless NODEBASH_STATS is enabled
GenTree* prev = gtPrev;
GenTree* next = gtNext;
// The VTable pointer is copied intentionally here
memcpy((void*)this, (void*)src, src->GetNodeSize());
this->gtPrev = prev;
this->gtNext = next;
#ifdef DEBUG
gtSeqNum = 0;
#endif
// Transfer any annotations.
if (src->OperGet() == GT_IND && src->gtFlags & GTF_IND_ARR_INDEX)
{
ArrayInfo arrInfo;
bool b = comp->GetArrayInfoMap()->Lookup(src, &arrInfo);
assert(b);
comp->GetArrayInfoMap()->Set(this, arrInfo);
}
DEBUG_DESTROY_NODE(src);
}
/*****************************************************************************
*
* When 'NODEBASH_STATS' is enabled in "jit.h" we record all instances of
* an existing GenTree node having its operator changed. This can be useful
* for two (related) things - to see what is being bashed (and what isn't),
* and to verify that the existing choices for what nodes are marked 'large'
* are reasonable (to minimize "wasted" space).
*
* And yes, the hash function / logic is simplistic, but it is conflict-free
* and transparent for what we need.
*/
#if NODEBASH_STATS
#define BASH_HASH_SIZE 211
inline unsigned hashme(genTreeOps op1, genTreeOps op2)
{
return ((op1 * 104729) ^ (op2 * 56569)) % BASH_HASH_SIZE;
}
struct BashHashDsc
{
unsigned __int32 bhFullHash; // the hash value (unique for all old->new pairs)
unsigned __int32 bhCount; // the same old->new bashings seen so far
unsigned __int8 bhOperOld; // original gtOper
unsigned __int8 bhOperNew; // new gtOper
};
static BashHashDsc BashHash[BASH_HASH_SIZE];
void GenTree::RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{
unsigned hash = hashme(operOld, operNew);
BashHashDsc* desc = BashHash + hash;
if (desc->bhFullHash != hash)
{
        noway_assert(desc->bhCount == 0); // if this ever fires, we need to fix the hash fn
desc->bhFullHash = hash;
}
desc->bhCount += 1;
desc->bhOperOld = operOld;
desc->bhOperNew = operNew;
}
void GenTree::ReportOperBashing(FILE* f)
{
unsigned total = 0;
fflush(f);
fprintf(f, "\n");
fprintf(f, "Bashed gtOper stats:\n");
fprintf(f, "\n");
fprintf(f, " Old operator New operator #bytes old->new Count\n");
fprintf(f, " ---------------------------------------------------------------\n");
for (unsigned h = 0; h < BASH_HASH_SIZE; h++)
{
unsigned count = BashHash[h].bhCount;
if (count == 0)
continue;
unsigned opOld = BashHash[h].bhOperOld;
unsigned opNew = BashHash[h].bhOperNew;
fprintf(f, " GT_%-13s -> GT_%-13s [size: %3u->%3u] %c %7u\n", OpName((genTreeOps)opOld),
OpName((genTreeOps)opNew), s_gtTrueSizes[opOld], s_gtTrueSizes[opNew],
(s_gtTrueSizes[opOld] < s_gtTrueSizes[opNew]) ? 'X' : ' ', count);
total += count;
}
fprintf(f, "\n");
fprintf(f, "Total bashings: %u\n", total);
fprintf(f, "\n");
fflush(f);
}
#endif // NODEBASH_STATS
/*****************************************************************************/
#if MEASURE_NODE_SIZE
void GenTree::DumpNodeSizes(FILE* fp)
{
// Dump the sizes of the various GenTree flavors
fprintf(fp, "Small tree node size = %zu bytes\n", TREE_NODE_SZ_SMALL);
fprintf(fp, "Large tree node size = %zu bytes\n", TREE_NODE_SZ_LARGE);
fprintf(fp, "\n");
// Verify that node sizes are set kosherly and dump sizes
for (unsigned op = GT_NONE + 1; op < GT_COUNT; op++)
{
unsigned needSize = s_gtTrueSizes[op];
unsigned nodeSize = s_gtNodeSizes[op];
const char* structNm = OpStructName((genTreeOps)op);
const char* operName = OpName((genTreeOps)op);
bool repeated = false;
// Have we seen this struct flavor before?
for (unsigned mop = GT_NONE + 1; mop < op; mop++)
{
if (strcmp(structNm, OpStructName((genTreeOps)mop)) == 0)
{
repeated = true;
break;
}
}
// Don't repeat the same GenTree flavor unless we have an error
if (!repeated || needSize > nodeSize)
{
unsigned sizeChar = '?';
if (nodeSize == TREE_NODE_SZ_SMALL)
sizeChar = 'S';
else if (nodeSize == TREE_NODE_SZ_LARGE)
sizeChar = 'L';
fprintf(fp, "GT_%-16s ... %-19s = %3u bytes (%c)", operName, structNm, needSize, sizeChar);
if (needSize > nodeSize)
{
fprintf(fp, " -- ERROR -- allocation is only %u bytes!", nodeSize);
}
else if (needSize <= TREE_NODE_SZ_SMALL && nodeSize == TREE_NODE_SZ_LARGE)
{
fprintf(fp, " ... could be small");
}
fprintf(fp, "\n");
}
}
}
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
*
* Walk all basic blocks and call the given function pointer for all tree
* nodes contained therein.
*/
void Compiler::fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData)
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), visitor, pCallBackData);
}
}
}
//-----------------------------------------------------------
// CopyReg: Copy the _gtRegNum/gtRegTag fields.
//
// Arguments:
// from - GenTree node from which to copy
//
// Return Value:
// None
void GenTree::CopyReg(GenTree* from)
{
_gtRegNum = from->_gtRegNum;
INDEBUG(gtRegTag = from->gtRegTag;)
// Also copy multi-reg state if this is a call node
if (IsCall())
{
assert(from->IsCall());
this->AsCall()->CopyOtherRegs(from->AsCall());
}
else if (IsCopyOrReload())
{
this->AsCopyOrReload()->CopyOtherRegs(from->AsCopyOrReload());
}
}
//------------------------------------------------------------------
// gtHasReg: Whether the node has been assigned a register by LSRA
//
// Arguments:
// None
//
// Return Value:
// Returns true if the node was assigned a register.
//
//    In the case of multi-reg call nodes, the node is considered
//    to have a reg if regs are allocated for all of its
//    return values.
//
//    In the case of a GT_COPY or GT_RELOAD of a multi-reg call,
//    the GT_COPY/GT_RELOAD is considered to have a reg if it
//    has a reg assigned to any of its positions.
//
bool GenTree::gtHasReg() const
{
bool hasReg = false;
if (IsMultiRegCall())
{
const GenTreeCall* call = AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
        // A multi-reg call node is said to have regs if it has
        // a reg assigned to each of its result registers.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (call->GetRegNumByIdx(i) != REG_NA);
if (!hasReg)
{
break;
}
}
}
else if (IsCopyOrReloadOfMultiRegCall())
{
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
        // A multi-reg copy or reload node is said to have regs
        // if it has valid regs in any of its positions.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (copyOrReload->GetRegNumByIdx(i) != REG_NA);
if (hasReg)
{
break;
}
}
}
else
{
hasReg = (GetRegNum() != REG_NA);
}
return hasReg;
}
//-----------------------------------------------------------------------------
// GetRegisterDstCount: Get the number of registers defined by the node.
//
// Arguments:
// None
//
// Return Value:
// The number of registers that this node defines.
//
// Notes:
// This should not be called on a contained node.
// This does not look at the actual register assignments, if any, and so
// is valid after Lowering.
//
int GenTree::GetRegisterDstCount(Compiler* compiler) const
{
assert(!isContained());
if (!IsMultiRegNode())
{
return (IsValue()) ? 1 : 0;
}
else if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
else if (IsCopyOrReload())
{
return gtGetOp1()->GetRegisterDstCount(compiler);
}
#if FEATURE_ARG_SPLIT
else if (OperIsPutArgSplit())
{
return (const_cast<GenTree*>(this))->AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
else if (OperIsMultiRegOp())
{
// A MultiRegOp is a GT_MUL_LONG, GT_PUTARG_REG, or GT_BITCAST.
// For the latter two (ARM-only), they only have multiple registers if they produce a long value
// (GT_MUL_LONG always produces a long value).
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
return (TypeGet() == TYP_LONG) ? 2 : 1;
#else
assert(OperIs(GT_MUL_LONG));
return 2;
#endif
}
#endif
#ifdef FEATURE_HW_INTRINSICS
else if (OperIsHWIntrinsic())
{
assert(TypeIs(TYP_STRUCT));
const GenTreeHWIntrinsic* intrinsic = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = intrinsic->GetHWIntrinsicId();
assert(HWIntrinsicInfo::IsMultiReg(intrinsicId));
return HWIntrinsicInfo::GetMultiRegCount(intrinsicId);
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetFieldCount(compiler);
}
assert(!"Unexpected multi-reg node");
return 0;
}
//-----------------------------------------------------------------------------------
// IsMultiRegNode: whether this node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi-reg node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
bool GenTree::IsMultiRegNode() const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return true;
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return true;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return true;
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return true;
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::IsMultiReg(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (IsMultiRegLclVar())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetMultiRegCount: Return the register count for a multi-reg node.
//
// Arguments:
// None
//
// Return Value:
// Returns the number of registers defined by this node.
//
unsigned GenTree::GetMultiRegCount() const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegCount();
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegCount();
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::GetMultiRegCount(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
assert((gtFlags & GTF_VAR_MULTIREG) != 0);
        // The register count for a multireg lclVar requires looking at the LclVarDsc,
        // which requires a Compiler instance. The caller must instead use the
        // GetFieldCount method on GenTreeLclVar.
assert(!"MultiRegCount for LclVar");
}
assert(!"GetMultiRegCount called with non-multireg node");
return 1;
}
//---------------------------------------------------------------
// gtGetRegMask: Get the reg mask of the node.
//
// Arguments:
// None
//
// Return Value:
// Reg Mask of GenTree node.
//
regMaskTP GenTree::gtGetRegMask() const
{
regMaskTP resultMask;
if (IsMultiRegCall())
{
resultMask = genRegMask(GetRegNum());
resultMask |= AsCall()->GetOtherRegMask();
}
else if (IsCopyOrReloadOfMultiRegCall())
{
        // A multi-reg copy or reload will have valid regs for only those
        // positions that need to be copied or reloaded. Hence we need
        // to consider only those registers when computing the reg mask.
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = copyOrReload->GetRegNumByIdx(i);
if (reg != REG_NA)
{
resultMask |= genRegMask(reg);
}
}
}
#if FEATURE_ARG_SPLIT
else if (compFeatureArgSplit() && OperIsPutArgSplit())
{
const GenTreePutArgSplit* splitArg = AsPutArgSplit();
const unsigned regCount = splitArg->gtNumRegs;
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = splitArg->GetRegNumByIdx(i);
assert(reg != REG_NA);
resultMask |= genRegMask(reg);
}
}
#endif // FEATURE_ARG_SPLIT
else
{
resultMask = genRegMask(GetRegNum());
}
return resultMask;
}
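//---------------------------------------------------------------
// AddField/AddFieldLIR/InsertField/InsertFieldLIR: Append or insert a field use
// into this FIELD_LIST node.
//
// Notes:
//    The HIR variants (AddField/InsertField) also propagate the operand's effect
//    flags up to this node; the LIR variants do not, as parent nodes are not
//    expected to aggregate operand flags in LIR.
//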
void GenTreeFieldList::AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
}
void GenTreeFieldList::InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::InsertFieldLIR(
Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
}
//---------------------------------------------------------------
// GetOtherRegMask: Get the reg mask of gtOtherRegs of call node
//
// Arguments:
// None
//
// Return Value:
// Reg mask of gtOtherRegs of call node.
//
regMaskTP GenTreeCall::GetOtherRegMask() const
{
regMaskTP resultMask = RBM_NONE;
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
if (gtOtherRegs[i] != REG_NA)
{
resultMask |= genRegMask((regNumber)gtOtherRegs[i]);
continue;
}
break;
}
#endif
return resultMask;
}
//-------------------------------------------------------------------------
// IsPure:
// Returns true if this call is pure. For now, this uses the same
// definition of "pure" that is that used by HelperCallProperties: a
// pure call does not read or write any aliased (e.g. heap) memory or
// have other global side effects (e.g. class constructors, finalizers),
// but is allowed to throw an exception.
//
// NOTE: this call currently only returns true if the call target is a
// helper method that is known to be pure. No other analysis is
// performed.
//
// Arguments:
//    compiler - the compiler context.
//
// Returns:
// True if the call is pure; false otherwise.
//
bool GenTreeCall::IsPure(Compiler* compiler) const
{
return (gtCallType == CT_HELPER) &&
compiler->s_helperCallProperties.IsPure(compiler->eeGetHelperNum(gtCallMethHnd));
}
//-------------------------------------------------------------------------
// HasSideEffects:
// Returns true if this call has any side effects. All non-helpers are considered to have side effects. Only helpers
// that do not mutate the heap, do not run constructors, cannot throw, and are either a) pure or b) non-finalizing
// allocation functions are considered side-effect-free.
//
// Arguments:
// compiler - the compiler instance
// ignoreExceptions - when `true`, ignores exception side effects
// ignoreCctors - when `true`, ignores class constructor side effects
//
// Return Value:
// true if this call has any side-effects; false otherwise.
bool GenTreeCall::HasSideEffects(Compiler* compiler, bool ignoreExceptions, bool ignoreCctors) const
{
// Generally all GT_CALL nodes are considered to have side-effects, but we may have extra information about helper
// calls that can prove them side-effect-free.
if (gtCallType != CT_HELPER)
{
return true;
}
CorInfoHelpFunc helper = compiler->eeGetHelperNum(gtCallMethHnd);
HelperCallProperties& helperProperties = compiler->s_helperCallProperties;
// We definitely care about the side effects if MutatesHeap is true
if (helperProperties.MutatesHeap(helper))
{
return true;
}
// Unless we have been instructed to ignore cctors (CSE, for example, ignores cctors), consider them side effects.
if (!ignoreCctors && helperProperties.MayRunCctor(helper))
{
return true;
}
// If we also care about exceptions then check if the helper can throw
if (!ignoreExceptions && !helperProperties.NoThrow(helper))
{
return true;
}
// If this is not a Pure helper call or an allocator (that will not need to run a finalizer)
// then this call has side effects.
return !helperProperties.IsPure(helper) &&
(!helperProperties.IsAllocator(helper) || ((gtCallMoreFlags & GTF_CALL_M_ALLOC_SIDE_EFFECTS) != 0));
}
//-------------------------------------------------------------------------
// HasNonStandardAddedArgs: Return true if the method has non-standard args added to the call
// argument list during argument morphing (fgMorphArgs), e.g., passed in R10 or R11 on AMD64.
// See also GetNonStandardAddedArgCount().
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// true if there are any such args, false otherwise.
//
bool GenTreeCall::HasNonStandardAddedArgs(Compiler* compiler) const
{
return GetNonStandardAddedArgCount(compiler) != 0;
}
//-------------------------------------------------------------------------
// GetNonStandardAddedArgCount: Get the count of non-standard arguments that have been added
// during call argument morphing (fgMorphArgs). Do not count non-standard args that are already
// counted in the argument list prior to morphing.
//
// This function is used to help map the caller and callee arguments during tail call setup.
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// The count of args, as described.
//
// Notes:
// It would be more general to have fgMorphArgs set a bit on the call node when such
// args are added to a call, and a bit on each such arg, and then have this code loop
// over the call args when the special call bit is set, counting the args with the special
// arg bit. This seems pretty heavyweight, though. Instead, this logic needs to be kept
// in sync with fgMorphArgs.
//
int GenTreeCall::GetNonStandardAddedArgCount(Compiler* compiler) const
{
if (IsUnmanaged() && !compiler->opts.ShouldUsePInvokeHelpers())
{
// R11 = PInvoke cookie param
return 1;
}
else if (IsVirtualStub())
{
// R11 = Virtual stub param
return 1;
}
else if ((gtCallType == CT_INDIRECT) && (gtCallCookie != nullptr))
{
// R10 = PInvoke target param
// R11 = PInvoke cookie param
return 2;
}
return 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
//    compiler - the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
//    Returns true if we treat the call as if it has a retBuf argument.
//    The call may actually have a retBuf argument, or it could be a JIT
//    helper that we are still transforming during the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
//    The two JIT helpers that we handle here by returning true
//    aren't actually defined to return a struct, so they don't expect
//    their RetBuf to be passed in x8; instead they expect it in x0.
//
bool GenTreeCall::TreatAsHasRetBufArg(Compiler* compiler) const
{
if (HasRetBufArg())
{
return true;
}
else
{
// If we see a Jit helper call that returns a TYP_STRUCT we will
// transform it as if it has a Return Buffer Argument
//
if (IsHelperCall() && (gtReturnType == TYP_STRUCT))
{
// There are two possible helper calls that use this path:
// CORINFO_HELP_GETFIELDSTRUCT and CORINFO_HELP_UNBOX_NULLABLE
//
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(gtCallMethHnd);
if (helpFunc == CORINFO_HELP_GETFIELDSTRUCT)
{
return true;
}
else if (helpFunc == CORINFO_HELP_UNBOX_NULLABLE)
{
return true;
}
else
{
assert(!"Unexpected JIT helper in TreatAsHasRetBufArg");
}
}
}
return false;
}
//-------------------------------------------------------------------------
// IsHelperCall: Determine if this GT_CALL node is a specific helper call.
//
// Arguments:
// compiler - the compiler instance so that we can call eeFindHelper
//
// Return Value:
// Returns true if this GT_CALL node is a call to the specified helper.
//
bool GenTreeCall::IsHelperCall(Compiler* compiler, unsigned helper) const
{
return IsHelperCall(compiler->eeFindHelper(helper));
}
//------------------------------------------------------------------------
// GenTreeCall::ReplaceCallOperand:
// Replaces a given operand to a call node and updates the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTreeCall::ReplaceCallOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
GenTree* originalOperand = *useEdge;
*useEdge = replacement;
const bool isArgument =
(replacement != gtControlExpr) &&
((gtCallType != CT_INDIRECT) || ((replacement != gtCallCookie) && (replacement != gtCallAddr)));
if (isArgument)
{
if ((originalOperand->gtFlags & GTF_LATE_ARG) != 0)
{
replacement->gtFlags |= GTF_LATE_ARG;
}
else
{
assert((replacement->gtFlags & GTF_LATE_ARG) == 0);
fgArgTabEntry* fp = Compiler::gtArgEntryByNode(this, replacement);
assert(fp->GetNode() == replacement);
}
}
}
//-------------------------------------------------------------------------
// AreArgsComplete: Determine if this GT_CALL node's arguments have been processed.
//
// Return Value:
// Returns true if fgMorphArgs has processed the arguments.
//
bool GenTreeCall::AreArgsComplete() const
{
if (fgArgInfo == nullptr)
{
return false;
}
if (fgArgInfo->AreArgsComplete())
{
assert((gtCallLateArgs != nullptr) || !fgArgInfo->HasRegArgs());
return true;
}
#if defined(FEATURE_FASTTAILCALL)
// If we have FEATURE_FASTTAILCALL, 'fgCanFastTailCall()' can call 'fgInitArgInfo()', and in that
// scenario it is valid to have 'fgArgInfo' be non-null when 'fgMorphArgs()' first queries this,
// when it hasn't yet morphed the arguments.
#else
assert(gtCallArgs == nullptr);
#endif
return false;
}
//--------------------------------------------------------------------------
// Equals: Check if 2 CALL nodes are equal.
//
// Arguments:
// c1 - The first call node
// c2 - The second call node
//
// Return Value:
// true if the 2 CALL nodes have the same type and operands
//
bool GenTreeCall::Equals(GenTreeCall* c1, GenTreeCall* c2)
{
assert(c1->OperGet() == c2->OperGet());
if (c1->TypeGet() != c2->TypeGet())
{
return false;
}
if (c1->gtCallType != c2->gtCallType)
{
return false;
}
if (c1->gtCallType != CT_INDIRECT)
{
if (c1->gtCallMethHnd != c2->gtCallMethHnd)
{
return false;
}
#ifdef FEATURE_READYTORUN
if (c1->gtEntryPoint.addr != c2->gtEntryPoint.addr)
{
return false;
}
#endif
}
else
{
if (!Compare(c1->gtCallAddr, c2->gtCallAddr))
{
return false;
}
}
if ((c1->gtCallThisArg != nullptr) != (c2->gtCallThisArg != nullptr))
{
return false;
}
if ((c1->gtCallThisArg != nullptr) && !Compare(c1->gtCallThisArg->GetNode(), c2->gtCallThisArg->GetNode()))
{
return false;
}
GenTreeCall::UseIterator i1 = c1->Args().begin();
GenTreeCall::UseIterator end1 = c1->Args().end();
GenTreeCall::UseIterator i2 = c2->Args().begin();
GenTreeCall::UseIterator end2 = c2->Args().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
i1 = c1->LateArgs().begin();
end1 = c1->LateArgs().end();
i2 = c2->LateArgs().begin();
end2 = c2->LateArgs().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
if (!Compare(c1->gtControlExpr, c2->gtControlExpr))
{
return false;
}
return true;
}
//--------------------------------------------------------------------------
// ResetArgInfo: The argument info needs to be reset so it can be recomputed based on some change
// in conditions, such as changing the return type of a call due to giving up on doing a tailcall.
// If there is no fgArgInfo computed yet for this call, then there is nothing to reset.
//
void GenTreeCall::ResetArgInfo()
{
if (fgArgInfo == nullptr)
{
return;
}
// We would like to just set `fgArgInfo = nullptr`. But `fgInitArgInfo()` not
// only sets up fgArgInfo, it also adds non-standard args to the IR, and we need
// to remove that extra IR so it doesn't get added again.
//
unsigned argNum = 0;
if (gtCallThisArg != nullptr)
{
argNum++;
}
Use** link = >CallArgs;
while ((*link) != nullptr)
{
const fgArgTabEntry* entry = fgArgInfo->GetArgEntry(argNum);
if (entry->isNonStandard() && entry->isNonStandardArgAddedLate())
{
            JITDUMP("Removing non-standard arg %s [%06u] to prepare for re-morphing call [%06u]\n",
getNonStandardArgKindName(entry->nonStandardArgKind), Compiler::dspTreeID((*link)->GetNode()),
gtTreeID);
*link = (*link)->GetNext();
}
else
{
link = &(*link)->NextRef();
}
argNum++;
}
fgArgInfo = nullptr;
}
#if !defined(FEATURE_PUT_STRUCT_ARG_STK)
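//---------------------------------------------------------------
// GetStackByteSize: Get the number of stack bytes used by this PUTARG_STK node.
//
// Notes:
//    On targets without FEATURE_PUT_STRUCT_ARG_STK the size is simply that of
//    the operand's actual type.
//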
unsigned GenTreePutArgStk::GetStackByteSize() const
{
return genTypeSize(genActualType(gtOp1->gtType));
}
#endif // !defined(FEATURE_PUT_STRUCT_ARG_STK)
/*****************************************************************************
*
 *  Returns true if the two trees are identical. When 'swapOK' is true, the operands
 *  of a commutative operator may be matched in swapped order, provided neither side
 *  has side effects.
*/
bool GenTree::Compare(GenTree* op1, GenTree* op2, bool swapOK)
{
genTreeOps oper;
unsigned kind;
// printf("tree1:\n"); gtDispTree(op1);
// printf("tree2:\n"); gtDispTree(op2);
AGAIN:
if (op1 == nullptr)
{
return (op2 == nullptr);
}
if (op2 == nullptr)
{
return false;
}
if (op1 == op2)
{
return true;
}
oper = op1->OperGet();
/* The operators must be equal */
if (oper != op2->gtOper)
{
return false;
}
/* The types must be equal */
if (op1->gtType != op2->gtType)
{
return false;
}
/* Overflow must be equal */
if (op1->gtOverflowEx() != op2->gtOverflowEx())
{
return false;
}
/* Sensible flags must be equal */
if ((op1->gtFlags & (GTF_UNSIGNED)) != (op2->gtFlags & (GTF_UNSIGNED)))
{
return false;
}
/* Figure out what kind of nodes we're comparing */
kind = op1->OperKind();
/* Is this a constant node? */
if (op1->OperIsConst())
{
switch (oper)
{
case GT_CNS_INT:
if (op1->AsIntCon()->gtIconVal == op2->AsIntCon()->gtIconVal)
{
return true;
}
break;
case GT_CNS_STR:
if ((op1->AsStrCon()->gtSconCPX == op2->AsStrCon()->gtSconCPX) &&
(op1->AsStrCon()->gtScpHnd == op2->AsStrCon()->gtScpHnd))
{
return true;
}
break;
#if 0
// TODO-CQ: Enable this in the future
case GT_CNS_LNG:
if (op1->AsLngCon()->gtLconVal == op2->AsLngCon()->gtLconVal)
return true;
break;
case GT_CNS_DBL:
if (op1->AsDblCon()->gtDconVal == op2->AsDblCon()->gtDconVal)
return true;
break;
#endif
default:
break;
}
return false;
}
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_LCL_VAR:
if (op1->AsLclVarCommon()->GetLclNum() != op2->AsLclVarCommon()->GetLclNum())
{
break;
}
return true;
case GT_LCL_FLD:
if ((op1->AsLclFld()->GetLclNum() != op2->AsLclFld()->GetLclNum()) ||
(op1->AsLclFld()->GetLclOffs() != op2->AsLclFld()->GetLclOffs()))
{
break;
}
return true;
case GT_CLS_VAR:
if (op1->AsClsVar()->gtClsVarHnd != op2->AsClsVar()->gtClsVarHnd)
{
break;
}
return true;
case GT_LABEL:
return true;
case GT_ARGPLACE:
if ((op1->gtType == TYP_STRUCT) &&
(op1->AsArgPlace()->gtArgPlaceClsHnd != op2->AsArgPlace()->gtArgPlaceClsHnd))
{
break;
}
return true;
default:
break;
}
return false;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_UNOP)
{
if (IsExOp(kind))
{
            // ExOp operators extend unary operators with extra, non-GenTree* members. In many cases,
            // these should be included in the comparison.
switch (oper)
{
case GT_ARR_LENGTH:
if (op1->AsArrLen()->ArrLenOffset() != op2->AsArrLen()->ArrLenOffset())
{
return false;
}
break;
case GT_CAST:
if (op1->AsCast()->gtCastType != op2->AsCast()->gtCastType)
{
return false;
}
break;
case GT_BLK:
case GT_OBJ:
if (op1->AsBlk()->GetLayout() != op2->AsBlk()->GetLayout())
{
return false;
}
break;
case GT_FIELD:
if (op1->AsField()->gtFldHnd != op2->AsField()->gtFldHnd)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_BOX:
case GT_RUNTIMELOOKUP:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
return Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1);
}
if (kind & GTK_BINOP)
{
if (IsExOp(kind))
{
            // ExOp operators extend binary operators with extra, non-GenTree* members. In many cases,
            // these should be included in the comparison.
switch (oper)
{
case GT_INTRINSIC:
if (op1->AsIntrinsic()->gtIntrinsicName != op2->AsIntrinsic()->gtIntrinsicName)
{
return false;
}
break;
case GT_LEA:
if (op1->AsAddrMode()->gtScale != op2->AsAddrMode()->gtScale)
{
return false;
}
if (op1->AsAddrMode()->Offset() != op2->AsAddrMode()->Offset())
{
return false;
}
break;
case GT_BOUNDS_CHECK:
if (op1->AsBoundsChk()->gtThrowKind != op2->AsBoundsChk()->gtThrowKind)
{
return false;
}
break;
case GT_INDEX:
if (op1->AsIndex()->gtIndElemSize != op2->AsIndex()->gtIndElemSize)
{
return false;
}
break;
case GT_INDEX_ADDR:
if (op1->AsIndexAddr()->gtElemSize != op2->AsIndexAddr()->gtElemSize)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_QMARK:
break;
default:
assert(!"unexpected binary ExOp operator");
}
}
if (op1->AsOp()->gtOp2)
{
if (!Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1, swapOK))
{
if (swapOK && OperIsCommutative(oper) &&
((op1->AsOp()->gtOp1->gtFlags | op1->AsOp()->gtOp2->gtFlags | op2->AsOp()->gtOp1->gtFlags |
op2->AsOp()->gtOp2->gtFlags) &
GTF_ALL_EFFECT) == 0)
{
if (Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp2, swapOK))
{
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp1;
goto AGAIN;
}
}
return false;
}
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp2;
goto AGAIN;
}
else
{
op1 = op1->AsOp()->gtOp1;
op2 = op2->AsOp()->gtOp1;
if (!op1)
{
return (op2 == nullptr);
}
if (!op2)
{
return false;
}
goto AGAIN;
}
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
return GenTreeCall::Equals(op1->AsCall(), op2->AsCall());
#ifdef FEATURE_SIMD
case GT_SIMD:
return GenTreeSIMD::Equals(op1->AsSIMD(), op2->AsSIMD());
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
return GenTreeHWIntrinsic::Equals(op1->AsHWIntrinsic(), op2->AsHWIntrinsic());
#endif
case GT_ARR_ELEM:
if (op1->AsArrElem()->gtArrRank != op2->AsArrElem()->gtArrRank)
{
return false;
}
// NOTE: gtArrElemSize may need to be handled
unsigned dim;
for (dim = 0; dim < op1->AsArrElem()->gtArrRank; dim++)
{
if (!Compare(op1->AsArrElem()->gtArrInds[dim], op2->AsArrElem()->gtArrInds[dim]))
{
return false;
}
}
op1 = op1->AsArrElem()->gtArrObj;
op2 = op2->AsArrElem()->gtArrObj;
goto AGAIN;
case GT_ARR_OFFSET:
if (op1->AsArrOffs()->gtCurrDim != op2->AsArrOffs()->gtCurrDim ||
op1->AsArrOffs()->gtArrRank != op2->AsArrOffs()->gtArrRank)
{
return false;
}
return (Compare(op1->AsArrOffs()->gtOffset, op2->AsArrOffs()->gtOffset) &&
Compare(op1->AsArrOffs()->gtIndex, op2->AsArrOffs()->gtIndex) &&
Compare(op1->AsArrOffs()->gtArrObj, op2->AsArrOffs()->gtArrObj));
case GT_PHI:
return GenTreePhi::Equals(op1->AsPhi(), op2->AsPhi());
case GT_FIELD_LIST:
return GenTreeFieldList::Equals(op1->AsFieldList(), op2->AsFieldList());
case GT_CMPXCHG:
return Compare(op1->AsCmpXchg()->gtOpLocation, op2->AsCmpXchg()->gtOpLocation) &&
Compare(op1->AsCmpXchg()->gtOpValue, op2->AsCmpXchg()->gtOpValue) &&
Compare(op1->AsCmpXchg()->gtOpComparand, op2->AsCmpXchg()->gtOpComparand);
case GT_STORE_DYN_BLK:
return Compare(op1->AsStoreDynBlk()->Addr(), op2->AsStoreDynBlk()->Addr()) &&
Compare(op1->AsStoreDynBlk()->Data(), op2->AsStoreDynBlk()->Data()) &&
Compare(op1->AsStoreDynBlk()->gtDynamicSize, op2->AsStoreDynBlk()->gtDynamicSize);
default:
assert(!"unexpected operator");
}
return false;
}
//------------------------------------------------------------------------
// gtHasRef: Find out whether the given tree contains a local/field.
//
// Arguments:
// tree - tree to find the local in
// lclNum - the local's number, *or* the handle for the field
//
// Return Value:
// Whether "tree" has any LCL_VAR/LCL_FLD nodes that refer to the
// local, LHS or RHS, or FIELD nodes with the specified handle.
//
// Notes:
// Does not pay attention to local address nodes.
//
/* static */ bool Compiler::gtHasRef(GenTree* tree, ssize_t lclNum)
{
if (tree == nullptr)
{
return false;
}
if (tree->OperIsLeaf())
{
if (tree->OperIs(GT_LCL_VAR, GT_LCL_FLD) && (tree->AsLclVarCommon()->GetLclNum() == (unsigned)lclNum))
{
return true;
}
if (tree->OperIs(GT_RET_EXPR))
{
return gtHasRef(tree->AsRetExpr()->gtInlineCandidate, lclNum);
}
return false;
}
if (tree->OperIsUnary())
{
// Code in importation (see CEE_STFLD in impImportBlockCode), when
// spilling, can pass us "lclNum" that is actually a field handle...
if (tree->OperIs(GT_FIELD) && (lclNum == (ssize_t)tree->AsField()->gtFldHnd))
{
return true;
}
return gtHasRef(tree->AsUnOp()->gtGetOp1(), lclNum);
}
if (tree->OperIsBinary())
{
return gtHasRef(tree->AsOp()->gtGetOp1(), lclNum) || gtHasRef(tree->AsOp()->gtGetOp2(), lclNum);
}
bool result = false;
tree->VisitOperands([lclNum, &result](GenTree* operand) -> GenTree::VisitResult {
if (gtHasRef(operand, lclNum))
{
result = true;
return GenTree::VisitResult::Abort;
}
return GenTree::VisitResult::Continue;
});
return result;
}
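// Walk data for gtHasLocalsWithAddrOp: records whether any local with lvHasLdAddrOp
// or IsAddressExposed() set was seen during the tree walk.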
struct AddrTakenDsc
{
Compiler* comp;
bool hasAddrTakenLcl;
};
/* static */
Compiler::fgWalkResult Compiler::gtHasLocalsWithAddrOpCB(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
Compiler* comp = data->compiler;
if (tree->gtOper == GT_LCL_VAR)
{
const LclVarDsc* varDsc = comp->lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed())
{
((AddrTakenDsc*)data->pCallbackData)->hasAddrTakenLcl = true;
return WALK_ABORT;
}
}
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Return true if this tree contains locals with lvHasLdAddrOp or IsAddressExposed()
* flag(s) set.
*/
bool Compiler::gtHasLocalsWithAddrOp(GenTree* tree)
{
AddrTakenDsc desc;
desc.comp = this;
desc.hasAddrTakenLcl = false;
fgWalkTreePre(&tree, gtHasLocalsWithAddrOpCB, &desc);
return desc.hasAddrTakenLcl;
}
#ifdef DEBUG
/*****************************************************************************
*
* Helper used to compute hash values for trees.
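 *
 *  These mix a new value into a running hash; the hash is DEBUG-only (used by
 *  gtHashValue below) and is not intended to be a strong hash function.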
*/
inline unsigned genTreeHashAdd(unsigned old, unsigned add)
{
return (old + old / 2) ^ add;
}
inline unsigned genTreeHashAdd(unsigned old, void* add)
{
return genTreeHashAdd(old, (unsigned)(size_t)add);
}
/*****************************************************************************
*
* Given an arbitrary expression tree, compute a hash value for it.
*/
unsigned Compiler::gtHashValue(GenTree* tree)
{
genTreeOps oper;
unsigned kind;
unsigned hash = 0;
GenTree* temp;
AGAIN:
assert(tree);
/* Figure out what kind of a node we have */
oper = tree->OperGet();
kind = tree->OperKind();
/* Include the operator value in the hash */
hash = genTreeHashAdd(hash, oper);
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
size_t add;
switch (oper)
{
UINT64 bits;
case GT_LCL_VAR:
add = tree->AsLclVar()->GetLclNum();
break;
case GT_LCL_FLD:
hash = genTreeHashAdd(hash, tree->AsLclFld()->GetLclNum());
add = tree->AsLclFld()->GetLclOffs();
break;
case GT_CNS_INT:
add = tree->AsIntCon()->gtIconVal;
break;
case GT_CNS_LNG:
bits = (UINT64)tree->AsLngCon()->gtLconVal;
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_DBL:
bits = *(UINT64*)(&tree->AsDblCon()->gtDconVal);
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_STR:
add = tree->AsStrCon()->gtSconCPX;
break;
case GT_JMP:
add = tree->AsVal()->gtVal1;
break;
default:
add = 0;
break;
}
// clang-format off
// narrow 'add' into a 32-bit 'val'
unsigned val;
#ifdef HOST_64BIT
val = genTreeHashAdd(uhi32(add), ulo32(add));
#else // 32-bit host
val = add;
#endif
// clang-format on
hash = genTreeHashAdd(hash, val);
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
GenTree* op1;
if (kind & GTK_UNOP)
{
op1 = tree->AsOp()->gtOp1;
/* Special case: no sub-operand at all */
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_ARR_LENGTH:
hash += tree->AsArrLen()->ArrLenOffset();
break;
case GT_CAST:
hash ^= tree->AsCast()->gtCastType;
break;
case GT_INDEX:
hash += tree->AsIndex()->gtIndElemSize;
break;
case GT_INDEX_ADDR:
hash += tree->AsIndexAddr()->gtElemSize;
break;
case GT_ALLOCOBJ:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsAllocObj()->gtAllocObjClsHnd)));
hash = genTreeHashAdd(hash, tree->AsAllocObj()->gtNewHelper);
break;
case GT_RUNTIMELOOKUP:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsRuntimeLookup()->gtHnd)));
break;
case GT_BLK:
case GT_OBJ:
hash =
genTreeHashAdd(hash,
static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->AsBlk()->GetLayout())));
break;
case GT_FIELD:
hash = genTreeHashAdd(hash, tree->AsField()->gtFldHnd);
break;
                // For the ones below no extra argument matters for the hash.
case GT_BOX:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
if (!op1)
{
goto DONE;
}
tree = op1;
goto AGAIN;
}
if (kind & GTK_BINOP)
{
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_INTRINSIC:
hash += tree->AsIntrinsic()->gtIntrinsicName;
break;
case GT_LEA:
hash += static_cast<unsigned>(tree->AsAddrMode()->Offset() << 3) + tree->AsAddrMode()->gtScale;
break;
case GT_BOUNDS_CHECK:
hash = genTreeHashAdd(hash, tree->AsBoundsChk()->gtThrowKind);
break;
case GT_STORE_BLK:
case GT_STORE_OBJ:
hash ^= PtrToUlong(tree->AsBlk()->GetLayout());
break;
                // For the ones below no extra argument matters for the hash.
case GT_ARR_INDEX:
case GT_QMARK:
case GT_INDEX:
case GT_INDEX_ADDR:
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
hash += tree->AsSIMD()->GetSIMDIntrinsicId();
hash += tree->AsSIMD()->GetSimdBaseType();
hash += tree->AsSIMD()->GetSimdSize();
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
hash += tree->AsHWIntrinsic()->GetHWIntrinsicId();
hash += tree->AsHWIntrinsic()->GetSimdBaseType();
hash += tree->AsHWIntrinsic()->GetSimdSize();
hash += tree->AsHWIntrinsic()->GetAuxiliaryType();
hash += tree->AsHWIntrinsic()->GetOtherReg();
break;
#endif // FEATURE_HW_INTRINSICS
default:
assert(!"unexpected binary ExOp operator");
}
}
op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
/* Is there a second sub-operand? */
if (!op2)
{
/* Special case: no sub-operands at all */
if (!op1)
{
goto DONE;
}
/* This is a unary operator */
tree = op1;
goto AGAIN;
}
/* This is a binary operator */
unsigned hsh1 = gtHashValue(op1);
/* Add op1's hash to the running value and continue with op2 */
hash = genTreeHashAdd(hash, hsh1);
tree = op2;
goto AGAIN;
}
/* See what kind of a special operator we have here */
switch (tree->gtOper)
{
case GT_ARR_ELEM:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrObj));
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrInds[dim]));
}
break;
case GT_ARR_OFFSET:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtOffset));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtIndex));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtArrObj));
break;
case GT_CALL:
if ((tree->AsCall()->gtCallThisArg != nullptr) && !tree->AsCall()->gtCallThisArg->GetNode()->OperIs(GT_NOP))
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCall()->gtCallThisArg->GetNode()));
}
for (GenTreeCall::Use& use : tree->AsCall()->Args())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
temp = tree->AsCall()->gtCallAddr;
assert(temp);
hash = genTreeHashAdd(hash, gtHashValue(temp));
}
else
{
hash = genTreeHashAdd(hash, tree->AsCall()->gtCallMethHnd);
}
for (GenTreeCall::Use& use : tree->AsCall()->LateArgs())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
// TODO-List: rewrite with a general visitor / iterator?
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
hash = genTreeHashAdd(hash, gtHashValue(operand));
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_CMPXCHG:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpLocation));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpValue));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpComparand));
break;
case GT_STORE_DYN_BLK:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Data()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Addr()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->gtDynamicSize));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
assert(!"unexpected operator");
break;
}
DONE:
return hash;
}
#endif // DEBUG
/*****************************************************************************
*
* Return a relational operator that is the reverse of the given one.
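 *
 *  For example, reversing GT_LT yields GT_GE - the logical negation of the
 *  original condition.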
*/
/* static */
genTreeOps GenTree::ReverseRelop(genTreeOps relop)
{
static const genTreeOps reverseOps[] = {
GT_NE, // GT_EQ
GT_EQ, // GT_NE
GT_GE, // GT_LT
GT_GT, // GT_LE
GT_LT, // GT_GE
GT_LE, // GT_GT
GT_TEST_NE, // GT_TEST_EQ
GT_TEST_EQ, // GT_TEST_NE
};
assert(reverseOps[GT_EQ - GT_EQ] == GT_NE);
assert(reverseOps[GT_NE - GT_EQ] == GT_EQ);
assert(reverseOps[GT_LT - GT_EQ] == GT_GE);
assert(reverseOps[GT_LE - GT_EQ] == GT_GT);
assert(reverseOps[GT_GE - GT_EQ] == GT_LT);
assert(reverseOps[GT_GT - GT_EQ] == GT_LE);
assert(reverseOps[GT_TEST_EQ - GT_EQ] == GT_TEST_NE);
assert(reverseOps[GT_TEST_NE - GT_EQ] == GT_TEST_EQ);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(reverseOps));
return reverseOps[relop - GT_EQ];
}
/*****************************************************************************
*
* Return a relational operator that will work for swapped operands.
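 *
 *  For example, (a < b) is equivalent to (b > a), so swapping the operands of
 *  GT_LT yields GT_GT.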
*/
/* static */
genTreeOps GenTree::SwapRelop(genTreeOps relop)
{
static const genTreeOps swapOps[] = {
GT_EQ, // GT_EQ
GT_NE, // GT_NE
GT_GT, // GT_LT
GT_GE, // GT_LE
GT_LE, // GT_GE
GT_LT, // GT_GT
GT_TEST_EQ, // GT_TEST_EQ
GT_TEST_NE, // GT_TEST_NE
};
assert(swapOps[GT_EQ - GT_EQ] == GT_EQ);
assert(swapOps[GT_NE - GT_EQ] == GT_NE);
assert(swapOps[GT_LT - GT_EQ] == GT_GT);
assert(swapOps[GT_LE - GT_EQ] == GT_GE);
assert(swapOps[GT_GE - GT_EQ] == GT_LE);
assert(swapOps[GT_GT - GT_EQ] == GT_LT);
assert(swapOps[GT_TEST_EQ - GT_EQ] == GT_TEST_EQ);
assert(swapOps[GT_TEST_NE - GT_EQ] == GT_TEST_NE);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(swapOps));
return swapOps[relop - GT_EQ];
}
/*****************************************************************************
*
* Reverse the meaning of the given test condition.
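 *
 *  Compares have their relop reversed (and GTF_RELOP_NAN_UN flipped for floating
 *  point), GT_JCC/GT_SETCC/GT_JCMP have their condition flipped, and any other
 *  tree is wrapped in a GT_NOT node.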
*/
GenTree* Compiler::gtReverseCond(GenTree* tree)
{
if (tree->OperIsCompare())
{
tree->SetOper(GenTree::ReverseRelop(tree->OperGet()));
// Flip the GTF_RELOP_NAN_UN bit
// a ord b === (a != NaN && b != NaN)
// a unord b === (a == NaN || b == NaN)
// => !(a ord b) === (a unord b)
if (varTypeIsFloating(tree->AsOp()->gtOp1->TypeGet()))
{
tree->gtFlags ^= GTF_RELOP_NAN_UN;
}
}
else if (tree->OperIs(GT_JCC, GT_SETCC))
{
GenTreeCC* cc = tree->AsCC();
cc->gtCondition = GenCondition::Reverse(cc->gtCondition);
}
else if (tree->OperIs(GT_JCMP))
{
// Flip the GTF_JCMP_EQ
//
// This causes switching
// cbz <=> cbnz
// tbz <=> tbnz
tree->gtFlags ^= GTF_JCMP_EQ;
}
else
{
tree = gtNewOperNode(GT_NOT, TYP_INT, tree);
}
return tree;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
//------------------------------------------------------------------------------
// IsValidLongMul : Check for long multiplication with 32 bit operands.
//
// Recognizes the following tree: MUL(CAST(long <- int), CAST(long <- int) or CONST),
// where CONST must be an integer constant that fits in 32 bits. Will try to detect
// cases when the multiplication cannot overflow and return "true" for them.
//
// This function does not change the state of the tree and is usable in LIR.
//
// Return Value:
// Whether this GT_MUL tree is a valid long multiplication candidate.
//
bool GenTreeOp::IsValidLongMul()
{
assert(OperIs(GT_MUL));
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
if (!TypeIs(TYP_LONG))
{
return false;
}
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
if (!(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp())))
{
return false;
}
if (!(op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) &&
!(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())))
{
return false;
}
if (op1->gtOverflow() || op2->gtOverflowEx())
{
return false;
}
if (gtOverflow())
{
auto getMaxValue = [this](GenTree* op) -> int64_t {
if (op->OperIs(GT_CAST))
{
if (op->IsUnsigned())
{
switch (op->AsCast()->CastOp()->TypeGet())
{
case TYP_UBYTE:
return UINT8_MAX;
case TYP_USHORT:
return UINT16_MAX;
default:
return UINT32_MAX;
}
}
return IsUnsigned() ? static_cast<int64_t>(UINT64_MAX) : INT32_MIN;
}
return op->AsIntConCommon()->IntegralValue();
};
int64_t maxOp1 = getMaxValue(op1);
int64_t maxOp2 = getMaxValue(op2);
if (CheckedOps::MulOverflows(maxOp1, maxOp2, IsUnsigned()))
{
return false;
}
}
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
if ((op1ZeroExtends != op2ZeroExtends) && !op2AnyExtensionIsSuitable)
{
return false;
}
return true;
}
#if !defined(TARGET_64BIT) && defined(DEBUG)
//------------------------------------------------------------------------------
// DebugCheckLongMul : Checks that a GTF_MUL_64RSLT tree is a valid MUL_LONG.
//
// Notes:
// This function is defined for 32 bit targets only because we *must* maintain
// the MUL_LONG-compatible tree shape throughout the compilation from morph to
// decomposition, since we do not have (great) ability to create new calls in LIR.
//
// It is for this reason that we recognize MUL_LONGs early in morph, mark them with
// a flag and then pessimize various places (e. g. assertion propagation) to not look
// at them. In contrast, on ARM64 we recognize MUL_LONGs late, in lowering, and thus
// do not need this function.
//
void GenTreeOp::DebugCheckLongMul()
{
assert(OperIs(GT_MUL));
assert(Is64RsltMul());
assert(TypeIs(TYP_LONG));
assert(!gtOverflow());
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
// op1 has to be CAST(long <- int)
assert(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp()));
assert(!op1->gtOverflow());
// op2 has to be CAST(long <- int) or a suitably small constant.
assert((op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) ||
(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())));
assert(!op2->gtOverflowEx());
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
assert((op1ZeroExtends == op2ZeroExtends) || op2AnyExtensionIsSuitable);
// Do unsigned mul iff both operands are zero-extending.
assert(op1->IsUnsigned() == IsUnsigned());
}
#endif // !defined(TARGET_64BIT) && defined(DEBUG)
#endif // !defined(TARGET_64BIT) || defined(TARGET_ARM64)
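//-----------------------------------------------------------------------------
// gtSetCallArgsOrder: Set the evaluation order and accumulate costs for a call's
// argument list.
//
// Arguments:
//    args       - the argument use list (early or late args)
//    lateArgs   - true if 'args' is the late argument list
//    callCostEx - [in, out] the call's accumulated execution cost
//    callCostSz - [in, out] the call's accumulated size cost
//
// Return Value:
//    The maximum "level" (Sethi complexity) among the argument trees.
//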
unsigned Compiler::gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz)
{
unsigned level = 0;
unsigned costEx = 0;
unsigned costSz = 0;
for (GenTreeCall::Use& use : args)
{
GenTree* argNode = use.GetNode();
unsigned argLevel = gtSetEvalOrder(argNode);
if (argLevel > level)
{
level = argLevel;
}
if (argNode->GetCostEx() != 0)
{
costEx += argNode->GetCostEx();
costEx += lateArgs ? 0 : IND_COST_EX;
}
if (argNode->GetCostSz() != 0)
{
costSz += argNode->GetCostSz();
#ifdef TARGET_XARCH
if (lateArgs) // push is smaller than mov to reg
#endif
{
costSz += 1;
}
}
}
*callCostEx += costEx;
*callCostSz += costSz;
return level;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// gtSetMultiOpOrder: Calculate the costs for a MultiOp.
//
// Currently this function just preserves the previous behavior.
// TODO-List-Cleanup: implement proper costing for these trees.
//
// Arguments:
// multiOp - The MultiOp tree in question
//
// Return Value:
// The Sethi "complexity" for this tree (the idealized number of
// registers needed to evaluate it).
//
unsigned Compiler::gtSetMultiOpOrder(GenTreeMultiOp* multiOp)
{
// These default costs preserve previous behavior.
// TODO-CQ: investigate opportunities for tuning them.
int costEx = 1;
int costSz = 1;
unsigned level = 0;
unsigned lvl2 = 0;
#if defined(FEATURE_HW_INTRINSICS)
if (multiOp->OperIs(GT_HWINTRINSIC))
{
GenTreeHWIntrinsic* hwTree = multiOp->AsHWIntrinsic();
#if defined(TARGET_XARCH)
if ((hwTree->GetOperandCount() == 1) && hwTree->OperIsMemoryLoadOrStore())
{
costEx = IND_COST_EX;
costSz = 2;
GenTree* const addrNode = hwTree->Op(1);
level = gtSetEvalOrder(addrNode);
GenTree* const addr = addrNode->gtEffectiveVal();
// See if we can form a complex addressing mode.
if (addr->OperIs(GT_ADD) && gtMarkAddrMode(addr, &costEx, &costSz, hwTree->TypeGet()))
{
// Nothing to do, costs have been set.
}
else
{
costEx += addr->GetCostEx();
costSz += addr->GetCostSz();
}
hwTree->SetCosts(costEx, costSz);
return level;
}
#endif
switch (hwTree->GetHWIntrinsicId())
{
#if defined(TARGET_XARCH)
case NI_Vector128_Create:
case NI_Vector256_Create:
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
case NI_Vector128_Create:
#endif
{
if ((hwTree->GetOperandCount() == 1) && hwTree->Op(1)->OperIsConst())
{
                    // Vector.Create(cns) is cheap, but not cheap enough to warrant the minimal (1,1) costs
costEx = IND_COST_EX;
costSz = 2;
level = gtSetEvalOrder(hwTree->Op(1));
hwTree->SetCosts(costEx, costSz);
return level;
}
break;
}
default:
break;
}
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// This code is here to preserve previous behavior.
switch (multiOp->GetOperandCount())
{
case 0:
// This is a constant HWIntrinsic, we already have correct costs.
break;
case 1:
// A "unary" case.
level = gtSetEvalOrder(multiOp->Op(1));
costEx += multiOp->Op(1)->GetCostEx();
costSz += multiOp->Op(1)->GetCostSz();
break;
case 2:
// A "binary" case.
// This way we have "level" be the complexity of the
// first tree to be evaluated, and "lvl2" - the second.
if (multiOp->IsReverseOp())
{
level = gtSetEvalOrder(multiOp->Op(2));
lvl2 = gtSetEvalOrder(multiOp->Op(1));
}
else
{
level = gtSetEvalOrder(multiOp->Op(1));
lvl2 = gtSetEvalOrder(multiOp->Op(2));
}
// We want the more complex tree to be evaluated first.
if (level < lvl2)
{
bool canSwap = multiOp->IsReverseOp() ? gtCanSwapOrder(multiOp->Op(2), multiOp->Op(1))
: gtCanSwapOrder(multiOp->Op(1), multiOp->Op(2));
if (canSwap)
{
if (multiOp->IsReverseOp())
{
multiOp->ClearReverseOp();
}
else
{
multiOp->SetReverseOp();
}
std::swap(level, lvl2);
}
}
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx += (multiOp->Op(1)->GetCostEx() + multiOp->Op(2)->GetCostEx());
costSz += (multiOp->Op(1)->GetCostSz() + multiOp->Op(2)->GetCostSz());
break;
default:
// The former "ArgList" case... we'll be emulating it here.
// The old implementation pushed the nodes on the list, in pre-order.
// Then it popped and costed them in "reverse order", so that's what
// we'll be doing here as well.
unsigned nxtlvl = 0;
for (size_t i = multiOp->GetOperandCount(); i >= 1; i--)
{
GenTree* op = multiOp->Op(i);
unsigned lvl = gtSetEvalOrder(op);
if (lvl < 1)
{
level = nxtlvl;
}
else if (lvl == nxtlvl)
{
level = lvl + 1;
}
else
{
level = lvl;
}
costEx += op->GetCostEx();
costSz += op->GetCostSz();
// Preserving previous behavior...
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_XARCH
if (op->GetCostSz() != 0)
{
costSz += 1;
}
#endif
nxtlvl = level;
}
break;
}
multiOp->SetCosts(costEx, costSz);
return level;
}
#endif
//-----------------------------------------------------------------------------
// gtWalkOp: Traverse and mark an address expression
//
// Arguments:
// op1WB - An out parameter which is either the address expression, or one
// of its operands.
// op2WB - An out parameter which starts as either null or one of the operands
// of the address expression.
// base - The base address of the addressing mode, or null if 'constOnly' is false
// constOnly - True if we will only traverse into ADDs with constant op2.
//
// This routine is a helper routine for gtSetEvalOrder() and is used to identify the
// base and index nodes, which will be validated against those identified by
// genCreateAddrMode().
// It also marks the ADD nodes involved in the address expression with the
// GTF_ADDRMODE_NO_CSE flag which prevents them from being considered for CSE's.
//
// Its two output parameters are modified under the following conditions:
//
// It is called once with the original address expression as 'op1WB', and
// with 'constOnly' set to false. On this first invocation, *op1WB is always
// an ADD node, and it will consider the operands of the ADD even if its op2 is
// not a constant. However, when it encounters a non-constant or the base in the
// op2 position, it stops iterating. That operand is returned in the 'op2WB' out
// parameter, and will be considered on the third invocation of this method if
// it is an ADD.
//
// It is called the second time with the two operands of the original expression, in
// the original order, and the third time in reverse order. For these invocations
// 'constOnly' is true, so it will only traverse cascaded ADD nodes if they have a
// constant op2.
//
// The result, after three invocations, is that the values of the two out parameters
// correspond to the base and index in some fashion. This method doesn't attempt
// to determine or validate the scale or offset, if any.
//
// Assumptions (presumed to be ensured by genCreateAddrMode()):
// If an ADD has a constant operand, it is in the op2 position.
//
// Notes:
// This method, and its invocation sequence, are quite confusing, and since they
// were not originally well-documented, this specification is a possibly-imperfect
// reconstruction.
// The motivation for the handling of the NOP case is unclear.
// Note that 'op2WB' is only modified in the initial (!constOnly) case,
// or if a NOP is encountered in the op1 position.
//
void Compiler::gtWalkOp(GenTree** op1WB, GenTree** op2WB, GenTree* base, bool constOnly)
{
GenTree* op1 = *op1WB;
GenTree* op2 = *op2WB;
op1 = op1->gtEffectiveVal();
// Now we look for op1's with non-overflow GT_ADDs [of constants]
while ((op1->gtOper == GT_ADD) && (!op1->gtOverflow()) && (!constOnly || (op1->AsOp()->gtOp2->IsCnsIntOrI())))
{
// mark it with GTF_ADDRMODE_NO_CSE
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (!constOnly)
{
op2 = op1->AsOp()->gtOp2;
}
op1 = op1->AsOp()->gtOp1;
// If op1 is a GT_NOP then swap op1 and op2.
// (Why? Also, presumably op2 is not a GT_NOP in this case?)
if (op1->gtOper == GT_NOP)
{
GenTree* tmp;
tmp = op1;
op1 = op2;
op2 = tmp;
}
if (!constOnly && ((op2 == base) || (!op2->IsCnsIntOrI())))
{
break;
}
op1 = op1->gtEffectiveVal();
}
*op1WB = op1;
*op2WB = op2;
}
#ifdef DEBUG
/*****************************************************************************
* This is a workaround. It is to help implement an assert in gtSetEvalOrder() that the values
* gtWalkOp() leaves in op1 and op2 correspond with the values of adr, idx, mul, and cns
* that are returned by genCreateAddrMode(). It's essentially impossible to determine
* what gtWalkOp() *should* return for all possible trees. This simply loosens one assert
* to handle the following case:
indir int
const(h) int 4 field
+ byref
lclVar byref V00 this <-- op2
comma byref <-- adr (base)
indir byte
lclVar byref V00 this
+ byref
const int 2 <-- mul == 4
<< int <-- op1
lclVar int V01 arg1 <-- idx
* Here, we are planning to generate the address mode [edx+4*eax], where eax = idx and edx = the GT_COMMA expression.
* To check adr equivalence with op2, we need to walk down the GT_ADD tree just like gtWalkOp() does.
*/
GenTree* Compiler::gtWalkOpEffectiveVal(GenTree* op)
{
for (;;)
{
op = op->gtEffectiveVal();
if ((op->gtOper != GT_ADD) || op->gtOverflow() || !op->AsOp()->gtOp2->IsCnsIntOrI())
{
break;
}
op = op->AsOp()->gtOp1;
}
return op;
}
#endif // DEBUG
/*****************************************************************************
*
* Given a tree, set the GetCostEx and GetCostSz() fields which
* are used to measure the relative costs of the codegen of the tree
*
*/
void Compiler::gtPrepareCost(GenTree* tree)
{
gtSetEvalOrder(tree);
}
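//------------------------------------------------------------------------
// gtIsLikelyRegVar: Estimate whether a local var tree is likely to be enregistered.
//
// Arguments:
//    tree - the tree to check (only GT_LCL_VAR nodes can return true)
//
// Return Value:
//    true if the local looks like a good register candidate based on its
//    do-not-enregister status, EH liveness, and weighted ref count; this is
//    only a costing heuristic.
//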
bool Compiler::gtIsLikelyRegVar(GenTree* tree)
{
if (tree->gtOper != GT_LCL_VAR)
{
return false;
}
const LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varDsc->lvDoNotEnregister)
{
return false;
}
// If this is an EH-live var, return false if it is a def,
// as it will have to go to memory.
if (varDsc->lvLiveInOutOfHndlr && ((tree->gtFlags & GTF_VAR_DEF) != 0))
{
return false;
}
// Be pessimistic if ref counts are not yet set up.
//
// Perhaps we should be optimistic though.
// See notes in GitHub issue 18969.
if (!lvaLocalVarRefCounted())
{
return false;
}
if (varDsc->lvRefCntWtd() < (BB_UNITY_WEIGHT * 3))
{
return false;
}
#ifdef TARGET_X86
if (varTypeUsesFloatReg(tree->TypeGet()))
return false;
if (varTypeIsLong(tree->TypeGet()))
return false;
#endif
return true;
}
//------------------------------------------------------------------------
// gtCanSwapOrder: Returns true iff the secondNode can be swapped with firstNode.
//
// Arguments:
// firstNode - An operand of a tree that can have GTF_REVERSE_OPS set.
// secondNode - The other operand of the tree.
//
// Return Value:
// Returns a boolean indicating whether it is safe to reverse the execution
// order of the two trees, considering any exception, global effects, or
// ordering constraints.
//
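// Note that this helper only checks whether reversing is legal; the caller
// (gtSetEvalOrder) decides whether reversing is actually profitable.
//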
bool Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
{
// The relative order of global / side effects can't be swapped.
bool canSwap = true;
if (optValnumCSE_phase)
{
canSwap = optCSE_canSwap(firstNode, secondNode);
}
// We cannot swap in the presence of special side effects such as GT_CATCH_ARG.
if (canSwap && (firstNode->gtFlags & GTF_ORDER_SIDEEFF))
{
canSwap = false;
}
// When strict side effect order is disabled we allow GTF_REVERSE_OPS to be set
// when one or both sides contain a GTF_CALL or GTF_EXCEPT.
// Currently only the C and C++ languages allow non-strict side effect order.
unsigned strictEffects = GTF_GLOB_EFFECT;
if (canSwap && (firstNode->gtFlags & strictEffects))
{
// op1 has side effects that can't be reordered.
// Check for some special cases where we still may be able to swap.
if (secondNode->gtFlags & strictEffects)
{
// op2 also has non-reorderable side effects - can't swap.
canSwap = false;
}
else
{
// No side effects in op2 - we can swap iff op1 has no way of modifying op2
// (i.e. through byref assignments or calls), or op2 is invariant.
if (firstNode->gtFlags & strictEffects & GTF_PERSISTENT_SIDE_EFFECTS)
{
// We have to be conservative - we can swap iff op2 is invariant (e.g. a constant).
if (!secondNode->IsInvariant())
{
canSwap = false;
}
}
}
}
return canSwap;
}
//------------------------------------------------------------------------
// Given an address expression, compute its costs and addressing mode opportunities,
// and mark addressing mode candidates as GTF_DONT_CSE.
//
// Arguments:
// addr - The address expression
// costEx - The execution cost of this address expression (in/out arg to be updated)
// costSz - The size cost of this address expression (in/out arg to be updated)
// type - The type of the value being referenced by the parent of this address expression.
//
// Return Value:
// Returns true if it finds an addressing mode.
//
// Notes:
// TODO-Throughput - Consider actually instantiating these early, to avoid
// having to re-run the algorithm that looks for them (might also improve CQ).
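// Note that the address mode itself is not built here; this phase only computes
// costs and marks the interior nodes with GTF_ADDRMODE_NO_CSE so that CSE does
// not break the pattern apart before the address mode is actually formed.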
//
bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_types type)
{
// These are "out" parameters on the call to genCreateAddrMode():
bool rev; // This will be true if the operands will need to be reversed. At this point we
// don't care about this because we're not yet instantiating this addressing mode.
unsigned mul; // This is the index (scale) value for the addressing mode
ssize_t cns; // This is the constant offset
GenTree* base; // This is the base of the address.
GenTree* idx; // This is the index.
if (codeGen->genCreateAddrMode(addr, false /*fold*/, &rev, &base, &idx, &mul, &cns))
{
#ifdef TARGET_ARMARCH
// The multiplier should be a "natural-scale" power of two that matches the width of the accessed type.
//
// *(ulong*)(data + index * 8); - can be optimized
// *(ulong*)(data + index * 7); - cannot be optimized
// *(int*)(data + index * 2); - cannot be optimized
//
if ((mul > 0) && (genTypeSize(type) != mul))
{
return false;
}
#endif
// We can form a complex addressing mode, so mark each of the interior
// nodes with GTF_ADDRMODE_NO_CSE and calculate a more accurate cost.
addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
#ifdef TARGET_XARCH
// addrmodeCount is the count of items that we used to form
// an addressing mode. The maximum value is 4 when we have
// all of these: { base, idx, cns, mul }
//
unsigned addrmodeCount = 0;
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
addrmodeCount++;
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
addrmodeCount++;
}
if (cns)
{
if (((signed char)cns) == ((int)cns))
{
*pCostSz += 1;
}
else
{
*pCostSz += 4;
}
addrmodeCount++;
}
if (mul)
{
addrmodeCount++;
}
// When we form a complex addressing mode we can reduce the costs
// associated with the interior GT_ADD and GT_LSH nodes:
//
// GT_ADD -- reduce this interior GT_ADD by (-3,-3)
// / \ --
// GT_ADD 'cns' -- reduce this interior GT_ADD by (-2,-2)
// / \ --
// 'base' GT_LSH -- reduce this interior GT_LSH by (-1,-1)
// / \ --
// 'idx' 'mul'
//
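// For example, with addrmodeCount == 4 the loop below reduces the outer GT_ADD
// by (-3,-3), the inner GT_ADD by (-2,-2), and the GT_LSH by (-1,-1).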
if (addrmodeCount > 1)
{
// The number of interior GT_ADD and GT_LSH nodes will always be one less than addrmodeCount
//
addrmodeCount--;
GenTree* tmp = addr;
while (addrmodeCount > 0)
{
// decrement the gtCosts for the interior GT_ADD or GT_LSH node by the remaining
// addrmodeCount
tmp->SetCosts(tmp->GetCostEx() - addrmodeCount, tmp->GetCostSz() - addrmodeCount);
addrmodeCount--;
if (addrmodeCount > 0)
{
GenTree* tmpOp1 = tmp->AsOp()->gtOp1;
GenTree* tmpOp2 = tmp->gtGetOp2();
assert(tmpOp2 != nullptr);
if ((tmpOp1 != base) && (tmpOp1->OperGet() == GT_ADD))
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_LSH)
{
tmp = tmpOp2;
}
else if (tmpOp1->OperGet() == GT_LSH)
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_ADD)
{
tmp = tmpOp2;
}
else
{
// We can very rarely encounter a tree that has a GT_COMMA node
// that is difficult to walk, so we just early out without decrementing.
addrmodeCount = 0;
}
}
}
}
#elif defined TARGET_ARM
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
if ((base->gtOper == GT_LCL_VAR) && ((idx == NULL) || (cns == 0)))
{
*pCostSz -= 1;
}
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
if (mul > 0)
{
*pCostSz += 2;
}
}
if (cns)
{
if (cns >= 128) // small offsets fit into a 16-bit instruction
{
if (cns < 4096) // medium offsets require a 32-bit instruction
{
if (!varTypeIsFloating(type))
{
*pCostSz += 2;
}
}
else
{
*pCostEx += 2; // Very large offsets require movw/movt instructions
*pCostSz += 8;
}
}
}
#elif defined TARGET_ARM64
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
}
if (cns != 0)
{
if (cns >= (4096 * genTypeSize(type)))
{
*pCostEx += 1;
*pCostSz += 4;
}
}
#else
#error "Unknown TARGET"
#endif
assert(addr->gtOper == GT_ADD);
assert(!addr->gtOverflow());
assert(mul != 1);
// If we have an addressing mode, we have one of:
// [base + cns]
// [ idx * mul ] // mul >= 2, else we would use base instead of idx
// [ idx * mul + cns] // mul >= 2, else we would use base instead of idx
// [base + idx * mul ] // mul can be 0, 2, 4, or 8
// [base + idx * mul + cns] // mul can be 0, 2, 4, or 8
// Note that mul == 0 is semantically equivalent to mul == 1.
// Note that cns can be zero.
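// For example, on x64 the form [base + idx*mul + cns] corresponds to an
// instruction operand such as "[rbx + rcx*8 + 0x10]".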
CLANG_FORMAT_COMMENT_ANCHOR;
assert((base != nullptr) || (idx != nullptr && mul >= 2));
INDEBUG(GenTree* op1Save = addr);
// Walk 'addr' identifying non-overflow ADDs that will be part of the address mode.
// Note that we will be modifying 'op1' and 'op2' so that eventually they should
// map to the base and index.
GenTree* op1 = addr;
GenTree* op2 = nullptr;
gtWalkOp(&op1, &op2, base, false);
// op1 and op2 are now descendants of the root GT_ADD of the addressing mode.
assert(op1 != op1Save);
assert(op2 != nullptr);
#if defined(TARGET_XARCH)
// Walk the operands again (the third operand is unused in this case).
// This time we will only consider adds with constant op2's, since
// we have already found either a non-ADD op1 or a non-constant op2.
// NOTE: we don't support ADD(op1, cns) addressing for ARM/ARM64 yet so
// this walk makes no sense there.
gtWalkOp(&op1, &op2, nullptr, true);
// For XARCH we will fold GT_ADDs in the op2 position into the addressing mode, so we call
// gtWalkOp on both operands of the original GT_ADD.
// This is not done for ARMARCH. Although the stated reason is that we don't try to create a
// scaled index there, in fact we do create them (even base + index*scale + offset).
// At this point, 'op2' may itself be an ADD of a constant that should be folded
// into the addressing mode.
// Walk op2 looking for non-overflow GT_ADDs of constants.
gtWalkOp(&op2, &op1, nullptr, true);
#endif // defined(TARGET_XARCH)
// OK we are done walking the tree
// Now assert that op1 and op2 correspond with base and idx
// in one of the several acceptable ways.
// Note that sometimes op1/op2 is equal to idx/base
// and other times op1/op2 is a GT_COMMA node with
// an effective value that is idx/base
if (mul > 1)
{
if ((op1 != base) && (op1->gtOper == GT_LSH))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1->AsOp()->gtOp1->gtOper == GT_MUL)
{
op1->AsOp()->gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
assert((base == nullptr) || (op2 == base) || (op2->gtEffectiveVal() == base->gtEffectiveVal()) ||
(gtWalkOpEffectiveVal(op2) == gtWalkOpEffectiveVal(base)));
}
else
{
assert(op2 != nullptr);
assert(op2->OperIs(GT_LSH, GT_MUL));
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
// We may have eliminated multiple shifts and multiplies in the addressing mode,
// so navigate down through them to get to "idx".
GenTree* op2op1 = op2->AsOp()->gtOp1;
while ((op2op1->gtOper == GT_LSH || op2op1->gtOper == GT_MUL) && op2op1 != idx)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
op2op1 = op2op1->AsOp()->gtOp1;
}
assert(op1->gtEffectiveVal() == base);
assert(op2op1 == idx);
}
}
else
{
assert(mul == 0);
if ((op1 == idx) || (op1->gtEffectiveVal() == idx))
{
if (idx != nullptr)
{
if ((op1->gtOper == GT_MUL) || (op1->gtOper == GT_LSH))
{
GenTree* op1op1 = op1->AsOp()->gtOp1;
if ((op1op1->gtOper == GT_NOP) ||
(op1op1->gtOper == GT_MUL && op1op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1op1->gtOper == GT_MUL)
{
op1op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
}
assert((op2 == base) || (op2->gtEffectiveVal() == base));
}
else if ((op1 == base) || (op1->gtEffectiveVal() == base))
{
if (idx != nullptr)
{
assert(op2 != nullptr);
if (op2->OperIs(GT_MUL, GT_LSH))
{
GenTree* op2op1 = op2->AsOp()->gtOp1;
if ((op2op1->gtOper == GT_NOP) ||
(op2op1->gtOper == GT_MUL && op2op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op2op1->gtOper == GT_MUL)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
assert((op2 == idx) || (op2->gtEffectiveVal() == idx));
}
}
else
{
// op1 isn't base or idx. Is this possible? Or should there be an assert?
}
}
return true;
} // end if (genCreateAddrMode(...))
return false;
}
/*****************************************************************************
*
* Given a tree, figure out the order in which its sub-operands should be
* evaluated. If the second operand of a binary operator is more expensive
* than the first operand, then try to swap the operand trees. Updates the
* GTF_REVERSE_OPS bit if necessary in this case.
*
* Returns the Sethi 'complexity' estimate for this tree (the higher
* the number, the higher is the tree's resources requirement).
*
* This function sets:
* 1. GetCostEx() to the execution complexity estimate
* 2. GetCostSz() to the code size estimate
* 3. Sometimes sets GTF_ADDRMODE_NO_CSE on nodes in the tree.
* 4. DEBUG-only: clears GTF_DEBUG_NODE_MORPHED.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
unsigned Compiler::gtSetEvalOrder(GenTree* tree)
{
assert(tree);
#ifdef DEBUG
/* Clear the GTF_DEBUG_NODE_MORPHED flag as well */
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif
/* Is this a FP value? */
bool isflt = varTypeIsFloating(tree->TypeGet());
/* Figure out what kind of a node we have */
const genTreeOps oper = tree->OperGet();
const unsigned kind = tree->OperKind();
/* Assume no fixed registers will be trashed */
unsigned level;
int costEx;
int costSz;
#ifdef DEBUG
costEx = -1;
costSz = -1;
#endif
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
#ifdef TARGET_ARM
case GT_CNS_STR:
// Uses movw/movt
costSz = 8;
costEx = 2;
goto COMMON_CNS;
case GT_CNS_LNG:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
INT64 lngVal = con->LngValue();
INT32 loVal = (INT32)(lngVal & 0xffffffff);
INT32 hiVal = (INT32)(lngVal >> 32);
if (lngVal == 0)
{
costSz = 1;
costEx = 1;
}
else
{
// Minimum of one instruction to setup hiVal,
// and one instruction to setup loVal
costSz = 4 + 4;
costEx = 1 + 1;
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)hiVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)hiVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)loVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)loVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
}
goto COMMON_CNS;
}
case GT_CNS_INT:
{
// If the constant is a handle then it will need to have a relocation
// applied to it.
// Any constant that requires a reloc must use the movw/movt sequence
//
GenTreeIntConCommon* con = tree->AsIntConCommon();
target_ssize_t conVal = (target_ssize_t)con->IconValue();
if (con->ImmedValNeedsReloc(this))
{
// Requires movw/movt
costSz = 8;
costEx = 2;
}
else if (codeGen->validImmForInstr(INS_add, conVal))
{
// Typically included with parent oper
costSz = 2;
costEx = 1;
}
else if (codeGen->validImmForInstr(INS_mov, conVal) || codeGen->validImmForInstr(INS_mvn, conVal))
{
// Uses mov or mvn
costSz = 4;
costEx = 1;
}
else
{
// Needs movw/movt
costSz = 8;
costEx = 2;
}
goto COMMON_CNS;
}
#elif defined TARGET_XARCH
case GT_CNS_STR:
#ifdef TARGET_AMD64
costSz = 10;
costEx = 2;
#else // TARGET_X86
costSz = 4;
costEx = 1;
#endif
goto COMMON_CNS;
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t conVal = (oper == GT_CNS_LNG) ? (ssize_t)con->LngValue() : con->IconValue();
bool fitsInVal = true;
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
INT64 lngVal = con->LngValue();
conVal = (ssize_t)lngVal; // truncate to 32-bits
fitsInVal = ((INT64)conVal == lngVal);
}
#endif // TARGET_X86
// If the constant is a handle then it will need to have a relocation
// applied to it.
//
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
if (iconNeedsReloc)
{
costSz = 4;
costEx = 1;
}
else if (fitsInVal && GenTreeIntConCommon::FitsInI8(conVal))
{
costSz = 1;
costEx = 1;
}
#ifdef TARGET_AMD64
else if (!GenTreeIntConCommon::FitsInI32(conVal))
{
costSz = 10;
costEx = 2;
}
#endif // TARGET_AMD64
else
{
costSz = 4;
costEx = 1;
}
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
costSz += fitsInVal ? 1 : 4;
costEx += 1;
}
#endif // TARGET_X86
goto COMMON_CNS;
}
#elif defined(TARGET_ARM64)
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
INT64 imm = con->LngValue();
emitAttr size = EA_SIZE(emitActualTypeSize(tree));
if (iconNeedsReloc)
{
costSz = 8;
costEx = 2;
}
else if (emitter::emitIns_valid_imm_for_add(imm, size))
{
costSz = 2;
costEx = 1;
}
else if (emitter::emitIns_valid_imm_for_mov(imm, size))
{
costSz = 4;
costEx = 1;
}
else
{
// Arm64 allows any arbitrary 16-bit constant to be loaded into a register halfword
// There are three forms
// movk which loads into any halfword preserving the remaining halfwords
// movz which loads into any halfword zeroing the remaining halfwords
// movn which loads into any halfword zeroing the remaining halfwords then bitwise inverting
// the register
// In some cases it is preferable to use movn, because it has the side effect
// of filling the other halfwords with ones
// Determine whether movn or movz will require the fewest instructions to populate the immediate
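// For example, 0x0000000012340000 can be materialized with a single movz,
// while 0xFFFFFFFF1234FFFF is best built starting with a movn.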
bool preferMovz = false;
bool preferMovn = false;
int instructionCount = 4;
for (int i = (size == EA_8BYTE) ? 48 : 16; i >= 0; i -= 16)
{
if (!preferMovn && (uint16_t(imm >> i) == 0x0000))
{
preferMovz = true; // by using a movk to start we can save one instruction
instructionCount--;
}
else if (!preferMovz && (uint16_t(imm >> i) == 0xffff))
{
preferMovn = true; // by using a movn to start we can save one instruction
instructionCount--;
}
}
costEx = instructionCount;
costSz = 4 * instructionCount;
}
}
goto COMMON_CNS;
#else
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
#error "Unknown TARGET"
#endif
COMMON_CNS:
/*
Note that some code below depends on constants always getting
moved to be the second operand of a binary operator. This is
easily accomplished by giving constants a level of 0, which
we do on the next line. If you ever decide to change this, be
aware that unless you make other arrangements for integer
constants to be moved, stuff will break.
*/
level = 0;
break;
case GT_CNS_DBL:
{
level = 0;
#if defined(TARGET_XARCH)
/* We use fldz and fld1 to load 0.0 and 1.0, but all other */
/* floating point constants are loaded using an indirection */
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
(*((__int64*)&(tree->AsDblCon()->gtDconVal)) == I64(0x3ff0000000000000)))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#elif defined(TARGET_ARM)
var_types targetType = tree->TypeGet();
if (targetType == TYP_FLOAT)
{
costEx = 1 + 2;
costSz = 2 + 4;
}
else
{
assert(targetType == TYP_DOUBLE);
costEx = 1 + 4;
costSz = 2 + 8;
}
#elif defined(TARGET_ARM64)
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
emitter::emitIns_valid_imm_for_fmov(tree->AsDblCon()->gtDconVal))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#else
#error "Unknown TARGET"
#endif
}
break;
case GT_LCL_VAR:
level = 1;
if (gtIsLikelyRegVar(tree))
{
costEx = 1;
costSz = 1;
/* Sign-extend and zero-extend are more expensive to load */
if (lvaTable[tree->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
costEx += 1;
costSz += 1;
}
}
else
{
costEx = IND_COST_EX;
costSz = 2;
/* Sign-extend and zero-extend are more expensive to load */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
}
#if defined(TARGET_AMD64)
// increase costSz for floating point locals
if (isflt)
{
costSz += 1;
if (!gtIsLikelyRegVar(tree))
{
costSz += 1;
}
}
#endif
break;
case GT_CLS_VAR:
#ifdef TARGET_ARM
// We generate movw/movt/ldr
level = 1;
costEx = 3 + IND_COST_EX; // 6
costSz = 4 + 4 + 2; // 10
break;
#endif
case GT_LCL_FLD:
level = 1;
costEx = IND_COST_EX;
costSz = 4;
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
level = 1;
costEx = 3;
costSz = 3;
break;
case GT_PHI_ARG:
case GT_ARGPLACE:
level = 0;
costEx = 0;
costSz = 0;
break;
default:
level = 1;
costEx = 1;
costSz = 1;
break;
}
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
int lvlb; // preference for op2
unsigned lvl2; // scratch variable
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2IfPresent();
costEx = 0;
costSz = 0;
if (tree->OperIsAddrMode())
{
if (op1 == nullptr)
{
op1 = op2;
op2 = nullptr;
}
}
/* Check for a nilary operator */
if (op1 == nullptr)
{
assert(op2 == nullptr);
level = 0;
goto DONE;
}
/* Is this a unary operator? */
if (op2 == nullptr)
{
/* Process the operand of the operator */
/* Most Unary ops have costEx of 1 */
costEx = 1;
costSz = 1;
level = gtSetEvalOrder(op1);
GenTreeIntrinsic* intrinsic;
/* Special handling for some operators */
switch (oper)
{
case GT_JTRUE:
costEx = 2;
costSz = 2;
break;
case GT_SWITCH:
costEx = 10;
costSz = 5;
break;
case GT_CAST:
#if defined(TARGET_ARM)
costEx = 1;
costSz = 1;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 3;
costSz = 4;
}
#elif defined(TARGET_ARM64)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 2;
costSz = 4;
}
#elif defined(TARGET_XARCH)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
/* cast involving floats always go through memory */
costEx = IND_COST_EX * 2;
costSz = 6;
}
#else
#error "Unknown TARGET"
#endif
/* Overflow casts are a lot more expensive */
if (tree->gtOverflow())
{
costEx += 6;
costSz += 6;
}
break;
case GT_NOP:
costEx = 0;
costSz = 0;
break;
case GT_INTRINSIC:
intrinsic = tree->AsIntrinsic();
// named intrinsic
assert(intrinsic->gtIntrinsicName != NI_Illegal);
// GT_INTRINSIC intrinsics Sin, Cos, Sqrt, Abs ... have higher costs.
// TODO: tune these costs per target, as some of these are
// target intrinsics and would cost less to generate code for.
switch (intrinsic->gtIntrinsicName)
{
default:
assert(!"missing case for gtIntrinsicName");
costEx = 12;
costSz = 12;
break;
case NI_System_Math_Abs:
costEx = 5;
costSz = 15;
break;
case NI_System_Math_Acos:
case NI_System_Math_Acosh:
case NI_System_Math_Asin:
case NI_System_Math_Asinh:
case NI_System_Math_Atan:
case NI_System_Math_Atanh:
case NI_System_Math_Atan2:
case NI_System_Math_Cbrt:
case NI_System_Math_Ceiling:
case NI_System_Math_Cos:
case NI_System_Math_Cosh:
case NI_System_Math_Exp:
case NI_System_Math_Floor:
case NI_System_Math_FMod:
case NI_System_Math_FusedMultiplyAdd:
case NI_System_Math_ILogB:
case NI_System_Math_Log:
case NI_System_Math_Log2:
case NI_System_Math_Log10:
case NI_System_Math_Max:
case NI_System_Math_Min:
case NI_System_Math_Pow:
case NI_System_Math_Round:
case NI_System_Math_Sin:
case NI_System_Math_Sinh:
case NI_System_Math_Sqrt:
case NI_System_Math_Tan:
case NI_System_Math_Tanh:
case NI_System_Math_Truncate:
{
// We give these intrinsics a large fixed execution cost because we'd like to CSE
// them, even if they are implemented by calls. This is different from modeling
// user calls, since we never CSE user calls. We don't do this for target intrinsics,
// however, as they typically expand to single-instruction sequences.
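// (Concretely: costEx = 36 approximates a helper/user call, while costEx = 3
// approximates a short inline instruction sequence.)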
if (IsIntrinsicImplementedByUserCall(intrinsic->gtIntrinsicName))
{
costEx = 36;
costSz = 4;
}
else
{
costEx = 3;
costSz = 4;
}
break;
}
case NI_System_Object_GetType:
// We give this intrinsic a large fixed execution cost because we'd like to CSE
// it, even if it is implemented by a call. This is different from modeling
// user calls, since we never CSE user calls.
costEx = 36;
costSz = 4;
break;
}
level++;
break;
case GT_NOT:
case GT_NEG:
// We need to ensure that -x is evaluated before x or else
// we get burned while adjusting genFPstkLevel in x*-x where
// the rhs x is the last use of the enregistered x.
//
// Even in the integer case we want to prefer to
// evaluate the side without the GT_NEG node, all other things
// being equal. Also a GT_NOT requires a scratch register
level++;
break;
case GT_ADDR:
costEx = 0;
costSz = 1;
// If we have a GT_ADDR of an GT_IND we can just copy the costs from indOp1
if (op1->OperGet() == GT_IND)
{
GenTree* indOp1 = op1->AsOp()->gtOp1;
costEx = indOp1->GetCostEx();
costSz = indOp1->GetCostSz();
}
break;
case GT_ARR_LENGTH:
level++;
/* Array Len should cost the same as an indirection, which has a costEx of IND_COST_EX */
costEx = IND_COST_EX - 1;
costSz = 2;
break;
case GT_MKREFANY:
case GT_OBJ:
// We estimate the cost of a GT_OBJ or GT_MKREFANY to be two loads (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BOX:
// We estimate the cost of a GT_BOX to be two stores (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BLK:
case GT_IND:
/* An indirection should always have a non-zero level.
* Only constant leaf nodes have level 0.
*/
if (level == 0)
{
level = 1;
}
/* Indirections have a costEx of IND_COST_EX */
costEx = IND_COST_EX;
costSz = 2;
/* If we have to sign-extend or zero-extend, bump the cost */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
if (isflt)
{
if (tree->TypeGet() == TYP_DOUBLE)
{
costEx += 1;
}
#ifdef TARGET_ARM
costSz += 2;
#endif // TARGET_ARM
}
// Can we form an addressing mode with this indirection?
// TODO-CQ: Consider changing this to op1->gtEffectiveVal() to take into account
// addressing modes hidden under a comma node.
if (op1->gtOper == GT_ADD)
{
// See if we can form a complex addressing mode.
GenTree* addr = op1->gtEffectiveVal();
bool doAddrMode = true;
// Always use an addrMode for an array index indirection.
// TODO-1stClassStructs: Always do this, but first make sure it's
// done in Lowering as well.
if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
if (tree->TypeGet() == TYP_STRUCT)
{
doAddrMode = false;
}
else if (varTypeIsStruct(tree))
{
// This is a heuristic attempting to match prior behavior when indirections
// under a struct assignment would not be considered for addressing modes.
if (compCurStmt != nullptr)
{
GenTree* expr = compCurStmt->GetRootNode();
if ((expr->OperGet() == GT_ASG) &&
((expr->gtGetOp1() == tree) || (expr->gtGetOp2() == tree)))
{
doAddrMode = false;
}
}
}
}
#ifdef TARGET_ARM64
if (tree->gtFlags & GTF_IND_VOLATILE)
{
// For volatile store/loads when address is contained we always emit `dmb`
// if it's not - we emit one-way barriers i.e. ldar/stlr
doAddrMode = false;
}
#endif // TARGET_ARM64
if (doAddrMode && gtMarkAddrMode(addr, &costEx, &costSz, tree->TypeGet()))
{
goto DONE;
}
} // end if (op1->gtOper == GT_ADD)
else if (gtIsLikelyRegVar(op1))
{
/* Indirection of an enregistered LCL_VAR, don't increase costEx/costSz */
goto DONE;
}
#ifdef TARGET_XARCH
else if (op1->IsCnsIntOrI())
{
// Indirection of a CNS_INT, subtract 1 from costEx
// makes costEx 3 for x86 and 4 for amd64
//
costEx += (op1->GetCostEx() - 1);
costSz += op1->GetCostSz();
goto DONE;
}
#endif
break;
default:
break;
}
costEx += op1->GetCostEx();
costSz += op1->GetCostSz();
goto DONE;
}
/* Binary operator - check for certain special cases */
lvlb = 0;
/* Default Binary ops have a cost of 1,1 */
costEx = 1;
costSz = 1;
#ifdef TARGET_ARM
if (isflt)
{
costSz += 2;
}
#endif
#ifndef TARGET_64BIT
if (varTypeIsLong(op1->TypeGet()))
{
/* Operations on longs are more expensive */
costEx += 3;
costSz += 3;
}
#endif
switch (oper)
{
case GT_MOD:
case GT_UMOD:
/* Modulo by a power of 2 is easy */
if (op2->IsCnsIntOrI())
{
size_t ival = op2->AsIntConCommon()->IconValue();
if (ival > 0 && ival == genFindLowestBit(ival))
{
break;
}
}
FALLTHROUGH;
case GT_DIV:
case GT_UDIV:
if (isflt)
{
/* fp division is very expensive to execute */
costEx = 36; // TYP_DOUBLE
costSz += 3;
}
else
{
/* integer division is also very expensive */
costEx = 20;
costSz += 2;
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 3;
}
break;
case GT_MUL:
if (isflt)
{
/* FP multiplication instructions are more expensive */
costEx += 4;
costSz += 3;
}
else
{
/* Integer multiplication instructions are more expensive */
costEx += 3;
costSz += 2;
if (tree->gtOverflow())
{
/* Overflow checks are more expensive */
costEx += 3;
costSz += 3;
}
#ifdef TARGET_X86
if ((tree->gtType == TYP_LONG) || tree->gtOverflow())
{
/* We use imulEAX for TYP_LONG and overflow multiplications */
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 4;
/* The 64-bit imul instruction costs more */
costEx += 4;
}
#endif // TARGET_X86
}
break;
case GT_ADD:
case GT_SUB:
if (isflt)
{
/* FP instructions are a bit more expensive */
costEx += 4;
costSz += 3;
break;
}
/* Overflow checks are more expensive */
if (tree->gtOverflow())
{
costEx += 3;
costSz += 3;
}
break;
case GT_BOUNDS_CHECK:
costEx = 4; // cmp reg,reg and jae throw (not taken)
costSz = 7; // jump to cold section
break;
case GT_COMMA:
/* Comma tosses the result of the left operand */
gtSetEvalOrder(op1);
level = gtSetEvalOrder(op2);
/* GT_COMMA cost is the sum of op1 and op2 costs */
costEx = (op1->GetCostEx() + op2->GetCostEx());
costSz = (op1->GetCostSz() + op2->GetCostSz());
goto DONE;
case GT_COLON:
level = gtSetEvalOrder(op1);
lvl2 = gtSetEvalOrder(op2);
if (level < lvl2)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx = op1->GetCostEx() + op2->GetCostEx();
costSz = op1->GetCostSz() + op2->GetCostSz();
goto DONE;
case GT_INDEX_ADDR:
costEx = 6; // cmp reg,reg; jae throw; mov reg, [addrmode] (not taken)
costSz = 9; // jump to cold section
break;
case GT_ASG:
/* Assignments need a bit of special handling */
/* Process the target */
level = gtSetEvalOrder(op1);
if (gtIsLikelyRegVar(op1))
{
assert(lvlb == 0);
lvl2 = gtSetEvalOrder(op2);
/* Assignment to an enregistered LCL_VAR */
costEx = op2->GetCostEx();
costSz = max(3, op2->GetCostSz()); // 3 is an estimate for a reg-reg assignment
goto DONE_OP1_AFTER_COST;
}
goto DONE_OP1;
default:
break;
}
/* Process the sub-operands */
level = gtSetEvalOrder(op1);
if (lvlb < 0)
{
level -= lvlb; // lvlb is negative, so this increases level
lvlb = 0;
}
DONE_OP1:
assert(lvlb >= 0);
lvl2 = gtSetEvalOrder(op2) + lvlb;
costEx += (op1->GetCostEx() + op2->GetCostEx());
costSz += (op1->GetCostSz() + op2->GetCostSz());
DONE_OP1_AFTER_COST:
bool bReverseInAssignment = false;
if (oper == GT_ASG && (!optValnumCSE_phase || optCSE_canSwap(op1, op2)))
{
GenTree* op1Val = op1;
// Skip over the GT_IND/GT_ADDR tree (if one exists)
//
if ((op1->gtOper == GT_IND) && (op1->AsOp()->gtOp1->gtOper == GT_ADDR))
{
op1Val = op1->AsOp()->gtOp1->AsOp()->gtOp1;
}
switch (op1Val->gtOper)
{
case GT_IND:
case GT_BLK:
case GT_OBJ:
// In an indirection, the destination address is evaluated prior to the source.
// If we have any side effects on the target indirection,
// we have to evaluate op1 first.
// However, if the LHS is a lclVar address, SSA relies on using evaluation order for its
// renaming, and therefore the RHS must be evaluated first.
// If we have an assignment involving a lclVar address, the LHS may be marked as having
// side-effects.
// However the side-effects won't require that we evaluate the LHS address first:
// - The GTF_GLOB_REF might have been conservatively set on a FIELD of a local.
// - The local might be address-exposed, but that side-effect happens at the actual assignment (not
// when its address is "evaluated") so it doesn't change the side effect to "evaluate" the address
// after the RHS (note that in this case it won't be renamed by SSA anyway, but the reordering is
// safe).
//
if (op1Val->AsIndir()->Addr()->IsLocalAddrExpr())
{
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
}
if (op1Val->AsIndir()->Addr()->gtFlags & GTF_ALL_EFFECT)
{
break;
}
// In case op2 assigns to a local var that is used in op1Val, we have to evaluate op1Val first.
if (op2->gtFlags & GTF_ASG)
{
break;
}
// If op2 is simple then evaluate op1 first
if (op2->OperKind() & GTK_LEAF)
{
break;
}
// fall through and set GTF_REVERSE_OPS
FALLTHROUGH;
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_CLS_VAR:
// We evaluate op2 before op1
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
default:
break;
}
}
else if (GenTree::OperIsCompare(oper))
{
/* Float compares remove both operands from the FP stack */
/* Also FP comparison uses EAX for flags */
if (varTypeIsFloating(op1->TypeGet()))
{
level++;
lvl2++;
}
if ((tree->gtFlags & GTF_RELOP_JMP_USED) == 0)
{
/* Using a setcc instruction is more expensive */
costEx += 3;
}
}
/* Check for other interesting cases */
switch (oper)
{
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
/* Variable sized shifts are more expensive and use REG_SHIFT */
if (!op2->IsCnsIntOrI())
{
costEx += 3;
#ifndef TARGET_64BIT
// Variable sized LONG shifts require the use of a helper call
//
if (tree->gtType == TYP_LONG)
{
level += 5;
lvl2 += 5;
costEx += 3 * IND_COST_EX;
costSz += 4;
}
#endif // !TARGET_64BIT
}
break;
case GT_INTRINSIC:
switch (tree->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Atan2:
case NI_System_Math_Pow:
// These math intrinsics are actually implemented by user calls.
// Increase the Sethi 'complexity' by two to reflect the argument
// register requirement.
level += 2;
break;
case NI_System_Math_Max:
case NI_System_Math_Min:
level++;
break;
default:
assert(!"Unknown binary GT_INTRINSIC operator");
break;
}
break;
default:
break;
}
/* We need to evaluate constants later, as many places in codegen
can't handle op1 being a constant. This is normally enforced
naturally, since constants have the lowest level (0). However,
sometimes we end up with a tree like "cns1 < nop(cns2)". In
such cases both sides have a level of 0, so encourage constants
to be evaluated last in such cases */
if ((level == 0) && (level == lvl2) && op1->OperIsConst() &&
(tree->OperIsCommutative() || tree->OperIsCompare()))
{
lvl2++;
}
/* We try to swap operands if the second one is more expensive */
bool tryToSwap;
GenTree* opA;
GenTree* opB;
if (tree->gtFlags & GTF_REVERSE_OPS)
{
opA = op2;
opB = op1;
}
else
{
opA = op1;
opB = op2;
}
if (fgOrder == FGOrderLinear)
{
// Don't swap anything if we're in linear order; we're really just interested in the costs.
tryToSwap = false;
}
else if (bReverseInAssignment)
{
// Assignments are special: if GTF_REVERSE_OPS is appropriate,
// it was already set above, so don't try to swap here.
tryToSwap = false;
}
else if ((oper == GT_INTRINSIC) && IsIntrinsicImplementedByUserCall(tree->AsIntrinsic()->gtIntrinsicName))
{
// We do not swap operand execution order for intrinsics that are implemented by user calls
// because of trickiness around ensuring the execution order does not change during rationalization.
tryToSwap = false;
}
else if (oper == GT_BOUNDS_CHECK)
{
// Bounds check nodes used to not be binary, thus GTF_REVERSE_OPS was
// not enabled for them. This condition preserves that behavior.
// Additionally, CQ analysis shows that enabling GTF_REVERSE_OPS
// for these nodes leads to mixed results at best.
tryToSwap = false;
}
else
{
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tryToSwap = (level > lvl2);
}
else
{
tryToSwap = (level < lvl2);
}
// Try to force extra swapping when in the stress mode:
if (compStressCompile(STRESS_REVERSE_FLAG, 60) && ((tree->gtFlags & GTF_REVERSE_OPS) == 0) &&
!op2->OperIsConst())
{
tryToSwap = true;
}
}
if (tryToSwap)
{
bool canSwap = gtCanSwapOrder(opA, opB);
if (canSwap)
{
/* Can we swap the order by commuting the operands? */
switch (oper)
{
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
if (GenTree::SwapRelop(oper) != oper)
{
tree->SetOper(GenTree::SwapRelop(oper), GenTree::PRESERVE_VN);
}
FALLTHROUGH;
case GT_ADD:
case GT_MUL:
case GT_OR:
case GT_XOR:
case GT_AND:
/* Swap the operands */
tree->AsOp()->gtOp1 = op2;
tree->AsOp()->gtOp2 = op1;
break;
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
break;
default:
/* Mark the operand's evaluation order to be swapped */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tree->gtFlags &= ~GTF_REVERSE_OPS;
}
else
{
tree->gtFlags |= GTF_REVERSE_OPS;
}
break;
}
}
}
/* Swap the level counts */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
unsigned tmpl;
tmpl = level;
level = lvl2;
lvl2 = tmpl;
}
/* Compute the Sethi number for this binary operator */
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
unsigned lvl2; // Scratch variable
case GT_CALL:
assert(tree->gtFlags & GTF_CALL);
level = 0;
costEx = 5;
costSz = 2;
GenTreeCall* call;
call = tree->AsCall();
/* Evaluate the 'this' argument, if present */
if (tree->AsCall()->gtCallThisArg != nullptr)
{
GenTree* thisVal = tree->AsCall()->gtCallThisArg->GetNode();
lvl2 = gtSetEvalOrder(thisVal);
if (level < lvl2)
{
level = lvl2;
}
costEx += thisVal->GetCostEx();
costSz += thisVal->GetCostSz() + 1;
}
/* Evaluate the arguments, right to left */
if (call->gtCallArgs != nullptr)
{
const bool lateArgs = false;
lvl2 = gtSetCallArgsOrder(call->Args(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
/* Evaluate the temp register arguments list
* This is a "hidden" list and its only purpose is to
* extend the life of temps until we make the call */
if (call->gtCallLateArgs != nullptr)
{
const bool lateArgs = true;
lvl2 = gtSetCallArgsOrder(call->LateArgs(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
if (call->gtCallType == CT_INDIRECT)
{
// pinvoke-calli cookie is a constant, or constant indirection
assert(call->gtCallCookie == nullptr || call->gtCallCookie->gtOper == GT_CNS_INT ||
call->gtCallCookie->gtOper == GT_IND);
GenTree* indirect = call->gtCallAddr;
lvl2 = gtSetEvalOrder(indirect);
if (level < lvl2)
{
level = lvl2;
}
costEx += indirect->GetCostEx() + IND_COST_EX;
costSz += indirect->GetCostSz();
}
else
{
if (call->IsVirtual())
{
GenTree* controlExpr = call->gtControlExpr;
if (controlExpr != nullptr)
{
lvl2 = gtSetEvalOrder(controlExpr);
if (level < lvl2)
{
level = lvl2;
}
costEx += controlExpr->GetCostEx();
costSz += controlExpr->GetCostSz();
}
}
#ifdef TARGET_ARM
if (call->IsVirtualStub())
{
// We generate movw/movt/ldr
costEx += (1 + IND_COST_EX);
costSz += 8;
if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
{
// Must use R12 for the ldr target -- REG_JUMP_THUNK_PARAM
costSz += 2;
}
}
else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
{
costEx += 2;
costSz += 6;
}
costSz += 2;
#endif
#ifdef TARGET_XARCH
costSz += 3;
#endif
}
level += 1;
/* Virtual calls are a bit more expensive */
if (call->IsVirtual())
{
costEx += 2 * IND_COST_EX;
costSz += 2;
}
level += 5;
costEx += 3 * IND_COST_EX;
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
return gtSetMultiOpOrder(tree->AsMultiOp());
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
level = gtSetEvalOrder(arrElem->gtArrObj);
costEx = arrElem->gtArrObj->GetCostEx();
costSz = arrElem->gtArrObj->GetCostSz();
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
lvl2 = gtSetEvalOrder(arrElem->gtArrInds[dim]);
if (level < lvl2)
{
level = lvl2;
}
costEx += arrElem->gtArrInds[dim]->GetCostEx();
costSz += arrElem->gtArrInds[dim]->GetCostSz();
}
level += arrElem->gtArrRank;
costEx += 2 + (arrElem->gtArrRank * (IND_COST_EX + 1));
costSz += 2 + (arrElem->gtArrRank * 2);
}
break;
case GT_ARR_OFFSET:
level = gtSetEvalOrder(tree->AsArrOffs()->gtOffset);
costEx = tree->AsArrOffs()->gtOffset->GetCostEx();
costSz = tree->AsArrOffs()->gtOffset->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtIndex);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtIndex->GetCostEx();
costSz += tree->AsArrOffs()->gtIndex->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtArrObj);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtArrObj->GetCostEx();
costSz += tree->AsArrOffs()->gtArrObj->GetCostSz();
break;
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
lvl2 = gtSetEvalOrder(use.GetNode());
// PHI args should always have cost 0 and level 0
assert(lvl2 == 0);
assert(use.GetNode()->GetCostEx() == 0);
assert(use.GetNode()->GetCostSz() == 0);
}
// Give it a level of 2, just to be sure that it's greater than the LHS of
// the parent assignment and the PHI gets evaluated first in linear order.
// See also SsaBuilder::InsertPhi and SsaBuilder::AddPhiArg.
level = 2;
costEx = 0;
costSz = 0;
break;
case GT_FIELD_LIST:
level = 0;
costEx = 0;
costSz = 0;
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
unsigned opLevel = gtSetEvalOrder(use.GetNode());
level = max(level, opLevel);
costEx += use.GetNode()->GetCostEx();
costSz += use.GetNode()->GetCostSz();
}
break;
case GT_CMPXCHG:
level = gtSetEvalOrder(tree->AsCmpXchg()->gtOpLocation);
costSz = tree->AsCmpXchg()->gtOpLocation->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpValue);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpValue->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpComparand);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpComparand->GetCostSz();
costEx = MAX_COST; // Seriously, what could be more expensive than lock cmpxchg?
costSz += 5; // size of lock cmpxchg [reg+C], reg
break;
case GT_STORE_DYN_BLK:
level = gtSetEvalOrder(tree->AsStoreDynBlk()->Addr());
costEx = tree->AsStoreDynBlk()->Addr()->GetCostEx();
costSz = tree->AsStoreDynBlk()->Addr()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->Data());
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->Data()->GetCostEx();
costSz += tree->AsStoreDynBlk()->Data()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->gtDynamicSize);
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->gtDynamicSize->GetCostEx();
costSz += tree->AsStoreDynBlk()->gtDynamicSize->GetCostSz();
break;
default:
JITDUMP("unexpected operator in this tree:\n");
DISPTREE(tree);
NO_WAY("unexpected operator");
}
DONE:
// Some path through this function must have set the costs.
assert(costEx != -1);
assert(costSz != -1);
tree->SetCosts(costEx, costSz);
return level;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
#ifdef DEBUG
bool GenTree::OperSupportsReverseOpEvalOrder(Compiler* comp) const
{
if (OperIsBinary())
{
if ((AsOp()->gtGetOp1() == nullptr) || (AsOp()->gtGetOp2() == nullptr))
{
return false;
}
if (OperIs(GT_COMMA, GT_BOUNDS_CHECK))
{
return false;
}
if (OperIs(GT_INTRINSIC))
{
return !comp->IsIntrinsicImplementedByUserCall(AsIntrinsic()->gtIntrinsicName);
}
return true;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
if (OperIsMultiOp())
{
return AsMultiOp()->GetOperandCount() == 2;
}
#endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS
return false;
}
#endif // DEBUG
/*****************************************************************************
*
* If the given tree is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0. Note that we never return 1,
* to match the behavior of GetScaleIndexShf().
*/
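// For example, a GT_CNS_INT of 4 yields 4, while constants of 1, 3, or 16 yield 0.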
unsigned GenTree::GetScaleIndexMul()
{
if (IsCnsIntOrI() && jitIsScaleIndexMul(AsIntConCommon()->IconValue()) && AsIntConCommon()->IconValue() != 1)
{
return (unsigned)AsIntConCommon()->IconValue();
}
return 0;
}
/*****************************************************************************
*
* If the given tree is the right-hand side of a left shift (that is,
* 'y' in the tree 'x' << 'y'), and it is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0.
*/
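// For example, a constant shift amount of 2 yields 4 (i.e. 1 << 2), while a
// shift amount outside the scaled-index range (e.g. 5) yields 0.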
unsigned GenTree::GetScaleIndexShf()
{
if (IsCnsIntOrI() && jitIsScaleIndexShift(AsIntConCommon()->IconValue()))
{
return (unsigned)(1 << AsIntConCommon()->IconValue());
}
return 0;
}
/*****************************************************************************
*
* If the given tree is a scaled index (i.e. "op * 4" or "op << 2"), returns
* the multiplier: 2, 4, or 8; otherwise returns 0. Note that "1" is never
* returned.
*/
unsigned GenTree::GetScaledIndex()
{
// With constant folding disabled (!opts.OptEnabled(CLFLG_CONSTANTFOLD)) we can have
// CNS_INT * CNS_INT
//
if (AsOp()->gtOp1->IsCnsIntOrI())
{
return 0;
}
switch (gtOper)
{
case GT_MUL:
return AsOp()->gtOp2->GetScaleIndexMul();
case GT_LSH:
return AsOp()->gtOp2->GetScaleIndexShf();
default:
assert(!"GenTree::GetScaledIndex() called with illegal gtOper");
break;
}
return 0;
}
//------------------------------------------------------------------------
// TryGetUse: Get the use edge for an operand of this tree.
//
// Arguments:
// operand - the node to find the use for
// pUse - [out] parameter for the use
//
// Return Value:
// Whether "operand" is a child of this node. If it is, "*pUse" is set,
// allowing for the replacement of "operand" with some other node.
//
bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
switch (OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
return false;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_RETURN:
case GT_RETFILT:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
// Variadic nodes
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
if (this->AsUnOp()->gtOp1->gtOper == GT_FIELD_LIST)
{
return this->AsUnOp()->gtOp1->TryGetUse(operand, pUse);
}
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
#endif // FEATURE_ARG_SPLIT
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
for (GenTree** opUse : this->AsMultiOp()->UseEdges())
{
if (*opUse == operand)
{
*pUse = opUse;
return true;
}
}
return false;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// Special nodes
case GT_PHI:
for (GenTreePhi::Use& phiUse : AsPhi()->Uses())
{
if (phiUse.GetNode() == operand)
{
*pUse = &phiUse.NodeRef();
return true;
}
}
return false;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& fieldUse : AsFieldList()->Uses())
{
if (fieldUse.GetNode() == operand)
{
*pUse = &fieldUse.NodeRef();
return true;
}
}
return false;
case GT_CMPXCHG:
{
GenTreeCmpXchg* const cmpXchg = this->AsCmpXchg();
if (operand == cmpXchg->gtOpLocation)
{
*pUse = &cmpXchg->gtOpLocation;
return true;
}
if (operand == cmpXchg->gtOpValue)
{
*pUse = &cmpXchg->gtOpValue;
return true;
}
if (operand == cmpXchg->gtOpComparand)
{
*pUse = &cmpXchg->gtOpComparand;
return true;
}
return false;
}
case GT_ARR_ELEM:
{
GenTreeArrElem* const arrElem = this->AsArrElem();
if (operand == arrElem->gtArrObj)
{
*pUse = &arrElem->gtArrObj;
return true;
}
for (unsigned i = 0; i < arrElem->gtArrRank; i++)
{
if (operand == arrElem->gtArrInds[i])
{
*pUse = &arrElem->gtArrInds[i];
return true;
}
}
return false;
}
case GT_ARR_OFFSET:
{
GenTreeArrOffs* const arrOffs = this->AsArrOffs();
if (operand == arrOffs->gtOffset)
{
*pUse = &arrOffs->gtOffset;
return true;
}
if (operand == arrOffs->gtIndex)
{
*pUse = &arrOffs->gtIndex;
return true;
}
if (operand == arrOffs->gtArrObj)
{
*pUse = &arrOffs->gtArrObj;
return true;
}
return false;
}
case GT_STORE_DYN_BLK:
{
GenTreeStoreDynBlk* const dynBlock = this->AsStoreDynBlk();
if (operand == dynBlock->gtOp1)
{
*pUse = &dynBlock->gtOp1;
return true;
}
if (operand == dynBlock->gtOp2)
{
*pUse = &dynBlock->gtOp2;
return true;
}
if (operand == dynBlock->gtDynamicSize)
{
*pUse = &dynBlock->gtDynamicSize;
return true;
}
return false;
}
case GT_CALL:
{
GenTreeCall* const call = this->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
*pUse = &call->gtCallThisArg->NodeRef();
return true;
}
if (operand == call->gtControlExpr)
{
*pUse = &call->gtControlExpr;
return true;
}
if (call->gtCallType == CT_INDIRECT)
{
if (operand == call->gtCallCookie)
{
*pUse = &call->gtCallCookie;
return true;
}
if (operand == call->gtCallAddr)
{
*pUse = &call->gtCallAddr;
return true;
}
}
for (GenTreeCall::Use& argUse : call->Args())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
for (GenTreeCall::Use& argUse : call->LateArgs())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
return false;
}
// Binary nodes
default:
assert(this->OperIsBinary());
return TryGetUseBinOp(operand, pUse);
}
}
bool GenTree::TryGetUseBinOp(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
assert(this->OperIsBinary());
GenTreeOp* const binOp = this->AsOp();
if (operand == binOp->gtOp1)
{
*pUse = &binOp->gtOp1;
return true;
}
if (operand == binOp->gtOp2)
{
*pUse = &binOp->gtOp2;
return true;
}
return false;
}
//------------------------------------------------------------------------
// GenTree::ReplaceOperand:
// Replace a given operand to this node with a new operand. If the
// current node is a call node, this will also update the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTree::ReplaceOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
if (OperGet() == GT_CALL)
{
AsCall()->ReplaceCallOperand(useEdge, replacement);
}
else
{
*useEdge = replacement;
}
}
//------------------------------------------------------------------------
// gtGetParent: Get the parent of this node, and optionally capture the
// pointer to the child so that it can be modified.
//
// Arguments:
// pUse - A pointer to a GenTree** (yes, that's three
// levels, i.e. GenTree ***), which if non-null,
// will be set to point to the field in the parent
// that points to this node.
//
// Return value
// The parent of this node.
//
// Notes:
// This requires that the execution order must be defined (i.e. gtSetEvalOrder() has been called).
// To enable the child to be replaced, it accepts an argument, "pUse", that, if non-null,
// will be set to point to the child pointer in the parent that points to this node.
//
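// Note also that this is a linear scan forward along the gtNext links, so the
// cost is proportional to the number of nodes that execute after this one.
//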
GenTree* GenTree::gtGetParent(GenTree*** pUse)
{
// Find the parent node; it must be after this node in the execution order.
GenTree* user;
GenTree** use = nullptr;
for (user = gtNext; user != nullptr; user = user->gtNext)
{
if (user->TryGetUse(this, &use))
{
break;
}
}
if (pUse != nullptr)
{
*pUse = use;
}
return user;
}
//-------------------------------------------------------------------------
// gtRetExprVal - walk back through GT_RET_EXPRs
//
// Arguments:
// pbbFlags - out-parameter that is set to the flags of the basic block
// containing the inlinee return value. The value is 0
// for unsuccessful inlines.
//
// Returns:
// tree representing return value from a successful inline,
// or original call for failed or yet to be determined inline.
//
// Notes:
// Multi-level inlines can form chains of GT_RET_EXPRs.
// This method walks back to the root of the chain.
//
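// For example, if A inlines B and B inlines C, the tree can contain
// GT_RET_EXPR -> GT_RET_EXPR -> <C's return value tree (or the original call)>;
// this walk returns the final non-GT_RET_EXPR node.
//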
GenTree* GenTree::gtRetExprVal(BasicBlockFlags* pbbFlags /* = nullptr */)
{
GenTree* retExprVal = this;
BasicBlockFlags bbFlags = BBF_EMPTY;
assert(!retExprVal->OperIs(GT_PUTARG_TYPE));
while (retExprVal->OperIs(GT_RET_EXPR))
{
const GenTreeRetExpr* retExpr = retExprVal->AsRetExpr();
bbFlags = retExpr->bbFlags;
retExprVal = retExpr->gtInlineCandidate;
}
if (pbbFlags != nullptr)
{
*pbbFlags = bbFlags;
}
return retExprVal;
}
//------------------------------------------------------------------------------
// OperRequiresAsgFlag : Check whether the operation requires GTF_ASG flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresAsgFlag()
{
if (OperIs(GT_ASG, GT_STORE_DYN_BLK) ||
OperIs(GT_XADD, GT_XORR, GT_XAND, GT_XCHG, GT_LOCKADD, GT_CMPXCHG, GT_MEMORYBARRIER))
{
return true;
}
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
if (hwIntrinsicNode->OperIsMemoryStore())
{
// A MemoryStore operation is an assignment
return true;
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
//------------------------------------------------------------------------------
// OperRequiresCallFlag : Check whether the operation requires GTF_CALL flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresCallFlag(Compiler* comp)
{
switch (gtOper)
{
case GT_CALL:
return true;
case GT_KEEPALIVE:
return true;
case GT_INTRINSIC:
return comp->IsIntrinsicImplementedByUserCall(this->AsIntrinsic()->gtIntrinsicName);
#if FEATURE_FIXED_OUT_ARGS && !defined(TARGET_64BIT)
case GT_LSH:
case GT_RSH:
case GT_RSZ:
// Variable shifts of a long end up being helper calls, so mark the tree as such in morph.
// This is potentially too conservative, since they'll get treated as having side effects.
// It is important to mark them as calls so if they are part of an argument list,
// they will get sorted and processed properly (for example, it is important to handle
// all nested calls before putting struct arguments in the argument registers). We
// could mark the trees just before argument processing, but it would require a full
// tree walk of the argument tree, so we just do it when morphing, instead, even though we'll
// mark non-argument trees (that will still get converted to calls, anyway).
return (this->TypeGet() == TYP_LONG) && (gtGetOp2()->OperGet() != GT_CNS_INT);
#endif // FEATURE_FIXED_OUT_ARGS && !TARGET_64BIT
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperIsImplicitIndir : Check whether the operation contains an implicit
// indirection.
// Arguments:
// this - a GenTree node
//
// Return Value:
// True if the given node contains an implicit indirection
//
// Note that for the [HW]INTRINSIC nodes we have to examine the
// details of the node to determine its result.
//
bool GenTree::OperIsImplicitIndir() const
{
switch (gtOper)
{
case GT_LOCKADD:
case GT_XORR:
case GT_XAND:
case GT_XADD:
case GT_XCHG:
case GT_CMPXCHG:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_BOX:
case GT_ARR_INDEX:
case GT_ARR_ELEM:
case GT_ARR_OFFSET:
return true;
case GT_INTRINSIC:
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
#ifdef FEATURE_SIMD
case GT_SIMD:
{
return AsSIMD()->OperIsMemoryLoad();
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
return AsHWIntrinsic()->OperIsMemoryLoadOrStore();
}
#endif // FEATURE_HW_INTRINSICS
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperMayThrow : Check whether the operation may throw.
//
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if the given operator may cause an exception
bool GenTree::OperMayThrow(Compiler* comp)
{
GenTree* op;
switch (gtOper)
{
case GT_MOD:
case GT_DIV:
case GT_UMOD:
case GT_UDIV:
/* Division with a non-zero, non-minus-one constant does not throw an exception */
op = AsOp()->gtOp2;
if (varTypeIsFloating(op->TypeGet()))
{
return false; // Floating point division does not throw.
}
// For integers only division by 0 or by -1 can throw
if (op->IsIntegralConst() && !op->IsIntegralConst(0) && !op->IsIntegralConst(-1))
{
return false;
}
return true;
case GT_INTRINSIC:
// If this is an intrinsic that represents the object.GetType(), it can throw a NullReferenceException.
// Currently, this is the only intrinsic that can throw an exception.
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
case GT_CALL:
CorInfoHelpFunc helper;
helper = comp->eeGetHelperNum(this->AsCall()->gtCallMethHnd);
return ((helper == CORINFO_HELP_UNDEF) || !comp->s_helperCallProperties.NoThrow(helper));
case GT_IND:
case GT_BLK:
case GT_OBJ:
case GT_NULLCHECK:
case GT_STORE_BLK:
case GT_STORE_DYN_BLK:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) && comp->fgAddrCouldBeNull(this->AsIndir()->Addr()));
case GT_ARR_LENGTH:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) &&
comp->fgAddrCouldBeNull(this->AsArrLen()->ArrRef()));
case GT_ARR_ELEM:
return comp->fgAddrCouldBeNull(this->AsArrElem()->gtArrObj);
case GT_FIELD:
{
GenTree* fldObj = this->AsField()->GetFldObj();
if (fldObj != nullptr)
{
return comp->fgAddrCouldBeNull(fldObj);
}
return false;
}
case GT_BOUNDS_CHECK:
case GT_ARR_INDEX:
case GT_ARR_OFFSET:
case GT_LCLHEAP:
case GT_CKFINITE:
case GT_INDEX_ADDR:
return true;
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
assert(hwIntrinsicNode != nullptr);
if (hwIntrinsicNode->OperIsMemoryLoadOrStore())
{
// This operation contains an implicit indirection
// it could throw a null reference exception.
//
return true;
}
break;
}
#endif // FEATURE_HW_INTRINSICS
default:
break;
}
/* Overflow arithmetic operations also throw exceptions */
if (gtOverflowEx())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetFieldCount: Return the register count for a multi-reg lclVar.
//
// Arguments:
// compiler - the current Compiler instance.
//
// Return Value:
// Returns the number of registers defined by this node.
//
// Notes:
// This must be a multireg lclVar.
//
unsigned int GenTreeLclVar::GetFieldCount(Compiler* compiler) const
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
return varDsc->lvFieldCnt;
}
//-----------------------------------------------------------------------------------
// GetFieldTypeByIndex: Get the type of the register, at position 'idx', that is produced
//                      by this multi-reg node.
//
// Arguments:
// compiler - the current Compiler instance.
// idx - which register type to return.
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
//   This must be a multireg lclVar and 'idx' must be a valid index for this node.
//
var_types GenTreeLclVar::GetFieldTypeByIndex(Compiler* compiler, unsigned idx)
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + idx);
assert(fieldVarDsc->TypeGet() != TYP_STRUCT); // Don't expect struct fields.
return fieldVarDsc->TypeGet();
}
#if DEBUGGABLE_GENTREE
// static
GenTree::VtablePtr GenTree::s_vtablesForOpers[] = {nullptr};
GenTree::VtablePtr GenTree::s_vtableForOp = nullptr;
GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper)
{
noway_assert(oper < GT_COUNT);
// First, check a cache.
if (s_vtablesForOpers[oper] != nullptr)
{
return s_vtablesForOpers[oper];
}
// Otherwise, look up the correct vtable entry. Note that we want the most derived GenTree subtype
// for an oper. E.g., GT_LCL_VAR is defined in GTSTRUCT_3 as GenTreeLclVar and in GTSTRUCT_N as
// GenTreeLclVarCommon. We want the GenTreeLclVar vtable, since nothing should actually be
// instantiated as a GenTreeLclVarCommon.
VtablePtr res = nullptr;
switch (oper)
{
// clang-format off
#define GTSTRUCT_0(nm, tag) /*handle explicitly*/
#define GTSTRUCT_1(nm, tag) \
case tag: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_2(nm, tag, tag2) \
case tag: \
case tag2: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_3(nm, tag, tag2, tag3) \
case tag: \
case tag2: \
case tag3: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_4(nm, tag, tag2, tag3, tag4) \
case tag: \
case tag2: \
case tag3: \
case tag4: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_N(nm, ...) /*handle explicitly*/
#define GTSTRUCT_2_SPECIAL(nm, tag, tag2) /*handle explicitly*/
#define GTSTRUCT_3_SPECIAL(nm, tag, tag2, tag3) /*handle explicitly*/
#include "gtstructs.h"
// clang-format on
// Handle the special cases.
// The following opers are in GTSTRUCT_N but no other place (namely, no subtypes).
case GT_STORE_BLK:
case GT_BLK:
{
GenTreeBlk gt;
res = *reinterpret_cast<VtablePtr*>(>);
}
break;
case GT_IND:
case GT_NULLCHECK:
{
GenTreeIndir gt;
res = *reinterpret_cast<VtablePtr*>(>);
}
break;
// We don't need to handle GTSTRUCT_N for LclVarCommon, since all those allowed opers are specified
// in their proper subtype. Similarly for GenTreeIndir.
default:
{
// Should be unary or binary op.
if (s_vtableForOp == nullptr)
{
unsigned opKind = OperKind(oper);
assert(!IsExOp(opKind));
assert(OperIsSimple(oper) || OperIsLeaf(oper));
// Need to provide non-null operands.
GenTreeIntCon dummyOp(TYP_INT, 0);
GenTreeOp gt(oper, TYP_INT, &dummyOp, ((opKind & GTK_UNOP) ? nullptr : &dummyOp));
s_vtableForOp = *reinterpret_cast<VtablePtr*>(>);
}
res = s_vtableForOp;
break;
}
}
s_vtablesForOpers[oper] = res;
return res;
}
void GenTree::SetVtableForOper(genTreeOps oper)
{
*reinterpret_cast<VtablePtr*>(this) = GetVtableForOper(oper);
}
#endif // DEBUGGABLE_GENTREE
GenTree* Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
{
assert(op1 != nullptr);
assert(op2 != nullptr);
// We should not be allocating nodes that extend GenTreeOp with this;
// should call the appropriate constructor for the extended type.
assert(!GenTree::IsExOp(GenTree::OperKind(oper)));
GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, op2);
return node;
}
GenTreeQmark* Compiler::gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon)
{
compQmarkUsed = true;
GenTreeQmark* result = new (this, GT_QMARK) GenTreeQmark(type, cond, colon);
#ifdef DEBUG
if (compQmarkRationalized)
{
fgCheckQmarkAllowedForm(result);
}
#endif
return result;
}
GenTreeIntCon* Compiler::gtNewIconNode(ssize_t value, var_types type)
{
return new (this, GT_CNS_INT) GenTreeIntCon(type, value);
}
GenTreeIntCon* Compiler::gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq)
{
GenTreeIntCon* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, static_cast<ssize_t>(fieldOffset));
node->gtFieldSeq = fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq;
return node;
}
// return a new node representing the value in a physical register
GenTree* Compiler::gtNewPhysRegNode(regNumber reg, var_types type)
{
assert(genIsValidIntReg(reg) || (reg == REG_SPBASE));
GenTree* result = new (this, GT_PHYSREG) GenTreePhysReg(reg, type);
return result;
}
GenTree* Compiler::gtNewJmpTableNode()
{
return new (this, GT_JMPTABLE) GenTree(GT_JMPTABLE, TYP_I_IMPL);
}
/*****************************************************************************
*
 *  Converts an annotated token into icon flags (so that we will later be
* able to tell the type of the handle that will be embedded in the icon
* node)
*/
GenTreeFlags Compiler::gtTokenToIconFlags(unsigned token)
{
GenTreeFlags flags = GTF_EMPTY;
switch (TypeFromToken(token))
{
case mdtTypeRef:
case mdtTypeDef:
case mdtTypeSpec:
flags = GTF_ICON_CLASS_HDL;
break;
case mdtMethodDef:
flags = GTF_ICON_METHOD_HDL;
break;
case mdtFieldDef:
flags = GTF_ICON_FIELD_HDL;
break;
default:
flags = GTF_ICON_TOKEN_HDL;
break;
}
return flags;
}
//-----------------------------------------------------------------------------------------
// gtNewIndOfIconHandleNode: Creates an indirection GenTree node of a constant handle
//
// Arguments:
// indType - The type returned by the indirection node
// addr - The constant address to read from
// iconFlags - The GTF_ICON flag value that specifies the kind of handle that we have
//    isInvariant - Whether the indirection node should also be marked as invariant
//
// Return Value:
//    Returns a GT_IND node representing the value at the address provided by 'addr'
//
// Notes:
// The GT_IND node is marked as non-faulting
//    If the indirection is not invariant, we also mark the indNode as GTF_GLOB_REF
//
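// Usage sketch (illustrative only; 'comp' and 'cellAddr' are hypothetical names):
//
//    // Read an invariant TYP_I_IMPL cell through a constant-pointer handle:
//    GenTree* ind = comp->gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)cellAddr, GTF_ICON_CONST_PTR, true);
//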
GenTree* Compiler::gtNewIndOfIconHandleNode(var_types indType, size_t addr, GenTreeFlags iconFlags, bool isInvariant)
{
GenTree* addrNode = gtNewIconHandleNode(addr, iconFlags);
GenTree* indNode = gtNewOperNode(GT_IND, indType, addrNode);
// This indirection won't cause an exception.
//
indNode->gtFlags |= GTF_IND_NONFAULTING;
if (isInvariant)
{
assert(iconFlags != GTF_ICON_STATIC_HDL); // Pointer to a mutable class Static variable
assert(iconFlags != GTF_ICON_BBC_PTR); // Pointer to a mutable basic block count value
assert(iconFlags != GTF_ICON_GLOBAL_PTR); // Pointer to mutable data from the VM state
// This indirection also is invariant.
indNode->gtFlags |= GTF_IND_INVARIANT;
if (iconFlags == GTF_ICON_STR_HDL)
{
// String literals are never null
indNode->gtFlags |= GTF_IND_NONNULL;
}
}
else
{
// GLOB_REF needs to be set for indirections returning values from mutable
        // locations, so that e.g. args sorting does not reorder them with calls.
indNode->gtFlags |= GTF_GLOB_REF;
}
return indNode;
}
/*****************************************************************************
*
 *  Allocates an integer constant entry that represents a HANDLE to something.
 *  It may not be allowed to embed HANDLEs directly into the JITed code (for example,
 *  as arguments to JIT helpers). Get a corresponding value that can be embedded.
* If the handle needs to be accessed via an indirection, pValue points to it.
*/
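// Sketch of the two shapes produced below (illustrative; 'hnd' and 'pHnd' are hypothetical):
//
//    gtNewIconEmbHndNode(hnd, nullptr, GTF_ICON_CLASS_HDL, ...)  ->  CNS_INT(hnd)
//    gtNewIconEmbHndNode(nullptr, pHnd, GTF_ICON_CLASS_HDL, ...) ->  IND(CNS_INT(pHnd)), non-faulting, invariant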
GenTree* Compiler::gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags iconFlags, void* compileTimeHandle)
{
GenTree* iconNode;
GenTree* handleNode;
if (value != nullptr)
{
// When 'value' is non-null, pValue is required to be null
assert(pValue == nullptr);
// use 'value' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)value, iconFlags);
// 'value' is the handle
handleNode = iconNode;
}
else
{
// When 'value' is null, pValue is required to be non-null
assert(pValue != nullptr);
// use 'pValue' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)pValue, iconFlags);
// 'pValue' is an address of a location that contains the handle
// construct the indirection of 'pValue'
handleNode = gtNewOperNode(GT_IND, TYP_I_IMPL, iconNode);
// This indirection won't cause an exception.
handleNode->gtFlags |= GTF_IND_NONFAULTING;
// This indirection also is invariant.
handleNode->gtFlags |= GTF_IND_INVARIANT;
}
iconNode->AsIntCon()->gtCompileTimeHandle = (size_t)compileTimeHandle;
return handleNode;
}
/*****************************************************************************/
GenTree* Compiler::gtNewStringLiteralNode(InfoAccessType iat, void* pValue)
{
GenTree* tree = nullptr;
switch (iat)
{
case IAT_VALUE:
setMethodHasFrozenString();
tree = gtNewIconEmbHndNode(pValue, nullptr, GTF_ICON_STR_HDL, nullptr);
tree->gtType = TYP_REF;
#ifdef DEBUG
tree->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PVALUE: // The value needs to be accessed via an indirection
// Create an indirection
tree = gtNewIndOfIconHandleNode(TYP_REF, (size_t)pValue, GTF_ICON_STR_HDL, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PPVALUE: // The value needs to be accessed via a double indirection
// Create the first indirection
tree = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pValue, GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
// Create the second indirection
tree = gtNewOperNode(GT_IND, TYP_REF, tree);
// This indirection won't cause an exception.
tree->gtFlags |= GTF_IND_NONFAULTING;
            // This indirection points into the global heap (it is a String object)
tree->gtFlags |= GTF_GLOB_REF;
break;
default:
noway_assert(!"Unexpected InfoAccessType");
}
return tree;
}
//------------------------------------------------------------------------
// gtNewStringLiteralLength: create a GenTreeIntCon node holding the given string
// literal's length.
//
// Arguments:
// node - string literal node.
//
// Return Value:
// GenTreeIntCon node with string's length as a value or null.
//
GenTreeIntCon* Compiler::gtNewStringLiteralLength(GenTreeStrCon* node)
{
if (node->IsStringEmptyField())
{
JITDUMP("Folded String.Empty.Length to 0\n");
return gtNewIconNode(0);
}
int length = -1;
const char16_t* str = info.compCompHnd->getStringLiteral(node->gtScpHnd, node->gtSconCPX, &length);
if (length >= 0)
{
GenTreeIntCon* iconNode = gtNewIconNode(length);
// str can be NULL for dynamic context
if (str != nullptr)
{
JITDUMP("Folded '\"%ws\".Length' to '%d'\n", str, length)
}
else
{
JITDUMP("Folded 'CNS_STR.Length' to '%d'\n", length)
}
return iconNode;
}
return nullptr;
}
/*****************************************************************************/
GenTree* Compiler::gtNewLconNode(__int64 value)
{
#ifdef TARGET_64BIT
GenTree* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_LONG, value);
#else
GenTree* node = new (this, GT_CNS_LNG) GenTreeLngCon(value);
#endif
return node;
}
GenTree* Compiler::gtNewDconNode(double value, var_types type)
{
GenTree* node = new (this, GT_CNS_DBL) GenTreeDblCon(value, type);
return node;
}
GenTree* Compiler::gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle)
{
// 'GT_CNS_STR' nodes later get transformed into 'GT_CALL'
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_CNS_STR]);
GenTree* node = new (this, GT_CALL) GenTreeStrCon(CPX, scpHandle DEBUGARG(/*largeNode*/ true));
return node;
}
GenTree* Compiler::gtNewZeroConNode(var_types type)
{
GenTree* zero;
switch (type)
{
case TYP_INT:
zero = gtNewIconNode(0);
break;
case TYP_BYREF:
FALLTHROUGH;
case TYP_REF:
zero = gtNewIconNode(0);
zero->gtType = type;
break;
case TYP_LONG:
zero = gtNewLconNode(0);
break;
case TYP_FLOAT:
zero = gtNewDconNode(0.0);
zero->gtType = type;
break;
case TYP_DOUBLE:
zero = gtNewDconNode(0.0);
break;
default:
noway_assert(!"Bad type in gtNewZeroConNode");
zero = nullptr;
break;
}
return zero;
}
GenTree* Compiler::gtNewOneConNode(var_types type)
{
GenTree* one;
switch (type)
{
case TYP_INT:
case TYP_UINT:
one = gtNewIconNode(1);
break;
case TYP_LONG:
case TYP_ULONG:
one = gtNewLconNode(1);
break;
case TYP_FLOAT:
case TYP_DOUBLE:
one = gtNewDconNode(1.0);
one->gtType = type;
break;
default:
noway_assert(!"Bad type in gtNewOneConNode");
one = nullptr;
break;
}
return one;
}
GenTreeLclVar* Compiler::gtNewStoreLclVar(unsigned dstLclNum, GenTree* src)
{
GenTreeLclVar* store = new (this, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, src->TypeGet(), dstLclNum);
store->gtOp1 = src;
store->gtFlags = (src->gtFlags & GTF_COMMON_MASK);
store->gtFlags |= GTF_VAR_DEF | GTF_ASG;
return store;
}
#ifdef FEATURE_SIMD
//---------------------------------------------------------------------
// gtNewSIMDVectorZero: create a GT_SIMD node for Vector<T>.Zero
//
// Arguments:
// simdType - simd vector type
// simdBaseJitType - element type of vector
// simdSize - size of vector in bytes
GenTree* Compiler::gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize)
{
var_types simdBaseType = genActualType(JitType2PreciseVarType(simdBaseJitType));
GenTree* initVal = gtNewZeroConNode(simdBaseType);
initVal->gtType = simdBaseType;
return gtNewSIMDNode(simdType, initVal, SIMDIntrinsicInit, simdBaseJitType, simdSize);
}
#endif // FEATURE_SIMD
GenTreeCall* Compiler::gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
return gtNewCallNode(CT_INDIRECT, (CORINFO_METHOD_HANDLE)addr, type, args, di);
}
GenTreeCall* Compiler::gtNewCallNode(
gtCallTypes callType, CORINFO_METHOD_HANDLE callHnd, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
GenTreeCall* node = new (this, GT_CALL) GenTreeCall(genActualType(type));
node->gtFlags |= (GTF_CALL | GTF_GLOB_REF);
#ifdef UNIX_X86_ABI
if (callType == CT_INDIRECT || callType == CT_HELPER)
node->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
for (GenTreeCall::Use& use : GenTreeCall::UseList(args))
{
node->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
node->gtCallType = callType;
node->gtCallMethHnd = callHnd;
node->gtCallArgs = args;
node->gtCallThisArg = nullptr;
node->fgArgInfo = nullptr;
INDEBUG(node->callSig = nullptr;)
node->tailCallInfo = nullptr;
node->gtRetClsHnd = nullptr;
node->gtControlExpr = nullptr;
node->gtCallMoreFlags = GTF_CALL_M_EMPTY;
if (callType == CT_INDIRECT)
{
node->gtCallCookie = nullptr;
}
else
{
node->gtInlineCandidateInfo = nullptr;
}
node->gtCallLateArgs = nullptr;
node->gtReturnType = type;
#ifdef FEATURE_READYTORUN
node->gtEntryPoint.addr = nullptr;
node->gtEntryPoint.accessType = IAT_VALUE;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// These get updated after call node is built.
node->gtInlineObservation = InlineObservation::CALLEE_UNUSED_INITIAL;
node->gtRawILOffset = BAD_IL_OFFSET;
node->gtInlineContext = compInlineContext;
#endif
    // Spec: Managed Retval sequence points need to be generated while generating debug info for debuggable code.
//
// Implementation note: if not generating MRV info genCallSite2ILOffsetMap will be NULL and
// codegen will pass DebugInfo() to emitter, which will cause emitter
// not to emit IP mapping entry.
if (opts.compDbgCode && opts.compDbgInfo && di.IsValid())
{
// Managed Retval - IL offset of the call. This offset is used to emit a
// CALL_INSTRUCTION type sequence point while emitting corresponding native call.
//
// TODO-Cleanup:
// a) (Opt) We need not store this offset if the method doesn't return a
// value. Rather it can be made BAD_IL_OFFSET to prevent a sequence
// point being emitted.
//
// b) (Opt) Add new sequence points only if requested by debugger through
// a new boundary type - ICorDebugInfo::BoundaryTypes
if (genCallSite2DebugInfoMap == nullptr)
{
genCallSite2DebugInfoMap = new (getAllocator()) CallSiteDebugInfoTable(getAllocator());
}
// Make sure that there are no duplicate entries for a given call node
assert(!genCallSite2DebugInfoMap->Lookup(node));
genCallSite2DebugInfoMap->Set(node, di);
}
// Initialize gtOtherRegs
node->ClearOtherRegs();
// Initialize spill flags of gtOtherRegs
node->ClearOtherRegFlags();
#if !defined(TARGET_64BIT)
if (varTypeIsLong(node))
{
assert(node->gtReturnType == node->gtType);
// Initialize Return type descriptor of call node
node->InitializeLongReturnType();
}
#endif // !defined(TARGET_64BIT)
return node;
}
GenTreeLclVar* Compiler::gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
assert(type != TYP_VOID);
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
LclVarDsc* varDsc = lvaGetDesc(lnum);
bool simd12ToSimd16Widening = false;
#if FEATURE_SIMD
// We can additionally have a SIMD12 that was widened to a SIMD16, generally as part of lowering
simd12ToSimd16Widening = (type == TYP_SIMD16) && (varDsc->lvType == TYP_SIMD12);
#endif
assert((type == varDsc->lvType) || simd12ToSimd16Widening ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (varDsc->lvType == TYP_BYREF)));
}
GenTreeLclVar* node = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs));
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
return node;
}
GenTreeLclVar* Compiler::gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
assert(type == lvaTable[lnum].lvType ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (lvaTable[lnum].lvType == TYP_BYREF)));
}
// This local variable node may later get transformed into a large node
assert(GenTree::s_gtNodeSizes[LargeOpOpcode()] > GenTree::s_gtNodeSizes[GT_LCL_VAR]);
GenTreeLclVar* node =
new (this, LargeOpOpcode()) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs) DEBUGARG(/*largeNode*/ true));
return node;
}
GenTreeLclVar* Compiler::gtNewLclVarAddrNode(unsigned lclNum, var_types type)
{
GenTreeLclVar* node = new (this, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, type, lclNum);
return node;
}
GenTreeLclFld* Compiler::gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, type, lclNum, lclOffs);
node->SetFieldSeq(fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq);
return node;
}
GenTreeLclFld* Compiler::gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, type, lnum, offset);
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
node->SetFieldSeq(FieldSeqStore::NotAField());
return node;
}
GenTree* Compiler::gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags)
{
assert(GenTree::s_gtNodeSizes[GT_RET_EXPR] == TREE_NODE_SZ_LARGE);
GenTreeRetExpr* node = new (this, GT_RET_EXPR) GenTreeRetExpr(type);
node->gtInlineCandidate = inlineCandidate;
node->bbFlags = bbFlags;
if (varTypeIsStruct(inlineCandidate) && !inlineCandidate->OperIsBlkOp())
{
node->gtRetClsHnd = gtGetStructHandle(inlineCandidate);
}
    // A GT_RET_EXPR node might eventually be bashed back to GT_CALL (when inlining is aborted, for example).
    // Therefore it should carry the GTF_CALL flag so that all the rules about spilling can apply to it as well.
    // For example, impImportLeave or CEE_POP need to spill GT_RET_EXPR before emptying the evaluation stack.
node->gtFlags |= GTF_CALL;
return node;
}
GenTreeCall::Use* Compiler::gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node, args);
}
GenTreeCall::Use* Compiler::gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after)
{
after->SetNext(new (this, CMK_ASTNode) GenTreeCall::Use(node, after->GetNext()));
return after->GetNext();
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node);
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3, node4));
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching argNum and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
return argInfo->GetArgEntry(argNum);
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching node and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByNode(GenTreeCall* call, GenTree* node)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->GetNode() == node)
{
return curArgTabEntry;
}
else if (curArgTabEntry->use->GetNode() == node)
{
return curArgTabEntry;
}
}
noway_assert(!"gtArgEntryByNode: node not found");
return nullptr;
}
/*****************************************************************************
*
* Find and return the entry with the given "lateArgInx". Requires that one is found
* (asserts this).
*/
fgArgTabEntry* Compiler::gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
assert(lateArgInx != UINT_MAX);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->isLateArg() && curArgTabEntry->GetLateArgInx() == lateArgInx)
{
return curArgTabEntry;
}
}
    noway_assert(!"gtArgEntryByLateArgIndex: entry not found");
return nullptr;
}
//------------------------------------------------------------------------
// gtArgNodeByLateArgInx: Given a call instruction, find the argument with the given
// late arg index (i.e. the given position in the gtCallLateArgs list).
// Arguments:
// call - the call node
// lateArgInx - the index into the late args list
//
// Return value:
// The late argument node.
//
GenTree* Compiler::gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx)
{
GenTree* argx = nullptr;
unsigned regIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
argx = use.GetNode();
assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
if (regIndex == lateArgInx)
{
break;
}
regIndex++;
}
noway_assert(argx != nullptr);
return argx;
}
/*****************************************************************************
*
* Create a node that will assign 'src' to 'dst'.
*/
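// Usage sketch (illustrative; 'comp' and 'lclNum' are hypothetical):
//
//    GenTree*   dst = comp->gtNewLclvNode(lclNum, TYP_INT);
//    GenTree*   src = comp->gtNewIconNode(42);
//    GenTreeOp* asg = comp->gtNewAssignNode(dst, src); // ASG(LCL_VAR V<lclNum>, CNS_INT 42)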
GenTreeOp* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src)
{
assert(!src->TypeIs(TYP_VOID));
/* Mark the target as being assigned */
if ((dst->gtOper == GT_LCL_VAR) || (dst->OperGet() == GT_LCL_FLD))
{
dst->gtFlags |= GTF_VAR_DEF;
if (dst->IsPartialLclFld(this))
{
// We treat these partial writes as combined uses and defs.
dst->gtFlags |= GTF_VAR_USEASG;
}
}
dst->gtFlags |= GTF_DONT_CSE;
#if defined(FEATURE_SIMD) && !defined(TARGET_X86)
// TODO-CQ: x86 Windows supports multi-reg returns but not SIMD multi-reg returns
if (varTypeIsSIMD(dst->gtType))
{
// We want to track SIMD assignments as being intrinsics since they
// are functionally SIMD `mov` instructions and are more efficient
// when we don't promote, particularly when it occurs due to inlining
SetOpLclRelatedToSIMDIntrinsic(dst);
SetOpLclRelatedToSIMDIntrinsic(src);
}
#endif // FEATURE_SIMD
/* Create the assignment node */
GenTreeOp* asg = gtNewOperNode(GT_ASG, dst->TypeGet(), dst, src)->AsOp();
/* Mark the expression as containing an assignment */
asg->gtFlags |= GTF_ASG;
return asg;
}
//------------------------------------------------------------------------
// gtNewObjNode: Creates a new Obj node.
//
// Arguments:
// structHnd - The class handle of the struct type.
// addr - The address of the struct.
//
// Return Value:
// Returns a node representing the struct value at the given address.
//
GenTreeObj* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
var_types nodeType = impNormStructType(structHnd);
assert(varTypeIsStruct(nodeType));
GenTreeObj* objNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, typGetObjLayout(structHnd));
    // An Obj is not a global reference if it is known to be a local struct.
if ((addr->gtFlags & GTF_GLOB_REF) == 0)
{
GenTreeLclVarCommon* lclNode = addr->IsLocalAddrExpr();
if (lclNode != nullptr)
{
objNode->gtFlags |= GTF_IND_NONFAULTING;
if (!lvaIsImplicitByRefLocal(lclNode->GetLclNum()))
{
objNode->gtFlags &= ~GTF_GLOB_REF;
}
}
}
return objNode;
}
//------------------------------------------------------------------------
// gtSetObjGcInfo: Set the GC info on an object node (downgrading it to a plain block node if its layout has no GC pointers)
//
// Arguments:
// objNode - The object node of interest
void Compiler::gtSetObjGcInfo(GenTreeObj* objNode)
{
assert(varTypeIsStruct(objNode->TypeGet()));
assert(objNode->TypeGet() == impNormStructType(objNode->GetLayout()->GetClassHandle()));
if (!objNode->GetLayout()->HasGCPtr())
{
objNode->SetOper(objNode->OperIs(GT_OBJ) ? GT_BLK : GT_STORE_BLK);
}
}
//------------------------------------------------------------------------
// gtNewStructVal: Return a node that represents a struct value
//
// Arguments:
// structHnd - The class for the struct
// addr - The address of the struct
//
// Return Value:
// A block, object or local node that represents the struct value pointed to by 'addr'.
GenTree* Compiler::gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
if (val->OperGet() == GT_LCL_VAR)
{
unsigned lclNum = addr->gtGetOp1()->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = &(lvaTable[lclNum]);
if (varTypeIsStruct(varDsc) && (varDsc->GetStructHnd() == structHnd) && !lvaIsImplicitByRefLocal(lclNum))
{
return addr->gtGetOp1();
}
}
}
return gtNewObjNode(structHnd, addr);
}
//------------------------------------------------------------------------
// gtNewBlockVal: Return a node that represents a possibly untyped block value
//
// Arguments:
// addr - The address of the block
// size - The size of the block
//
// Return Value:
// A block, object or local node that represents the block value pointed to by 'addr'.
GenTree* Compiler::gtNewBlockVal(GenTree* addr, unsigned size)
{
// By default we treat this as an opaque struct type with known size.
var_types blkType = TYP_STRUCT;
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
#if FEATURE_SIMD
if (varTypeIsSIMD(val) && (genTypeSize(val) == size))
{
blkType = val->TypeGet();
}
#endif // FEATURE_SIMD
if (varTypeIsStruct(val) && val->OperIs(GT_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(val->AsLclVarCommon());
unsigned varSize = varTypeIsStruct(varDsc) ? varDsc->lvExactSize : genTypeSize(varDsc);
if (varSize == size)
{
return val;
}
}
}
return new (this, GT_BLK) GenTreeBlk(GT_BLK, blkType, addr, typGetBlkLayout(size));
}
// Creates a new assignment node for a CpObj.
// Parameters (exactly the same as MSIL CpObj):
//
// dstAddr - The target to copy the struct to
// srcAddr - The source to copy the struct from
// structHnd - A class token that represents the type of object being copied. May be null
// if FEATURE_SIMD is enabled and the source has a SIMD type.
// isVolatile - Is this marked as volatile memory?
GenTree* Compiler::gtNewCpObjNode(GenTree* dstAddr, GenTree* srcAddr, CORINFO_CLASS_HANDLE structHnd, bool isVolatile)
{
GenTree* lhs = gtNewStructVal(structHnd, dstAddr);
GenTree* src = nullptr;
if (lhs->OperIs(GT_OBJ))
{
GenTreeObj* lhsObj = lhs->AsObj();
#if DEBUG
// Codegen for CpObj assumes that we cannot have a struct with GC pointers whose size is not a multiple
// of the register size. The EE currently does not allow this to ensure that GC pointers are aligned
// if the struct is stored in an array. Note that this restriction doesn't apply to stack-allocated objects:
// they are never stored in arrays. We should never get to this method with stack-allocated objects since they
// are never copied so we don't need to exclude them from the assert below.
// Let's assert it just to be safe.
ClassLayout* layout = lhsObj->GetLayout();
unsigned size = layout->GetSize();
assert((layout->GetGCPtrCount() == 0) || (roundUp(size, REGSIZE_BYTES) == size));
#endif
gtSetObjGcInfo(lhsObj);
}
if (srcAddr->OperGet() == GT_ADDR)
{
src = srcAddr->AsOp()->gtOp1;
}
else
{
src = gtNewOperNode(GT_IND, lhs->TypeGet(), srcAddr);
}
GenTree* result = gtNewBlkOpNode(lhs, src, isVolatile, true);
return result;
}
//------------------------------------------------------------------------
// FixupInitBlkValue: Fixup the init value for an initBlk operation
//
// Arguments:
// asgType - The type of assignment that the initBlk is being transformed into
//
// Return Value:
// Modifies the constant value on this node to be the appropriate "fill"
// value for the initblk.
//
// Notes:
// The initBlk MSIL instruction takes a byte value, which must be
// extended to the size of the assignment when an initBlk is transformed
// to an assignment of a primitive type.
// This performs the appropriate extension.
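//
// Worked example (illustrative): a fill byte of 0xAB widened for a TYP_INT assignment
// becomes 0xABABABAB, and on 64-bit targets widened for a TYP_LONG assignment becomes
// 0xABABABABABABABAB.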
void GenTreeIntCon::FixupInitBlkValue(var_types asgType)
{
assert(varTypeIsIntegralOrI(asgType));
unsigned size = genTypeSize(asgType);
if (size > 1)
{
size_t cns = gtIconVal;
cns = cns & 0xFF;
cns |= cns << 8;
if (size >= 4)
{
cns |= cns << 16;
#ifdef TARGET_64BIT
if (size == 8)
{
cns |= cns << 32;
}
#endif // TARGET_64BIT
// Make the type match for evaluation types.
gtType = asgType;
            // If we are initializing a GC type, the value being assigned must be zero (null).
assert(!varTypeIsGC(asgType) || (cns == 0));
}
gtIconVal = cns;
}
}
//----------------------------------------------------------------------------
// UsesDivideByConstOptimized:
// returns true if rationalize will use the division by constant
// optimization for this node.
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
// Return Value:
//    Return true iff the node is a GT_DIV, GT_UDIV, GT_MOD or GT_UMOD with
// an integer constant and we can perform the division operation using
// a reciprocal multiply or a shift operation.
//
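// Illustrative examples for a TYP_INT dividend (subject to the target-specific checks below):
//    x / 8  (and x % 8)           -> can be expanded using shifts,
//    x / 7  (signed or unsigned)  -> can be expanded using a reciprocal ("magic number") multiply,
//    x / 0, or signed x / -1      -> never optimized; these must preserve exception behavior.
//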
bool GenTreeOp::UsesDivideByConstOptimized(Compiler* comp)
{
if (!comp->opts.OptimizationEnabled())
{
return false;
}
if (!OperIs(GT_DIV, GT_MOD, GT_UDIV, GT_UMOD))
{
return false;
}
#if defined(TARGET_ARM64)
if (OperIs(GT_MOD, GT_UMOD))
{
// MOD, UMOD not supported for ARM64
return false;
}
#endif // TARGET_ARM64
bool isSignedDivide = OperIs(GT_DIV, GT_MOD);
GenTree* dividend = gtGetOp1()->gtEffectiveVal(/*commaOnly*/ true);
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
#if !defined(TARGET_64BIT)
if (dividend->OperIs(GT_LONG))
{
return false;
}
#endif
if (dividend->IsCnsIntOrI())
{
// We shouldn't see a divmod with constant operands here but if we do then it's likely
// because optimizations are disabled or it's a case that's supposed to throw an exception.
// Don't optimize this.
return false;
}
ssize_t divisorValue;
if (divisor->IsCnsIntOrI())
{
divisorValue = static_cast<ssize_t>(divisor->AsIntCon()->IconValue());
}
else
{
ValueNum vn = divisor->gtVNPair.GetLiberal();
if (comp->vnStore->IsVNConstant(vn))
{
divisorValue = comp->vnStore->CoercedConstantValue<ssize_t>(vn);
}
else
{
return false;
}
}
const var_types divType = TypeGet();
if (divisorValue == 0)
{
// x / 0 and x % 0 can't be optimized because they are required to throw an exception.
return false;
}
else if (isSignedDivide)
{
if (divisorValue == -1)
{
// x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.
return false;
}
else if (isPow2(divisorValue))
{
return true;
}
}
else // unsigned divide
{
if (divType == TYP_INT)
{
            // Clear the upper 32 bits of the value; they may be set to 1 because constants
            // are treated as signed and stored in ssize_t, which is 64 bits wide on 64-bit targets.
divisorValue &= UINT32_MAX;
}
size_t unsignedDivisorValue = (size_t)divisorValue;
if (isPow2(unsignedDivisorValue))
{
return true;
}
}
const bool isDiv = OperIs(GT_DIV, GT_UDIV);
if (isDiv)
{
if (isSignedDivide)
{
// If the divisor is the minimum representable integer value then the result is either 0 or 1
if ((divType == TYP_INT && divisorValue == INT_MIN) || (divType == TYP_LONG && divisorValue == INT64_MIN))
{
return true;
}
}
else
{
            // If the divisor is greater than or equal to 2^(N - 1) then the result is either 0 or 1
if (((divType == TYP_INT) && ((UINT32)divisorValue > (UINT32_MAX / 2))) ||
((divType == TYP_LONG) && ((UINT64)divisorValue > (UINT64_MAX / 2))))
{
return true;
}
}
}
// TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
if (!comp->opts.MinOpts() && ((divisorValue >= 3) || !isSignedDivide))
{
// All checks pass we can perform the division operation using a reciprocal multiply.
return true;
}
#endif
return false;
}
//------------------------------------------------------------------------
// CheckDivideByConstOptimized:
// Checks whether we can use the division-by-constant optimization on this node
// and, if so, sets the flag GTF_DIV_BY_CNS_OPT and sets GTF_DONT_CSE on the
// constant divisor node.
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
void GenTreeOp::CheckDivideByConstOptimized(Compiler* comp)
{
if (UsesDivideByConstOptimized(comp))
{
gtFlags |= GTF_DIV_BY_CNS_OPT;
        // Now set DONT_CSE on the GT_CNS_INT divisor; note that
        // with value numbering we can have a non-GT_CNS_INT divisor
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
if (divisor->OperIs(GT_CNS_INT))
{
divisor->gtFlags |= GTF_DONT_CSE;
}
}
}
//
//------------------------------------------------------------------------
// gtBlockOpInit: Initializes a BlkOp GenTree
//
// Arguments:
// result - an assignment node that is to be initialized.
// dst - the target (destination) we want to either initialize or copy to.
//  srcOrFillVal - the init value for InitBlk or the source struct for CpBlk/CpObj.
// isVolatile - specifies whether this node is a volatile memory operation.
//
// Assumptions:
// 'result' is an assignment that is newly constructed.
// If 'dst' is TYP_STRUCT, then it must be a block node or lclVar.
//
// Notes:
// This procedure centralizes all the logic to both enforce proper structure and
// to properly construct any InitBlk/CpBlk node.
void Compiler::gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile)
{
if (!result->OperIsBlkOp())
{
assert(dst->TypeGet() != TYP_STRUCT);
return;
}
    /* In the case of CpBlk, we want to avoid generating
     * nodes where the source and destination are the same
     * for two reasons: first, it is useless; second,
     * it introduces issues in liveness, and copying
     * memory from an overlapping memory location is
     * undefined behavior both per the ECMA standard and
     * per memcpy semantics.
*
* NOTE: In this case we'll only detect the case for addr of a local
* and a local itself, any other complex expressions won't be
* caught.
*
* TODO-Cleanup: though having this logic is goodness (i.e. avoids self-assignment
* of struct vars very early), it was added because fgInterBlockLocalVarLiveness()
* isn't handling self-assignment of struct variables correctly. This issue may not
* surface if struct promotion is ON (which is the case on x86/arm). But still the
* fundamental issue exists that needs to be addressed.
*/
if (result->OperIsCopyBlkOp())
{
GenTree* currSrc = srcOrFillVal;
GenTree* currDst = dst;
if (currSrc->OperIsBlk() && (currSrc->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currSrc = currSrc->AsBlk()->Addr()->gtGetOp1();
}
if (currDst->OperIsBlk() && (currDst->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currDst = currDst->AsBlk()->Addr()->gtGetOp1();
}
if (currSrc->OperGet() == GT_LCL_VAR && currDst->OperGet() == GT_LCL_VAR &&
currSrc->AsLclVarCommon()->GetLclNum() == currDst->AsLclVarCommon()->GetLclNum())
{
// Make this a NOP
// TODO-Cleanup: probably doesn't matter, but could do this earlier and avoid creating a GT_ASG
result->gtBashToNOP();
return;
}
}
// Propagate all effect flags from children
result->gtFlags |= dst->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= result->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= (dst->gtFlags & GTF_EXCEPT) | (srcOrFillVal->gtFlags & GTF_EXCEPT);
if (isVolatile)
{
result->gtFlags |= GTF_BLK_VOLATILE;
}
#ifdef FEATURE_SIMD
if (result->OperIsCopyBlkOp() && varTypeIsSIMD(srcOrFillVal))
{
// If the source is a GT_SIMD node of SIMD type, then the dst lclvar struct
// should be labeled as simd intrinsic related struct.
// This is done so that the morpher can transform any field accesses into
// intrinsics, thus avoiding conflicting access methods (fields vs. whole-register).
GenTree* src = srcOrFillVal;
if (src->OperIsIndir() && (src->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
src = src->AsIndir()->Addr()->gtGetOp1();
}
#ifdef FEATURE_HW_INTRINSICS
if ((src->OperGet() == GT_SIMD) || (src->OperGet() == GT_HWINTRINSIC))
#else
if (src->OperGet() == GT_SIMD)
#endif // FEATURE_HW_INTRINSICS
{
if (dst->OperIsBlk() && (dst->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
dst = dst->AsIndir()->Addr()->gtGetOp1();
}
if (dst->OperIsLocal() && varTypeIsStruct(dst))
{
setLclRelatedToSIMDIntrinsic(dst);
}
}
}
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// gtNewBlkOpNode: Creates a GenTree for a block (struct) assignment.
//
// Arguments:
// dst - The destination node: local var / block node.
//    srcOrFillVal - The value to assign for CopyBlk, the integer "fill" for InitBlk
// isVolatile - Whether this is a volatile memory operation or not.
// isCopyBlock - True if this is a block copy (rather than a block init).
//
// Return Value:
// Returns the newly constructed and initialized block operation.
//
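// Usage sketch (illustrative; 'comp', 'dstAddr' and 'size' are hypothetical):
//
//    GenTree* dst  = comp->gtNewBlockVal(dstAddr, size);
//    GenTree* zero = comp->gtNewIconNode(0);
//    GenTree* init = comp->gtNewBlkOpNode(dst, zero, /* isVolatile */ false, /* isCopyBlock */ false);
//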
GenTree* Compiler::gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock)
{
assert(dst->OperIsBlk() || dst->OperIsLocal());
if (isCopyBlock)
{
if (srcOrFillVal->OperIsIndir() && (srcOrFillVal->gtGetOp1()->gtOper == GT_ADDR))
{
srcOrFillVal = srcOrFillVal->gtGetOp1()->gtGetOp1();
}
}
else
{
// InitBlk
assert(varTypeIsIntegral(srcOrFillVal));
if (varTypeIsStruct(dst))
{
if (!srcOrFillVal->IsIntegralConst(0))
{
srcOrFillVal = gtNewOperNode(GT_INIT_VAL, TYP_INT, srcOrFillVal);
}
}
}
GenTree* result = gtNewAssignNode(dst, srcOrFillVal);
gtBlockOpInit(result, dst, srcOrFillVal, isVolatile);
return result;
}
//------------------------------------------------------------------------
// gtNewPutArgReg: Creates a new PutArgReg node.
//
// Arguments:
// type - The actual type of the argument
// arg - The argument node
// argReg - The register that the argument will be passed in
//
// Return Value:
// Returns the newly created PutArgReg node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/armel, GenTreeOp on all the other archs.
//
GenTree* Compiler::gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg)
{
assert(arg != nullptr);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A PUTARG_REG could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_PUTARG_REG) GenTreeMultiRegOp(GT_PUTARG_REG, type, arg, nullptr);
if (type == TYP_LONG)
{
node->AsMultiRegOp()->gtOtherReg = REG_NEXT(argReg);
}
#else
node = gtNewOperNode(GT_PUTARG_REG, type, arg);
#endif
node->SetRegNum(argReg);
return node;
}
//------------------------------------------------------------------------
// gtNewBitCastNode: Creates a new BitCast node.
//
// Arguments:
// type - The actual type of the argument
// arg - The argument node
// argReg - The register that the argument will be passed in
//
// Return Value:
// Returns the newly created BitCast node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/arm, as GenTreeOp on all the other archs.
//
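// Usage sketch (illustrative; 'comp' and 'floatValue' are hypothetical): reinterpret the
// bits of a TYP_FLOAT value as TYP_INT without a numeric conversion:
//
//    GenTree* bits = comp->gtNewBitCastNode(TYP_INT, floatValue);
//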
GenTree* Compiler::gtNewBitCastNode(var_types type, GenTree* arg)
{
assert(arg != nullptr);
assert(type != TYP_STRUCT);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A BITCAST could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_BITCAST) GenTreeMultiRegOp(GT_BITCAST, type, arg, nullptr);
#else
node = gtNewOperNode(GT_BITCAST, type, arg);
#endif
return node;
}
//------------------------------------------------------------------------
// gtNewAllocObjNode: Helper to create an object allocation node.
//
// Arguments:
// pResolvedToken - Resolved token for the object being allocated
// useParent - true iff the token represents a child of the object's class
//
// Return Value:
// Returns GT_ALLOCOBJ node that will be later morphed into an
// allocation helper call or local variable allocation on the stack.
//
// Node creation can fail for inlinees when the type described by pResolvedToken
// can't be represented in jitted code. If this happens, this method will return
// nullptr.
//
GenTreeAllocObj* Compiler::gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent)
{
const bool mustRestoreHandle = true;
bool* const pRuntimeLookup = nullptr;
bool usingReadyToRunHelper = false;
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
GenTree* opHandle = impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, useParent);
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP lookup = {};
if (opts.IsReadyToRun())
{
helper = CORINFO_HELP_READYTORUN_NEW;
CORINFO_LOOKUP_KIND* const pGenericLookupKind = nullptr;
usingReadyToRunHelper =
info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup);
}
#endif
if (!usingReadyToRunHelper)
{
if (opHandle == nullptr)
{
// We must be backing out of an inline.
assert(compDonotInline());
return nullptr;
}
}
bool helperHasSideEffects;
CorInfoHelpFunc helperTemp =
info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd, &helperHasSideEffects);
if (!usingReadyToRunHelper)
{
helper = helperTemp;
}
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the newfast call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
// 3) Allocate and return the new object for boxing
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
GenTreeAllocObj* allocObj =
gtNewAllocObjNode(helper, helperHasSideEffects, pResolvedToken->hClass, TYP_REF, opHandle);
#ifdef FEATURE_READYTORUN
if (usingReadyToRunHelper)
{
assert(lookup.addr != nullptr);
allocObj->gtEntryPoint = lookup;
}
#endif
return allocObj;
}
/*****************************************************************************
*
* Clones the given tree value and returns a copy of the given tree.
* If 'complexOK' is false, the cloning is only done provided the tree
* is not too complex (whatever that may mean);
* If 'complexOK' is true, we try slightly harder to clone the tree.
* In either case, NULL is returned if the tree cannot be cloned
*
* Note that there is the function gtCloneExpr() which does a more
* complete job if you can't handle this function failing.
*/
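// Usage sketch (illustrative; 'comp' and 'addrTree' are hypothetical): callers must be
// prepared for a null result when the tree is too complex to clone cheaply:
//
//    GenTree* copyOfAddr = comp->gtClone(addrTree, /* complexOK */ true);
//    if (copyOfAddr == nullptr)
//    {
//        // fall back to gtCloneExpr, or re-evaluate the original tree
//    }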
GenTree* Compiler::gtClone(GenTree* tree, bool complexOK)
{
GenTree* copy;
switch (tree->gtOper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy = gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = new (this, GT_CNS_INT)
GenTreeIntCon(tree->gtType, tree->AsIntCon()->gtIconVal, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
}
break;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
break;
case GT_LCL_VAR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVarCommon()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
break;
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = new (this, tree->OperGet())
GenTreeLclFld(tree->OperGet(), tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
break;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->gtType, tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
break;
default:
if (!complexOK)
{
return nullptr;
}
if (tree->gtOper == GT_FIELD)
{
GenTree* objp = nullptr;
if (tree->AsField()->GetFldObj() != nullptr)
{
objp = gtClone(tree->AsField()->GetFldObj(), false);
if (objp == nullptr)
{
return nullptr;
}
}
copy = gtNewFieldRef(tree->TypeGet(), tree->AsField()->gtFldHnd, objp, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
}
else if (tree->OperIs(GT_ADD, GT_SUB))
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
if (op1->OperIsLeaf() && op2->OperIsLeaf())
{
op1 = gtClone(op1);
if (op1 == nullptr)
{
return nullptr;
}
op2 = gtClone(op2);
if (op2 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(tree->OperGet(), tree->TypeGet(), op1, op2);
}
else
{
return nullptr;
}
}
else if (tree->gtOper == GT_ADDR)
{
GenTree* op1 = gtClone(tree->AsOp()->gtOp1);
if (op1 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(GT_ADDR, tree->TypeGet(), op1);
}
else
{
return nullptr;
}
break;
}
copy->gtFlags |= tree->gtFlags & ~GTF_NODE_MASK;
#if defined(DEBUG)
copy->gtDebugFlags |= tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK;
#endif // defined(DEBUG)
return copy;
}
//------------------------------------------------------------------------
// gtCloneExpr: Create a copy of `tree`, adding flags `addFlags`, mapping
// local `varNum` to int constant `varVal` if it appears at
// the root, and mapping uses of local `deepVarNum` to constant
// `deepVarVal` if they occur beyond the root.
//
// Arguments:
// tree - GenTree to create a copy of
// addFlags - GTF_* flags to add to the copied tree nodes
// varNum - lclNum to replace at the root, or ~0 for no root replacement
// varVal - If replacing at root, replace local `varNum` with IntCns `varVal`
// deepVarNum - lclNum to replace uses of beyond the root, or ~0 for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Return Value:
// A copy of the given tree with the replacements and added flags specified.
//
// Notes:
// Top-level callers should generally call the overload that doesn't have
// the explicit `deepVarNum` and `deepVarVal` parameters; those are used in
// recursive invocations to avoid replacing defs.
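//
// Usage sketch (illustrative; 'comp', 'tree' and 'lclNum' are hypothetical). Per the note
// above, top-level callers would normally use the overload without the 'deep' parameters:
//
//    // Clone 'tree', substituting the constant 0 for every use of local V<lclNum>:
//    GenTree* clone = comp->gtCloneExpr(tree, GTF_EMPTY, lclNum, 0, lclNum, 0);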
GenTree* Compiler::gtCloneExpr(
GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal)
{
if (tree == nullptr)
{
return nullptr;
}
/* Figure out what kind of a node we have */
genTreeOps oper = tree->OperGet();
unsigned kind = tree->OperKind();
GenTree* copy;
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy =
gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = gtNewIconNode(tree->AsIntCon()->gtIconVal, tree->gtType);
#ifdef DEBUG
copy->AsIntCon()->gtTargetHandle = tree->AsIntCon()->gtTargetHandle;
#endif
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->AsIntCon()->gtFieldSeq = tree->AsIntCon()->gtFieldSeq;
}
goto DONE;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
goto DONE;
case GT_CNS_DBL:
copy = gtNewDconNode(tree->AsDblCon()->gtDconVal);
copy->gtType = tree->gtType; // keep the same type
goto DONE;
case GT_CNS_STR:
copy = gtNewSconNode(tree->AsStrCon()->gtSconCPX, tree->AsStrCon()->gtScpHnd);
goto DONE;
case GT_LCL_VAR:
if (tree->AsLclVarCommon()->GetLclNum() == varNum)
{
copy = gtNewIconNode(varVal, tree->gtType);
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
copy->LabelIndex(this);
}
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVar()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
copy->AsLclVarCommon()->SetSsaNum(tree->AsLclVarCommon()->GetSsaNum());
}
goto DONE;
case GT_LCL_FLD:
if (tree->AsLclFld()->GetLclNum() == varNum)
{
IMPL_LIMITATION("replacing GT_LCL_FLD with a constant");
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy =
new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
copy->gtFlags = tree->gtFlags;
}
goto DONE;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->TypeGet(), tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
goto DONE;
case GT_RET_EXPR:
            // GT_RET_EXPR is a unique node that contains a link to a gtInlineCandidate node,
            // which is part of another statement. We cannot clone both here and cannot
            // create another GT_RET_EXPR that points to the same gtInlineCandidate.
NO_WAY("Cloning of GT_RET_EXPR node not supported");
goto DONE;
case GT_MEMORYBARRIER:
copy = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
goto DONE;
case GT_ARGPLACE:
copy = gtNewArgPlaceHolderNode(tree->gtType, tree->AsArgPlace()->gtArgPlaceClsHnd);
goto DONE;
case GT_FTN_ADDR:
copy = new (this, oper) GenTreeFptrVal(tree->gtType, tree->AsFptrVal()->gtFptrMethod);
#ifdef FEATURE_READYTORUN
copy->AsFptrVal()->gtEntryPoint = tree->AsFptrVal()->gtEntryPoint;
#endif
goto DONE;
case GT_CATCH_ARG:
case GT_NO_OP:
case GT_LABEL:
copy = new (this, oper) GenTree(oper, tree->gtType);
goto DONE;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_JMP:
copy = new (this, oper) GenTreeVal(oper, tree->gtType, tree->AsVal()->gtVal1);
goto DONE;
case GT_LCL_VAR_ADDR:
copy = new (this, oper) GenTreeLclVar(oper, tree->TypeGet(), tree->AsLclVar()->GetLclNum());
goto DONE;
case GT_LCL_FLD_ADDR:
copy = new (this, oper)
GenTreeLclFld(oper, tree->TypeGet(), tree->AsLclFld()->GetLclNum(), tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
goto DONE;
default:
NO_WAY("Cloning of node not supported");
goto DONE;
}
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
/* If necessary, make sure we allocate a "fat" tree node */
CLANG_FORMAT_COMMENT_ANCHOR;
switch (oper)
{
/* These nodes sometimes get bashed to "fat" ones */
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
// In the implementation of gtNewLargeOperNode you have
// to give an oper that will create a small node,
// otherwise it asserts.
//
if (GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL)
{
copy = gtNewLargeOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1,
tree->OperIsBinary() ? tree->AsOp()->gtOp2 : nullptr);
}
else // Always a large tree
{
if (tree->OperIsBinary())
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
else
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1);
}
}
break;
case GT_CAST:
copy = new (this, LargeOpOpcode())
GenTreeCast(tree->TypeGet(), tree->AsCast()->CastOp(), tree->IsUnsigned(),
tree->AsCast()->gtCastType DEBUGARG(/*largeNode*/ TRUE));
break;
case GT_INDEX:
{
GenTreeIndex* asInd = tree->AsIndex();
copy = new (this, GT_INDEX)
GenTreeIndex(asInd->TypeGet(), asInd->Arr(), asInd->Index(), asInd->gtIndElemSize);
copy->AsIndex()->gtStructElemClass = asInd->gtStructElemClass;
}
break;
case GT_INDEX_ADDR:
{
GenTreeIndexAddr* asIndAddr = tree->AsIndexAddr();
copy = new (this, GT_INDEX_ADDR)
GenTreeIndexAddr(asIndAddr->Arr(), asIndAddr->Index(), asIndAddr->gtElemType,
asIndAddr->gtStructElemClass, asIndAddr->gtElemSize, asIndAddr->gtLenOffset,
asIndAddr->gtElemOffset);
copy->AsIndexAddr()->gtIndRngFailBB = asIndAddr->gtIndRngFailBB;
}
break;
case GT_ALLOCOBJ:
{
GenTreeAllocObj* asAllocObj = tree->AsAllocObj();
copy = new (this, GT_ALLOCOBJ)
GenTreeAllocObj(tree->TypeGet(), asAllocObj->gtNewHelper, asAllocObj->gtHelperHasSideEffects,
asAllocObj->gtAllocObjClsHnd, asAllocObj->gtOp1);
#ifdef FEATURE_READYTORUN
copy->AsAllocObj()->gtEntryPoint = asAllocObj->gtEntryPoint;
#endif
}
break;
case GT_RUNTIMELOOKUP:
{
GenTreeRuntimeLookup* asRuntimeLookup = tree->AsRuntimeLookup();
copy = new (this, GT_RUNTIMELOOKUP)
GenTreeRuntimeLookup(asRuntimeLookup->gtHnd, asRuntimeLookup->gtHndType, asRuntimeLookup->gtOp1);
}
break;
case GT_ARR_LENGTH:
copy = gtNewArrLen(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsArrLen()->ArrLenOffset(), nullptr);
break;
case GT_ARR_INDEX:
copy = new (this, GT_ARR_INDEX)
GenTreeArrIndex(tree->TypeGet(),
gtCloneExpr(tree->AsArrIndex()->ArrObj(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrIndex()->IndexExpr(), addFlags, deepVarNum, deepVarVal),
tree->AsArrIndex()->gtCurrDim, tree->AsArrIndex()->gtArrRank,
tree->AsArrIndex()->gtArrElemType);
break;
case GT_QMARK:
copy = new (this, GT_QMARK)
GenTreeQmark(tree->TypeGet(), tree->AsOp()->gtGetOp1(), tree->AsOp()->gtGetOp2()->AsColon());
break;
case GT_OBJ:
copy =
new (this, GT_OBJ) GenTreeObj(tree->TypeGet(), tree->AsObj()->Addr(), tree->AsObj()->GetLayout());
break;
case GT_BLK:
copy = new (this, GT_BLK)
GenTreeBlk(GT_BLK, tree->TypeGet(), tree->AsBlk()->Addr(), tree->AsBlk()->GetLayout());
break;
case GT_FIELD:
copy = new (this, GT_FIELD) GenTreeField(tree->TypeGet(), tree->AsField()->GetFldObj(),
tree->AsField()->gtFldHnd, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
break;
case GT_BOX:
copy = new (this, GT_BOX)
GenTreeBox(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsBox()->gtAsgStmtWhenInlinedBoxValue,
tree->AsBox()->gtCopyStmtWhenInlinedBoxValue);
break;
case GT_INTRINSIC:
copy = new (this, GT_INTRINSIC)
GenTreeIntrinsic(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2,
tree->AsIntrinsic()->gtIntrinsicName, tree->AsIntrinsic()->gtMethodHandle);
#ifdef FEATURE_READYTORUN
copy->AsIntrinsic()->gtEntryPoint = tree->AsIntrinsic()->gtEntryPoint;
#endif
break;
case GT_BOUNDS_CHECK:
copy = new (this, GT_BOUNDS_CHECK)
GenTreeBoundsChk(tree->AsBoundsChk()->GetIndex(), tree->AsBoundsChk()->GetArrayLength(),
tree->AsBoundsChk()->gtThrowKind);
copy->AsBoundsChk()->gtIndRngFailBB = tree->AsBoundsChk()->gtIndRngFailBB;
break;
case GT_LEA:
{
GenTreeAddrMode* addrModeOp = tree->AsAddrMode();
copy = new (this, GT_LEA)
GenTreeAddrMode(addrModeOp->TypeGet(), addrModeOp->Base(), addrModeOp->Index(), addrModeOp->gtScale,
static_cast<unsigned>(addrModeOp->Offset()));
}
break;
case GT_COPY:
case GT_RELOAD:
{
copy = new (this, oper) GenTreeCopyOrReload(oper, tree->TypeGet(), tree->gtGetOp1());
}
break;
default:
assert(!GenTree::IsExOp(tree->OperKind()) && tree->OperIsSimple());
// We're in the SimpleOp case, so it's always unary or binary.
if (GenTree::OperIsUnary(tree->OperGet()))
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, /*doSimplifications*/ false);
}
else
{
assert(GenTree::OperIsBinary(tree->OperGet()));
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
break;
}
// Some flags are conceptually part of the gtOper, and should be copied immediately.
if (tree->gtOverflowEx())
{
copy->gtFlags |= GTF_OVERFLOW;
}
if (tree->AsOp()->gtOp1)
{
if (tree->gtOper == GT_ASG)
{
// Don't replace varNum if it appears as the LHS of an assign.
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, -1, 0, deepVarNum, deepVarVal);
}
else
{
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, deepVarNum, deepVarVal);
}
}
if (tree->gtGetOp2IfPresent())
{
copy->AsOp()->gtOp2 = gtCloneExpr(tree->AsOp()->gtOp2, addFlags, deepVarNum, deepVarVal);
}
/* Flags */
addFlags |= tree->gtFlags;
// Copy any node annotations, if necessary.
switch (tree->gtOper)
{
case GT_STOREIND:
case GT_IND:
case GT_OBJ:
case GT_STORE_OBJ:
{
ArrayInfo arrInfo;
if (!tree->AsIndir()->gtOp1->OperIs(GT_INDEX_ADDR) && TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
GetArrayInfoMap()->Set(copy, arrInfo);
}
}
break;
default:
break;
}
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
// Effects flags propagate upwards.
if (copy->AsOp()->gtOp1 != nullptr)
{
copy->gtFlags |= (copy->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT);
}
if (copy->gtGetOp2IfPresent() != nullptr)
{
copy->gtFlags |= (copy->gtGetOp2()->gtFlags & GTF_ALL_EFFECT);
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
// We can't safely clone calls that have GT_RET_EXPRs via gtCloneExpr.
// You must use gtCloneCandidateCall for these calls (and then do appropriate other fixup)
if (tree->AsCall()->IsInlineCandidate() || tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
NO_WAY("Cloning of calls with associated GT_RET_EXPR nodes is not supported");
}
copy = gtCloneExprCallHelper(tree->AsCall(), addFlags, deepVarNum, deepVarVal);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
copy = new (this, GT_SIMD)
GenTreeSIMD(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsSIMD()),
tree->AsSIMD()->GetSIMDIntrinsicId(), tree->AsSIMD()->GetSimdBaseJitType(),
tree->AsSIMD()->GetSimdSize());
goto CLONE_MULTIOP_OPERANDS;
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
copy = new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsMultiOp()),
tree->AsHWIntrinsic()->GetHWIntrinsicId(),
tree->AsHWIntrinsic()->GetSimdBaseJitType(), tree->AsHWIntrinsic()->GetSimdSize(),
tree->AsHWIntrinsic()->IsSimdAsHWIntrinsic());
copy->AsHWIntrinsic()->SetAuxiliaryJitType(tree->AsHWIntrinsic()->GetAuxiliaryJitType());
goto CLONE_MULTIOP_OPERANDS;
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
CLONE_MULTIOP_OPERANDS:
for (GenTree** use : copy->AsMultiOp()->UseEdges())
{
*use = gtCloneExpr(*use, addFlags, deepVarNum, deepVarVal);
}
break;
#endif
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
GenTree* inds[GT_ARR_MAX_RANK];
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
inds[dim] = gtCloneExpr(arrElem->gtArrInds[dim], addFlags, deepVarNum, deepVarVal);
}
copy = new (this, GT_ARR_ELEM)
GenTreeArrElem(arrElem->TypeGet(), gtCloneExpr(arrElem->gtArrObj, addFlags, deepVarNum, deepVarVal),
arrElem->gtArrRank, arrElem->gtArrElemSize, arrElem->gtArrElemType, &inds[0]);
}
break;
case GT_ARR_OFFSET:
{
copy = new (this, GT_ARR_OFFSET)
GenTreeArrOffs(tree->TypeGet(),
gtCloneExpr(tree->AsArrOffs()->gtOffset, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtIndex, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtArrObj, addFlags, deepVarNum, deepVarVal),
tree->AsArrOffs()->gtCurrDim, tree->AsArrOffs()->gtArrRank,
tree->AsArrOffs()->gtArrElemType);
}
break;
case GT_PHI:
{
copy = new (this, GT_PHI) GenTreePhi(tree->TypeGet());
GenTreePhi::Use** prevUse = ©->AsPhi()->gtUses;
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
*prevUse = new (this, CMK_ASTNode)
GenTreePhi::Use(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal), *prevUse);
prevUse = &((*prevUse)->NextRef());
}
}
break;
case GT_FIELD_LIST:
copy = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
copy->AsFieldList()->AddField(this, gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal),
use.GetOffset(), use.GetType());
}
break;
case GT_CMPXCHG:
copy = new (this, GT_CMPXCHG)
GenTreeCmpXchg(tree->TypeGet(),
gtCloneExpr(tree->AsCmpXchg()->gtOpLocation, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpValue, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpComparand, addFlags, deepVarNum, deepVarVal));
break;
case GT_STORE_DYN_BLK:
copy = new (this, oper)
GenTreeStoreDynBlk(gtCloneExpr(tree->AsStoreDynBlk()->Addr(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->Data(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->gtDynamicSize, addFlags, deepVarNum, deepVarVal));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
NO_WAY("unexpected operator");
}
DONE:
// If it has a zero-offset field seq, copy annotation.
if (tree->TypeGet() == TYP_BYREF)
{
FieldSeqNode* fldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(tree, &fldSeq))
{
fgAddFieldSeqForZeroOffset(copy, fldSeq);
}
}
copy->gtVNPair = tree->gtVNPair; // A cloned tree gets the original's value number pair
/* Compute the flags for the copied node. Note that we can do this only
   if we didn't gtFoldExpr(copy) */
if (copy->gtOper == oper)
{
addFlags |= tree->gtFlags;
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
copy->gtFlags |= addFlags;
// Update side effect flags since they may be different from the source side effect flags.
// For example, we may have replaced some locals with constants and made indirections non-throwing.
gtUpdateNodeSideEffects(copy);
}
/* GTF_COLON_COND should be propagated from 'tree' to 'copy' */
copy->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
#if defined(DEBUG)
// Non-node debug flags should be propagated from 'tree' to 'copy'
copy->gtDebugFlags |= (tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
/* Make sure to copy back fields that may have been initialized */
copy->CopyRawCosts(tree);
copy->gtRsvdRegs = tree->gtRsvdRegs;
copy->CopyReg(tree);
return copy;
}
//------------------------------------------------------------------------
// gtCloneExprCallHelper: clone a call tree
//
// Notes:
// Do not invoke this method directly, instead call either gtCloneExpr
// or gtCloneCandidateCall, as appropriate.
//
// Arguments:
// tree - the call to clone
// addFlags - GTF_* flags to add to the copied tree nodes
// deepVarNum - lclNum to replace uses of beyond the root, or BAD_VAR_NUM for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneExprCallHelper(GenTreeCall* tree,
GenTreeFlags addFlags,
unsigned deepVarNum,
int deepVarVal)
{
GenTreeCall* copy = new (this, GT_CALL) GenTreeCall(tree->TypeGet());
if (tree->gtCallThisArg == nullptr)
{
copy->gtCallThisArg = nullptr;
}
else
{
copy->gtCallThisArg =
gtNewCallArgs(gtCloneExpr(tree->gtCallThisArg->GetNode(), addFlags, deepVarNum, deepVarVal));
}
copy->gtCallMoreFlags = tree->gtCallMoreFlags;
copy->gtCallArgs = nullptr;
copy->gtCallLateArgs = nullptr;
GenTreeCall::Use** argsTail = ©->gtCallArgs;
for (GenTreeCall::Use& use : tree->Args())
{
*argsTail = gtNewCallArgs(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal));
argsTail = &((*argsTail)->NextRef());
}
argsTail = ©->gtCallLateArgs;
for (GenTreeCall::Use& use : tree->LateArgs())
{
*argsTail = gtNewCallArgs(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal));
argsTail = &((*argsTail)->NextRef());
}
// The call sig comes from the EE and doesn't change throughout the compilation process, meaning
// we only really need one physical copy of it. Therefore a shallow pointer copy will suffice.
// (Note that this still holds even if the tree we are cloning was created by an inlinee compiler,
// because the inlinee still uses the inliner's memory allocator anyway.)
INDEBUG(copy->callSig = tree->callSig;)
// The tail call info does not change after it is allocated, so for the same reasons as above
// a shallow copy suffices.
copy->tailCallInfo = tree->tailCallInfo;
copy->gtRetClsHnd = tree->gtRetClsHnd;
copy->gtControlExpr = gtCloneExpr(tree->gtControlExpr, addFlags, deepVarNum, deepVarVal);
copy->gtStubCallStubAddr = tree->gtStubCallStubAddr;
/* Copy the union */
if (tree->gtCallType == CT_INDIRECT)
{
copy->gtCallCookie =
tree->gtCallCookie ? gtCloneExpr(tree->gtCallCookie, addFlags, deepVarNum, deepVarVal) : nullptr;
copy->gtCallAddr = tree->gtCallAddr ? gtCloneExpr(tree->gtCallAddr, addFlags, deepVarNum, deepVarVal) : nullptr;
}
else
{
copy->gtCallMethHnd = tree->gtCallMethHnd;
copy->gtInlineCandidateInfo = tree->gtInlineCandidateInfo;
}
copy->gtCallType = tree->gtCallType;
copy->gtReturnType = tree->gtReturnType;
if (tree->fgArgInfo)
{
// Create and initialize the fgArgInfo for our copy of the call tree
copy->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(copy, tree);
}
else
{
copy->fgArgInfo = nullptr;
}
#if FEATURE_MULTIREG_RET
copy->gtReturnTypeDesc = tree->gtReturnTypeDesc;
#endif
#ifdef FEATURE_READYTORUN
copy->setEntryPoint(tree->gtEntryPoint);
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
copy->gtInlineObservation = tree->gtInlineObservation;
copy->gtRawILOffset = tree->gtRawILOffset;
copy->gtInlineContext = tree->gtInlineContext;
#endif
copy->CopyOtherRegFlags(tree);
// We keep track of the number of no return calls, so if we've cloned
// one of these, update the tracking.
//
if (tree->IsNoReturn())
{
assert(copy->IsNoReturn());
setMethodHasNoReturnCalls();
}
return copy;
}
//------------------------------------------------------------------------
// gtCloneCandidateCall: clone a call that is an inline or guarded
// devirtualization candidate (~ any call that can have a GT_RET_EXPR)
//
// Notes:
// If the call really is a candidate, the caller must take additional steps
// after cloning to re-establish candidate info and the relationship between
// the candidate and any associated GT_RET_EXPR.
//
// Arguments:
// call - the call to clone
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneCandidateCall(GenTreeCall* call)
{
assert(call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate());
GenTreeCall* result = gtCloneExprCallHelper(call);
// There is some common post-processing in gtCloneExpr that we reproduce
// here, for the fields that make sense for candidate calls.
result->gtFlags |= call->gtFlags;
#if defined(DEBUG)
result->gtDebugFlags |= (call->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
result->CopyReg(call);
return result;
}
//------------------------------------------------------------------------
// gtUpdateSideEffects: Update the side effects of a tree and its ancestors
//
// Arguments:
// stmt - The tree's statement
// tree - Tree to update the side effects for
//
// Note: If tree's order hasn't been established, the method updates side effect
// flags on all of the statement's nodes.
void Compiler::gtUpdateSideEffects(Statement* stmt, GenTree* tree)
{
if (fgStmtListThreaded)
{
gtUpdateTreeAncestorsSideEffects(tree);
}
else
{
gtUpdateStmtSideEffects(stmt);
}
}
//------------------------------------------------------------------------
// gtUpdateTreeAncestorsSideEffects: Update the side effects of a tree and its ancestors
// when statement order has been established.
//
// Arguments:
// tree - Tree to update the side effects for
//
void Compiler::gtUpdateTreeAncestorsSideEffects(GenTree* tree)
{
assert(fgStmtListThreaded);
while (tree != nullptr)
{
gtUpdateNodeSideEffects(tree);
tree = tree->gtGetParent(nullptr);
}
}
//------------------------------------------------------------------------
// gtUpdateStmtSideEffects: Update the side effects for statement tree nodes.
//
// Arguments:
// stmt - The statement to update side effects on
//
void Compiler::gtUpdateStmtSideEffects(Statement* stmt)
{
fgWalkTree(stmt->GetRootNodePointer(), fgUpdateSideEffectsPre, fgUpdateSideEffectsPost);
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffects: Update the side effects based on the node operation.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
// The caller of this method is expected to update the flags based on the children's flags.
//
void Compiler::gtUpdateNodeOperSideEffects(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
if (tree->OperIsIndirOrArrLength())
{
tree->SetIndirExceptionFlags(this);
}
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
else
{
tree->gtFlags &= ~GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
else
{
tree->gtFlags &= ~GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffectsPost: Update the side effects based on the node operation,
// in the post-order visit of a tree walk. It is expected that the pre-order visit cleared
// the bits, so the post-order visit only sets them. This is important for binary nodes
// where one child already may have set the GTF_EXCEPT bit. Note that `SetIndirExceptionFlags`
// looks at its child, which is why we need to do this in a bottom-up walk.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_ASG, GTF_CALL, and GTF_EXCEPT flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
void Compiler::gtUpdateNodeOperSideEffectsPost(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeSideEffects: Update the side effects based on the node operation and
// children's side effects.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
void Compiler::gtUpdateNodeSideEffects(GenTree* tree)
{
gtUpdateNodeOperSideEffects(tree);
tree->VisitOperands([tree](GenTree* operand) -> GenTree::VisitResult {
tree->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT);
return GenTree::VisitResult::Continue;
});
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPre: Update the side effects based on the tree operation.
// The pre-visit walk clears GTF_ASG, GTF_CALL, and GTF_EXCEPT; the post-visit walk sets
// the bits as necessary.
//
// Arguments:
// pTree - Pointer to the tree to update the side effects
// fgWalkPre - Walk data
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPre(GenTree** pTree, fgWalkData* fgWalkPre)
{
GenTree* tree = *pTree;
tree->gtFlags &= ~(GTF_ASG | GTF_CALL | GTF_EXCEPT);
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPost: Update the side effects of the node and parent based on the tree's flags.
//
// Arguments:
// pTree - Pointer to the tree
// fgWalkPost - Walk data
//
// Notes:
// The routine is used for updating the stale side effect flags for ancestor
// nodes starting from treeParent up to the top-level stmt expr.
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPost(GenTree** pTree, fgWalkData* fgWalkPost)
{
GenTree* tree = *pTree;
// Update the node's side effects first.
fgWalkPost->compiler->gtUpdateNodeOperSideEffectsPost(tree);
// If this node is an indir or array length, and it doesn't have the GTF_EXCEPT bit set, we
// set the GTF_IND_NONFAULTING bit. This needs to be done after all children, and this node, have
// been processed.
if (tree->OperIsIndirOrArrLength() && ((tree->gtFlags & GTF_EXCEPT) == 0))
{
tree->gtFlags |= GTF_IND_NONFAULTING;
}
// Then update the parent's side effects based on this node.
GenTree* parent = fgWalkPost->parent;
if (parent != nullptr)
{
parent->gtFlags |= (tree->gtFlags & GTF_ALL_EFFECT);
}
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// gtGetThisArg: Return this pointer node for the call.
//
// Arguments:
// call - the call node with a this argument.
//
// Return value:
// the this pointer node.
//
GenTree* Compiler::gtGetThisArg(GenTreeCall* call)
{
assert(call->gtCallThisArg != nullptr);
GenTree* thisArg = call->gtCallThisArg->GetNode();
if (!thisArg->OperIs(GT_ASG))
{
if ((thisArg->gtFlags & GTF_LATE_ARG) == 0)
{
return thisArg;
}
}
assert(call->gtCallLateArgs != nullptr);
unsigned argNum = 0;
fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, argNum);
GenTree* result = thisArgTabEntry->GetNode();
// Assert if we used DEBUG_DESTROY_NODE.
assert(result->gtOper != GT_COUNT);
return result;
}
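//------------------------------------------------------------------------
// gtSetFlags: Check whether this node is marked to set the processor condition flags.
//
// Return Value:
//    True if GTF_SET_FLAGS is set on an eligible node (see the notes below).
//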
bool GenTree::gtSetFlags() const
{
//
// When FEATURE_SET_FLAGS (TARGET_ARM) is active, the method returns true
// when gtFlags has the GTF_SET_FLAGS flag set;
// otherwise the architecture has instructions that typically set
// the flags, and this method will return true.
//
// Exceptions: GT_IND (load/store) is not allowed to set the flags,
// and on XARCH the GT_MUL/GT_DIV and all overflow instructions
// do not set the condition flags.
//
// Precondition: we have a GTK_SMPOP.
//
if (!varTypeIsIntegralOrI(TypeGet()) && (TypeGet() != TYP_VOID))
{
return false;
}
if (((gtFlags & GTF_SET_FLAGS) != 0) && (gtOper != GT_IND))
{
// GTF_SET_FLAGS is not valid on GT_IND and is overlaid with GTF_IND_NONFAULTING
return true;
}
else
{
return false;
}
}
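//------------------------------------------------------------------------
// gtRequestSetFlags: Try to mark this node with GTF_SET_FLAGS so that codegen
//    also sets the condition flags when producing its value.
//
// Return Value:
//    True if the flag was set; false if this node cannot set the flags
//    (or FEATURE_SET_FLAGS is not enabled).
//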
bool GenTree::gtRequestSetFlags()
{
bool result = false;
#if FEATURE_SET_FLAGS
// This method is a Nop unless FEATURE_SET_FLAGS is defined
// In order to set GTF_SET_FLAGS
// we must have a GTK_SMPOP
// and we have an integer or machine-size type (not floating point or TYP_LONG on 32-bit)
//
if (!OperIsSimple())
return false;
if (!varTypeIsIntegralOrI(TypeGet()))
return false;
switch (gtOper)
{
case GT_IND:
case GT_ARR_LENGTH:
// These will turn into simple load from memory instructions
// and we can't force the setting of the flags on load from memory
break;
case GT_MUL:
case GT_DIV:
// These instructions don't set the flags (on x86/x64)
//
break;
default:
// Otherwise we can set the flags for this gtOper
// and codegen must set the condition flags.
//
gtFlags |= GTF_SET_FLAGS;
result = true;
break;
}
#endif // FEATURE_SET_FLAGS
// Codegen for this tree must set the condition flags if
// this method returns true.
//
return result;
}
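//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: Construct an iterator in the terminal ("end") state.
//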
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator()
: m_advance(nullptr), m_node(nullptr), m_edge(nullptr), m_statePtr(nullptr), m_state(-1)
{
}
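//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: Construct an iterator positioned at the first use edge
//    of the given node, selecting the advance function based on the node's oper.
//
// Arguments:
//    node - the node whose use edges will be iterated
//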
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node)
: m_advance(nullptr), m_node(node), m_edge(nullptr), m_statePtr(nullptr), m_state(0)
{
assert(m_node != nullptr);
// NOTE: the switch statement below must be updated when introducing new nodes.
switch (m_node->OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
m_state = -1;
return;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
case GT_RETURNTRAP:
m_edge = &m_node->AsUnOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
return;
// Unary operators with an optional operand
case GT_NOP:
case GT_FIELD:
case GT_RETURN:
case GT_RETFILT:
if (m_node->AsUnOp()->gtOp1 == nullptr)
{
assert(m_node->NullOp1Legal());
m_state = -1;
}
else
{
m_edge = &m_node->AsUnOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
return;
// Variadic nodes
#ifdef FEATURE_SIMD
case GT_SIMD:
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
SetEntryStateForMultiOp();
return;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// LEA, which may have no first operand
case GT_LEA:
if (m_node->AsAddrMode()->gtOp1 == nullptr)
{
m_edge = &m_node->AsAddrMode()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else
{
SetEntryStateForBinOp();
}
return;
// Special nodes
case GT_FIELD_LIST:
m_statePtr = m_node->AsFieldList()->Uses().GetHead();
m_advance = &GenTreeUseEdgeIterator::AdvanceFieldList;
AdvanceFieldList();
return;
case GT_PHI:
m_statePtr = m_node->AsPhi()->gtUses;
m_advance = &GenTreeUseEdgeIterator::AdvancePhi;
AdvancePhi();
return;
case GT_CMPXCHG:
m_edge = &m_node->AsCmpXchg()->gtOpLocation;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceCmpXchg;
return;
case GT_ARR_ELEM:
m_edge = &m_node->AsArrElem()->gtArrObj;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrElem;
return;
case GT_ARR_OFFSET:
m_edge = &m_node->AsArrOffs()->gtOffset;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrOffset;
return;
case GT_STORE_DYN_BLK:
m_edge = &m_node->AsStoreDynBlk()->Addr();
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceStoreDynBlk;
return;
case GT_CALL:
AdvanceCall<CALL_INSTANCE>();
return;
// Binary nodes
default:
assert(m_node->OperIsBinary());
SetEntryStateForBinOp();
return;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCmpXchg: produces the next operand of a CmpXchg node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceCmpXchg()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsCmpXchg()->gtOpValue;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsCmpXchg()->gtOpComparand;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrElem: produces the next operand of an ArrElem node and advances the state.
//
// Because these nodes are variadic, this function uses `m_state` to index into the list of array indices.
//
void GenTreeUseEdgeIterator::AdvanceArrElem()
{
if (m_state < m_node->AsArrElem()->gtArrRank)
{
m_edge = &m_node->AsArrElem()->gtArrInds[m_state];
assert(*m_edge != nullptr);
m_state++;
}
else
{
m_state = -1;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrOffset: produces the next operand of an ArrOffset node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceArrOffset()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsArrOffs()->gtIndex;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsArrOffs()->gtArrObj;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceStoreDynBlk: produces the next operand of a StoreDynBlk node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceStoreDynBlk()
{
GenTreeStoreDynBlk* const dynBlock = m_node->AsStoreDynBlk();
switch (m_state)
{
case 0:
m_edge = &dynBlock->Data();
m_state = 1;
break;
case 1:
m_edge = &dynBlock->gtDynamicSize;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceFieldList: produces the next operand of a FieldList node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceFieldList()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreeFieldList::Use* currentUse = static_cast<GenTreeFieldList::Use*>(m_statePtr);
m_edge = ¤tUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvancePhi: produces the next operand of a Phi node and advances the state.
//
void GenTreeUseEdgeIterator::AdvancePhi()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreePhi::Use* currentUse = static_cast<GenTreePhi::Use*>(m_statePtr);
m_edge = ¤tUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceBinOp: produces the next operand of a binary node and advances the state.
//
// This function must be instantiated s.t. `ReverseOperands` is `true` iff the node is marked with the
// `GTF_REVERSE_OPS` flag.
//
template <bool ReverseOperands>
void GenTreeUseEdgeIterator::AdvanceBinOp()
{
assert(ReverseOperands == ((m_node->gtFlags & GTF_REVERSE_OPS) != 0));
m_edge = !ReverseOperands ? &m_node->AsOp()->gtOp2 : &m_node->AsOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForBinOp: produces the first operand of a binary node and chooses
// the appropriate advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForBinOp()
{
assert(m_node != nullptr);
assert(m_node->OperIsBinary());
GenTreeOp* const node = m_node->AsOp();
if (node->gtOp2 == nullptr)
{
assert(node->gtOp1 != nullptr);
assert(node->NullOp2Legal());
m_edge = &node->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else if ((node->gtFlags & GTF_REVERSE_OPS) != 0)
{
m_edge = &m_node->AsOp()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<true>;
}
else
{
m_edge = &m_node->AsOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<false>;
}
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceMultiOp: produces the next operand of a multi-op node and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// incrementing the "m_edge" pointer, unless the end, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
m_edge++;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceReversedMultiOp: produces the next operand of a multi-op node
// marked with GTF_REVERSE_OPS and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// decrementing the "m_edge" pointer, unless the beginning, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceReversedMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
assert((m_node->AsMultiOp()->GetOperandCount() == 2) && m_node->IsReverseOp());
m_edge--;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForMultiOp: produces the first operand of a multi-op node and sets the
// required advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForMultiOp()
{
size_t operandCount = m_node->AsMultiOp()->GetOperandCount();
if (operandCount == 0)
{
Terminate();
}
else
{
if (m_node->IsReverseOp())
{
assert(operandCount == 2);
m_edge = m_node->AsMultiOp()->GetOperandArray() + 1;
m_statePtr = m_node->AsMultiOp()->GetOperandArray() - 1;
m_advance = &GenTreeUseEdgeIterator::AdvanceReversedMultiOp;
}
else
{
m_edge = m_node->AsMultiOp()->GetOperandArray();
m_statePtr = m_node->AsMultiOp()->GetOperandArray(operandCount);
m_advance = &GenTreeUseEdgeIterator::AdvanceMultiOp;
}
}
}
#endif
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCall: produces the next operand of a call node and advances the state.
//
// This function is a bit tricky: in order to avoid doing unnecessary work, it is instantiated with the
// state number the iterator will be in when it is called. For example, `AdvanceCall<CALL_INSTANCE>`
// is the instantiation used when the iterator is at the `CALL_INSTANCE` state (i.e. the entry state).
// This sort of templating allows each state to avoid processing earlier states without unnecessary
// duplication of code.
//
// Note that this method expands the argument lists (`gtCallArgs` and `gtCallLateArgs`) into their
// component operands.
//
template <int state>
void GenTreeUseEdgeIterator::AdvanceCall()
{
GenTreeCall* const call = m_node->AsCall();
switch (state)
{
case CALL_INSTANCE:
m_statePtr = call->gtCallArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ARGS>;
if (call->gtCallThisArg != nullptr)
{
m_edge = &call->gtCallThisArg->NodeRef();
return;
}
FALLTHROUGH;
case CALL_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_statePtr = call->gtCallLateArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_LATE_ARGS>;
FALLTHROUGH;
case CALL_LATE_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_CONTROL_EXPR>;
FALLTHROUGH;
case CALL_CONTROL_EXPR:
if (call->gtControlExpr != nullptr)
{
if (call->gtCallType == CT_INDIRECT)
{
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_COOKIE>;
}
else
{
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
m_edge = &call->gtControlExpr;
return;
}
else if (call->gtCallType != CT_INDIRECT)
{
m_state = -1;
return;
}
FALLTHROUGH;
case CALL_COOKIE:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ADDRESS>;
if (call->gtCallCookie != nullptr)
{
m_edge = &call->gtCallCookie;
return;
}
FALLTHROUGH;
case CALL_ADDRESS:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::Terminate;
if (call->gtCallAddr != nullptr)
{
m_edge = &call->gtCallAddr;
}
return;
default:
unreached();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::Terminate: advances the iterator to the terminal state.
//
void GenTreeUseEdgeIterator::Terminate()
{
m_state = -1;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::operator++: advances the iterator to the next operand.
//
GenTreeUseEdgeIterator& GenTreeUseEdgeIterator::operator++()
{
// If we've reached the terminal state, do nothing.
if (m_state != -1)
{
(this->*m_advance)();
}
return *this;
}
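//------------------------------------------------------------------------
// UseEdgesBegin/UseEdgesEnd/UseEdges: Accessors for iterating this node's use edges;
// OperandsBegin/OperandsEnd/Operands do the same for the operand nodes themselves.
//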
GenTreeUseEdgeIterator GenTree::UseEdgesBegin()
{
return GenTreeUseEdgeIterator(this);
}
GenTreeUseEdgeIterator GenTree::UseEdgesEnd()
{
return GenTreeUseEdgeIterator();
}
IteratorPair<GenTreeUseEdgeIterator> GenTree::UseEdges()
{
return MakeIteratorPair(UseEdgesBegin(), UseEdgesEnd());
}
GenTreeOperandIterator GenTree::OperandsBegin()
{
return GenTreeOperandIterator(this);
}
GenTreeOperandIterator GenTree::OperandsEnd()
{
return GenTreeOperandIterator();
}
IteratorPair<GenTreeOperandIterator> GenTree::Operands()
{
return MakeIteratorPair(OperandsBegin(), OperandsEnd());
}
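//------------------------------------------------------------------------
// Precedes: Check whether this node occurs before the given node in execution
//    (gtNext) order.
//
// Arguments:
//    other - the node to compare against
//
// Return Value:
//    True if 'other' is reachable from this node by following gtNext links.
//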
bool GenTree::Precedes(GenTree* other)
{
assert(other != nullptr);
for (GenTree* node = gtNext; node != nullptr; node = node->gtNext)
{
if (node == other)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------------
// SetIndirExceptionFlags : Set GTF_EXCEPT and GTF_IND_NONFAULTING flags as appropriate
// on an indirection or an array length node.
//
// Arguments:
// comp - compiler instance
//
void GenTree::SetIndirExceptionFlags(Compiler* comp)
{
assert(OperIsIndirOrArrLength());
if (OperMayThrow(comp))
{
gtFlags |= GTF_EXCEPT;
return;
}
GenTree* addr = nullptr;
if (OperIsIndir())
{
addr = AsIndir()->Addr();
}
else
{
assert(gtOper == GT_ARR_LENGTH);
addr = AsArrLen()->ArrRef();
}
if ((addr->gtFlags & GTF_EXCEPT) != 0)
{
gtFlags |= GTF_EXCEPT;
}
else
{
gtFlags &= ~GTF_EXCEPT;
gtFlags |= GTF_IND_NONFAULTING;
}
}
#ifdef DEBUG
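//------------------------------------------------------------------------
// gtDispFlags: Print a one-character summary for each of the common node flags.
//
// Return Value:
//    The number of flag characters displayed.
//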
/* static */ int GenTree::gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags)
{
int charsDisplayed = 11; // 11 is the "baseline" number of flag characters displayed
printf("%c", (flags & GTF_ASG) ? 'A' : (IsContained(flags) ? 'c' : '-'));
printf("%c", (flags & GTF_CALL) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF) ? 'G' : '-');
printf("%c", (debugFlags & GTF_DEBUG_NODE_MORPHED) ? '+' : // First print '+' if GTF_DEBUG_NODE_MORPHED is set
(flags & GTF_ORDER_SIDEEFF) ? 'O' : '-'); // otherwise print 'O' or '-'
printf("%c", (flags & GTF_COLON_COND) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED) ? 'U' : (flags & GTF_BOOLEAN) ? 'B' : '-');
#if FEATURE_SET_FLAGS
printf("%c", (flags & GTF_SET_FLAGS) ? 'S' : '-');
++charsDisplayed;
#endif
printf("%c", (flags & GTF_LATE_ARG) ? 'L' : '-');
printf("%c", (flags & GTF_SPILLED) ? 'z' : (flags & GTF_SPILL) ? 'Z' : '-');
return charsDisplayed;
}
#ifdef TARGET_X86
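//------------------------------------------------------------------------
// GetCallConvName: Get a printable name for an unmanaged calling convention.
//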
inline const char* GetCallConvName(CorInfoCallConvExtension callConv)
{
switch (callConv)
{
case CorInfoCallConvExtension::Managed:
return "Managed";
case CorInfoCallConvExtension::C:
return "C";
case CorInfoCallConvExtension::Stdcall:
return "Stdcall";
case CorInfoCallConvExtension::Thiscall:
return "Thiscall";
case CorInfoCallConvExtension::Fastcall:
return "Fastcall";
case CorInfoCallConvExtension::CMemberFunction:
return "CMemberFunction";
case CorInfoCallConvExtension::StdcallMemberFunction:
return "StdcallMemberFunction";
case CorInfoCallConvExtension::FastcallMemberFunction:
return "FastcallMemberFunction";
default:
return "UnknownCallConv";
}
}
#endif // TARGET_X86
/*****************************************************************************/
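//------------------------------------------------------------------------
// gtDispNodeName: Print the name of the node, decorated with operator-specific
//    details (call kind, array rank, LEA shape, bounds-check kind, overflow, etc.).
//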
void Compiler::gtDispNodeName(GenTree* tree)
{
/* print the node name */
const char* name;
assert(tree);
if (tree->gtOper < GT_COUNT)
{
name = GenTree::OpName(tree->OperGet());
}
else
{
name = "<ERROR>";
}
char buf[32];
char* bufp = &buf[0];
if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
sprintf_s(bufp, sizeof(buf), " %s(h)%c", name, 0);
}
else if (tree->gtOper == GT_PUTARG_STK)
{
sprintf_s(bufp, sizeof(buf), " %s [+0x%02x]%c", name, tree->AsPutArgStk()->getArgOffset(), 0);
}
else if (tree->gtOper == GT_CALL)
{
const char* callType = "CALL";
const char* gtfType = "";
const char* ctType = "";
char gtfTypeBuf[100];
if (tree->AsCall()->gtCallType == CT_USER_FUNC)
{
if (tree->AsCall()->IsVirtual())
{
callType = "CALLV";
}
}
else if (tree->AsCall()->gtCallType == CT_HELPER)
{
ctType = " help";
}
else if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
ctType = " ind";
}
else
{
assert(!"Unknown gtCallType");
}
if (tree->gtFlags & GTF_CALL_NULLCHECK)
{
gtfType = " nullcheck";
}
if (tree->AsCall()->IsVirtualVtable())
{
gtfType = " vt-ind";
}
else if (tree->AsCall()->IsVirtualStub())
{
gtfType = " stub";
}
#ifdef FEATURE_READYTORUN
else if (tree->AsCall()->IsR2RRelativeIndir())
{
gtfType = " r2r_ind";
}
#endif // FEATURE_READYTORUN
else if (tree->gtFlags & GTF_CALL_UNMANAGED)
{
char* gtfTypeBufWalk = gtfTypeBuf;
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " unman");
if (tree->gtFlags & GTF_CALL_POP_ARGS)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " popargs");
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " thiscall");
}
#ifdef TARGET_X86
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " %s",
GetCallConvName(tree->AsCall()->GetUnmanagedCallConv()));
#endif // TARGET_X86
gtfType = gtfTypeBuf;
}
sprintf_s(bufp, sizeof(buf), " %s%s%s%c", callType, ctType, gtfType, 0);
}
else if (tree->gtOper == GT_ARR_ELEM)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
for (unsigned rank = tree->AsArrElem()->gtArrRank - 1; rank; rank--)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_ARR_OFFSET || tree->gtOper == GT_ARR_INDEX)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
unsigned char currDim;
unsigned char rank;
if (tree->gtOper == GT_ARR_OFFSET)
{
currDim = tree->AsArrOffs()->gtCurrDim;
rank = tree->AsArrOffs()->gtArrRank;
}
else
{
currDim = tree->AsArrIndex()->gtCurrDim;
rank = tree->AsArrIndex()->gtArrRank;
}
for (unsigned char dim = 0; dim < rank; dim++)
{
// Use a de facto standard i,j,k for the dimensions.
// Note that we only support up to rank 3 arrays with these nodes, so we won't run out of characters.
char dimChar = '*';
if (dim == currDim)
{
dimChar = 'i' + dim;
}
else if (dim > currDim)
{
dimChar = ' ';
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%c", dimChar);
if (dim != rank - 1)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_LEA)
{
GenTreeAddrMode* lea = tree->AsAddrMode();
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s(", name);
if (lea->Base() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "b+");
}
if (lea->Index() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "(i*%d)+", lea->gtScale);
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%d)", lea->Offset());
}
else if (tree->gtOper == GT_BOUNDS_CHECK)
{
switch (tree->AsBoundsChk()->gtThrowKind)
{
case SCK_RNGCHK_FAIL:
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s_Rng", name);
if (tree->AsBoundsChk()->gtIndRngFailBB != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " -> " FMT_BB,
tree->AsBoundsChk()->gtIndRngFailBB->bbNum);
}
break;
}
case SCK_ARG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_Arg", name);
break;
case SCK_ARG_RNG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_ArgRng", name);
break;
default:
unreached();
}
}
else if (tree->gtOverflowEx())
{
sprintf_s(bufp, sizeof(buf), " %s_ovfl%c", name, 0);
}
else
{
sprintf_s(bufp, sizeof(buf), " %s%c", name, 0);
}
if (strlen(buf) < 10)
{
printf(" %-10s", buf);
}
else
{
printf(" %s", buf);
}
}
//------------------------------------------------------------------------
// gtDispZeroFieldSeq: If this node has a zero-offset field sequence annotation,
// then print that field sequence
//
void Compiler::gtDispZeroFieldSeq(GenTree* tree)
{
NodeToFieldSeqMap* map = GetZeroOffsetFieldMap();
// The most common case is having no entries in this map
if (map->GetCount() > 0)
{
FieldSeqNode* fldSeq = nullptr;
if (map->Lookup(tree, &fldSeq))
{
printf(" Zero");
gtDispAnyFieldSeq(fldSeq);
}
}
}
//------------------------------------------------------------------------
// gtDispVN: Utility function that prints a tree's ValueNumber: gtVNPair
//
void Compiler::gtDispVN(GenTree* tree)
{
if (tree->gtVNPair.GetLiberal() != ValueNumStore::NoVN)
{
assert(tree->gtVNPair.GetConservative() != ValueNumStore::NoVN);
printf(" ");
vnpPrint(tree->gtVNPair, 0);
}
}
//------------------------------------------------------------------------
// gtDispCommonEndLine
// Utility function that prints the following node information:
// 1. The associated zero field sequence (if any)
// 2. The register assigned to this node (if any)
// 3. The value number assigned (if any)
// 4. A newline character
//
void Compiler::gtDispCommonEndLine(GenTree* tree)
{
gtDispZeroFieldSeq(tree);
gtDispRegVal(tree);
gtDispVN(tree);
printf("\n");
}
//------------------------------------------------------------------------
// gtDispNode: Print a tree to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
// msg - a contextual message (i.e. from the parent) to print
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' may be null
void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, _In_ _In_opt_z_ const char* msg, bool isLIR)
{
bool printFlags = true; // always true..
int msgLength = 25;
GenTree* prev;
if (tree->gtSeqNum)
{
printf("N%03u ", tree->gtSeqNum);
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
printf("(???"
",???"
") "); // This probably indicates a bug: the node has a sequence number, but not costs.
}
}
else
{
prev = tree;
bool hasSeqNum = true;
unsigned dotNum = 0;
do
{
dotNum++;
prev = prev->gtPrev;
if ((prev == nullptr) || (prev == tree))
{
hasSeqNum = false;
break;
}
assert(prev);
} while (prev->gtSeqNum == 0);
// If we have an indent stack, don't add additional characters,
// as it will mess up the alignment.
bool displayDotNum = hasSeqNum && (indentStack == nullptr);
if (displayDotNum)
{
printf("N%03u.%02u ", prev->gtSeqNum, dotNum);
}
else
{
printf(" ");
}
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
if (displayDotNum)
{
// Do better alignment in this case
printf(" ");
}
else
{
printf(" ");
}
}
}
if (optValnumCSE_phase)
{
if (IS_CSE_INDEX(tree->gtCSEnum))
{
printf(FMT_CSE " (%s)", GET_CSE_INDEX(tree->gtCSEnum), (IS_CSE_USE(tree->gtCSEnum) ? "use" : "def"));
}
else
{
printf(" ");
}
}
/* Print the node ID */
printTreeID(tree);
printf(" ");
if (tree->gtOper >= GT_COUNT)
{
printf(" **** ILLEGAL NODE ****");
return;
}
if (printFlags)
{
/* First print the flags associated with the node */
switch (tree->gtOper)
{
case GT_LEA:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_IND:
// We prefer printing V or U
if ((tree->gtFlags & (GTF_IND_VOLATILE | GTF_IND_UNALIGNED)) == 0)
{
if (tree->gtFlags & GTF_IND_TGTANYWHERE)
{
printf("*");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP)
{
printf("s");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_INVARIANT)
{
printf("#");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ARR_INDEX)
{
printf("a");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
printf("n"); // print a n for non-faulting
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ASG_LHS)
{
printf("D"); // print a D for definition
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONNULL)
{
printf("@");
--msgLength;
break;
}
}
FALLTHROUGH;
case GT_INDEX:
case GT_INDEX_ADDR:
case GT_FIELD:
case GT_CLS_VAR:
if (tree->gtFlags & GTF_IND_VOLATILE)
{
printf("V");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_UNALIGNED)
{
printf("U");
--msgLength;
break;
}
goto DASH;
case GT_ASG:
if (tree->OperIsInitBlkOp())
{
printf("I");
--msgLength;
break;
}
goto DASH;
case GT_CALL:
if (tree->AsCall()->IsInlineCandidate())
{
if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("&");
}
else
{
printf("I");
}
--msgLength;
break;
}
else if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("G");
--msgLength;
break;
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG)
{
printf("S");
--msgLength;
break;
}
if (tree->gtFlags & GTF_CALL_HOISTABLE)
{
printf("H");
--msgLength;
break;
}
goto DASH;
case GT_MUL:
#if !defined(TARGET_64BIT)
case GT_MUL_LONG:
#endif
if (tree->gtFlags & GTF_MUL_64RSLT)
{
printf("L");
--msgLength;
break;
}
goto DASH;
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (tree->gtFlags & GTF_DIV_BY_CNS_OPT)
{
printf("M"); // We will use a Multiply by reciprical
--msgLength;
break;
}
goto DASH;
case GT_LCL_FLD:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
case GT_STORE_LCL_VAR:
if (tree->gtFlags & GTF_VAR_USEASG)
{
printf("U");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_MULTIREG)
{
printf((tree->gtFlags & GTF_VAR_DEF) ? "M" : "m");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_DEF)
{
printf("D");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CAST)
{
printf("C");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
printf("i");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CONTEXT)
{
printf("!");
--msgLength;
break;
}
goto DASH;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
printf("N");
--msgLength;
break;
}
if (tree->gtFlags & GTF_RELOP_JMP_USED)
{
printf("J");
--msgLength;
break;
}
goto DASH;
case GT_JCMP:
printf((tree->gtFlags & GTF_JCMP_TST) ? "T" : "C");
printf((tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
goto DASH;
case GT_CNS_INT:
if (tree->IsIconHandle())
{
if ((tree->gtFlags & GTF_ICON_INITCLASS) != 0)
{
printf("I"); // Static Field handle with INITCLASS requirement
--msgLength;
break;
}
else if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf("O");
--msgLength;
break;
}
else
{
// Some other handle
printf("H");
--msgLength;
break;
}
}
goto DASH;
default:
DASH:
printf("-");
--msgLength;
break;
}
/* Then print the general purpose flags */
GenTreeFlags flags = tree->gtFlags;
if (tree->OperIsBinary() || tree->OperIsMultiOp())
{
genTreeOps oper = tree->OperGet();
// Check for GTF_ADDRMODE_NO_CSE flag on add/mul/shl Binary Operators
if ((oper == GT_ADD) || (oper == GT_MUL) || (oper == GT_LSH))
{
if ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)
{
flags |= GTF_DONT_CSE; // Force the GTF_ADDRMODE_NO_CSE flag to print out like GTF_DONT_CSE
}
}
}
else // !(tree->OperIsBinary() || tree->OperIsMultiOp())
{
// The GTF_REVERSE_OPS flag only applies to binary operations (which some MultiOp nodes are).
flags &= ~GTF_REVERSE_OPS; // we use this value for GTF_VAR_ARR_INDEX above
}
msgLength -= GenTree::gtDispFlags(flags, tree->gtDebugFlags);
/*
printf("%c", (flags & GTF_ASG ) ? 'A' : '-');
printf("%c", (flags & GTF_CALL ) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT ) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF ) ? 'G' : '-');
printf("%c", (flags & GTF_ORDER_SIDEEFF ) ? 'O' : '-');
printf("%c", (flags & GTF_COLON_COND ) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE ) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE ) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS ) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED ) ? 'U' :
(flags & GTF_BOOLEAN ) ? 'B' : '-');
printf("%c", (flags & GTF_SET_FLAGS ) ? 'S' : '-');
printf("%c", (flags & GTF_SPILLED ) ? 'z' : '-');
printf("%c", (flags & GTF_SPILL ) ? 'Z' : '-');
*/
}
// If we're printing a node for LIR, we use the space normally associated with the message
// to display the node's temp name (if any)
const bool hasOperands = tree->OperandsBegin() != tree->OperandsEnd();
if (isLIR)
{
assert(msg == nullptr);
// If the tree does not have any operands, we do not display the indent stack. This gives us
// two additional characters for alignment.
if (!hasOperands)
{
msgLength += 1;
}
if (tree->IsValue())
{
const size_t bufLength = msgLength - 1;
msg = reinterpret_cast<char*>(_alloca(bufLength * sizeof(char)));
sprintf_s(const_cast<char*>(msg), bufLength, "t%d = %s", tree->gtTreeID, hasOperands ? "" : " ");
}
}
/* print the msg associated with the node */
if (msg == nullptr)
{
msg = "";
}
if (msgLength < 0)
{
msgLength = 0;
}
printf(isLIR ? " %+*s" : " %-*s", msgLength, msg);
/* Indent the node accordingly */
if (!isLIR || hasOperands)
{
printIndent(indentStack);
}
gtDispNodeName(tree);
assert(tree == nullptr || tree->gtOper < GT_COUNT);
if (tree)
{
/* print the type of the node */
if (tree->gtOper != GT_CAST)
{
printf(" %-6s", varTypeName(tree->TypeGet()));
if (varTypeIsStruct(tree->TypeGet()))
{
ClassLayout* layout = nullptr;
if (tree->OperIs(GT_BLK, GT_OBJ, GT_STORE_BLK, GT_STORE_OBJ))
{
layout = tree->AsBlk()->GetLayout();
}
else if (tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varTypeIsStruct(varDsc->TypeGet()))
{
layout = varDsc->GetLayout();
}
}
if (layout != nullptr)
{
gtDispClassLayout(layout, tree->TypeGet());
}
}
if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_STORE_LCL_VAR)
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->IsAddressExposed())
{
printf("(AX)"); // Variable has address exposed.
}
if (varDsc->lvUnusedStruct)
{
assert(varDsc->lvPromoted);
printf("(U)"); // Unused struct
}
else if (varDsc->lvPromoted)
{
if (varTypeIsPromotable(varDsc))
{
printf("(P)"); // Promoted struct
}
else
{
// Promoted implicit by-refs can have this state during
// global morph while they are being rewritten
printf("(P?!)"); // Promoted struct
}
}
}
if (tree->IsArgPlaceHolderNode() && (tree->AsArgPlace()->gtArgPlaceClsHnd != nullptr))
{
printf(" => [clsHnd=%08X]", dspPtr(tree->AsArgPlace()->gtArgPlaceClsHnd));
}
if (tree->gtOper == GT_RUNTIMELOOKUP)
{
#ifdef TARGET_64BIT
printf(" 0x%llx", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#else
printf(" 0x%x", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#endif
switch (tree->AsRuntimeLookup()->gtHndType)
{
case CORINFO_HANDLETYPE_CLASS:
printf(" class");
break;
case CORINFO_HANDLETYPE_METHOD:
printf(" method");
break;
case CORINFO_HANDLETYPE_FIELD:
printf(" field");
break;
default:
printf(" unknown");
break;
}
}
}
// for tracking down problems in reguse prediction or liveness tracking
if (verbose && 0)
{
printf(" RR=");
dspRegMask(tree->gtRsvdRegs);
printf("\n");
}
}
}
#if FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispRegCount: determine how many registers to print for a multi-reg node
//
// Arguments:
// tree - Gentree node whose registers we want to print
//
// Return Value:
// The number of registers to print
//
// Notes:
// This is not the same in all cases as GenTree::GetMultiRegCount().
// In particular, for COPY or RELOAD it only returns the number of *valid* registers,
// and for CALL, it will return 0 if the ReturnTypeDesc hasn't yet been initialized.
// But we want to print all register positions.
//
unsigned Compiler::gtDispRegCount(GenTree* tree)
{
if (tree->IsCopyOrReload())
{
// GetRegCount() will return only the number of valid regs for COPY or RELOAD,
// but we want to print all positions, so we get the reg count for op1.
return gtDispRegCount(tree->gtGetOp1());
}
else if (!tree->IsMultiRegNode())
{
// We can wind up here because IsMultiRegNode() always returns true for COPY or RELOAD,
// even if its op1 is not multireg.
// Note that this method won't be called for non-register-producing nodes.
return 1;
}
else if (tree->IsMultiRegLclVar())
{
return tree->AsLclVar()->GetFieldCount(this);
}
else if (tree->OperIs(GT_CALL))
{
unsigned regCount = tree->AsCall()->GetReturnTypeDesc()->TryGetReturnRegCount();
// If it hasn't yet been initialized, we'd still like to see the registers printed.
if (regCount == 0)
{
regCount = MAX_RET_REG_COUNT;
}
return regCount;
}
else
{
return tree->GetMultiRegCount();
}
}
#endif // FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispRegVal: Print the register(s) defined by the given node
//
// Arguments:
// tree - Gentree node whose registers we want to print
//
void Compiler::gtDispRegVal(GenTree* tree)
{
switch (tree->GetRegTag())
{
// Don't display anything for the GT_REGTAG_NONE case;
// the absence of printed register values will imply this state.
case GenTree::GT_REGTAG_REG:
printf(" REG %s", compRegVarName(tree->GetRegNum()));
break;
default:
return;
}
#if FEATURE_MULTIREG_RET
if (tree->IsMultiRegNode())
{
// 0th reg is GetRegNum(), which is already printed above.
// Print the remaining regs of a multi-reg node.
unsigned regCount = gtDispRegCount(tree);
// For some nodes, e.g. COPY, RELOAD or CALL, we may not have valid regs for all positions.
for (unsigned i = 1; i < regCount; ++i)
{
regNumber reg = tree->GetRegByIndex(i);
printf(",%s", genIsValidReg(reg) ? compRegVarName(reg) : "NA");
}
}
#endif
}
// We don't usually expect to print anything longer than this string.
#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame"
#define LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH (sizeof(LONGEST_COMMON_LCL_VAR_DISPLAY))
#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH * 2)
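//------------------------------------------------------------------------
// gtGetLclVarNameInfo: Look up the IL-level kind, well-known name, and number
//    used when displaying a local variable.
//
// Arguments:
//    lclNum    - the local variable number
//    ilKindOut - [out] the IL kind prefix ("tmp", "cse", "rat", "arg", "loc"), or nullptr
//    ilNameOut - [out] a well-known name (e.g. "this", "OutArgs"), or nullptr
//    ilNumOut  - [out] the IL-relative number for the local
//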
void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut)
{
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = compMap2ILvarNum(lclNum);
if (ilNum == (unsigned)ICorDebugInfo::RETBUF_ILNUM)
{
ilName = "RetBuf";
}
else if (ilNum == (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM)
{
ilName = "VarArgHandle";
}
else if (ilNum == (unsigned)ICorDebugInfo::TYPECTXT_ILNUM)
{
ilName = "TypeCtx";
}
else if (ilNum == (unsigned)ICorDebugInfo::UNKNOWN_ILNUM)
{
if (lclNumIsTrueCSE(lclNum))
{
ilKind = "cse";
ilNum = lclNum - optCSEstart;
}
else if (lclNum >= optCSEstart)
{
// Currently any new LclVars introduced after the CSE phase
// are believed to be created by the "rationalizer"; hence the "rat" prefix.
ilKind = "rat";
ilNum = lclNum - (optCSEstart + optCSEcount);
}
else
{
if (lclNum == info.compLvFrameListRoot)
{
ilName = "FramesRoot";
}
else if (lclNum == lvaInlinedPInvokeFrameVar)
{
ilName = "PInvokeFrame";
}
else if (lclNum == lvaGSSecurityCookie)
{
ilName = "GsCookie";
}
else if (lclNum == lvaRetAddrVar)
{
ilName = "ReturnAddress";
}
#if FEATURE_FIXED_OUT_ARGS
else if (lclNum == lvaPInvokeFrameRegSaveVar)
{
ilName = "PInvokeFrameRegSave";
}
else if (lclNum == lvaOutgoingArgSpaceVar)
{
ilName = "OutArgs";
}
#endif // FEATURE_FIXED_OUT_ARGS
#if !defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaShadowSPslotsVar)
{
ilName = "EHSlots";
}
#endif // !FEATURE_EH_FUNCLETS
#ifdef JIT32_GCENCODER
else if (lclNum == lvaLocAllocSPvar)
{
ilName = "LocAllocSP";
}
#endif // JIT32_GCENCODER
#if defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaPSPSym)
{
ilName = "PSPSym";
}
#endif // FEATURE_EH_FUNCLETS
else
{
ilKind = "tmp";
if (compIsForInlining())
{
ilNum = lclNum - impInlineInfo->InlinerCompiler->info.compLocalsCount;
}
else
{
ilNum = lclNum - info.compLocalsCount;
}
}
}
}
else if (lclNum < (compIsForInlining() ? impInlineInfo->InlinerCompiler->info.compArgsCount : info.compArgsCount))
{
if (ilNum == 0 && !info.compIsStatic)
{
ilName = "this";
}
else
{
ilKind = "arg";
}
}
else
{
if (!lvaTable[lclNum].lvIsStructField)
{
ilKind = "loc";
}
if (compIsForInlining())
{
ilNum -= impInlineInfo->InlinerCompiler->info.compILargsCount;
}
else
{
ilNum -= info.compILargsCount;
}
}
*ilKindOut = ilKind;
*ilNameOut = ilName;
*ilNumOut = ilNum;
}
/*****************************************************************************/
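// gtGetLclVarName: Format the name of local 'lclNum' into 'buf' (e.g. "V03 loc2");
// returns the number of characters printed, or a negative value if formatting failed.
//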
int Compiler::gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining)
{
char* bufp_next = buf;
unsigned charsPrinted = 0;
int sprintf_result;
sprintf_result = sprintf_s(bufp_next, buf_remaining, "V%02u", lclNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = 0;
gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum);
if (ilName != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s", ilName);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
else if (ilKind != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s%d", ilKind, ilNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
assert(charsPrinted > 0);
assert(buf_remaining > 0);
return (int)charsPrinted;
}
/*****************************************************************************
* Get the local var name, and create a copy of the string that can be used in debug output.
*/
char* Compiler::gtGetLclVarName(unsigned lclNum)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return nullptr;
}
char* retBuf = new (this, CMK_DebugOnly) char[charsPrinted + 1];
strcpy_s(retBuf, charsPrinted + 1, buf);
return retBuf;
}
/*****************************************************************************/
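// gtDispLclVar: Print the name of local 'lclNum'; if 'padForBiggestDisp' is true,
// pad the output with spaces so that columns line up in dumps.
//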
void Compiler::gtDispLclVar(unsigned lclNum, bool padForBiggestDisp)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return;
}
printf("%s", buf);
if (padForBiggestDisp && (charsPrinted < (int)LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH))
{
printf("%*c", LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH - charsPrinted, ' ');
}
}
//------------------------------------------------------------------------
// gtDispLclVarStructType: Print size and type information about a struct or lclBlk local variable.
//
// Arguments:
// lclNum - The local var id.
//
void Compiler::gtDispLclVarStructType(unsigned lclNum)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
var_types type = varDsc->TypeGet();
if (type == TYP_STRUCT)
{
ClassLayout* layout = varDsc->GetLayout();
assert(layout != nullptr);
gtDispClassLayout(layout, type);
}
else if (type == TYP_LCLBLK)
{
#if FEATURE_FIXED_OUT_ARGS
assert(lclNum == lvaOutgoingArgSpaceVar);
// Since lvaOutgoingArgSpaceSize is a PhasedVar we can't read it for Dumping until
// after we set it to something.
if (lvaOutgoingArgSpaceSize.HasFinalValue())
{
// A PhasedVar<T> can't be directly used as an arg to a variadic function
unsigned value = lvaOutgoingArgSpaceSize;
printf("<%u> ", value);
}
else
{
printf("<na> "); // The value hasn't yet been determined
}
#else
assert(!"Unknown size");
NO_WAY("Target doesn't support TYP_LCLBLK");
#endif // FEATURE_FIXED_OUT_ARGS
}
}
//------------------------------------------------------------------------
// gtDispClassLayout: Print size and type information about a layout.
//
// Arguments:
// layout - the layout;
// type - variable type, used to avoid printing size for SIMD nodes.
//
void Compiler::gtDispClassLayout(ClassLayout* layout, var_types type)
{
assert(layout != nullptr);
if (layout->IsBlockLayout())
{
printf("<%u>", layout->GetSize());
}
else if (varTypeIsSIMD(type))
{
printf("<%s>", layout->GetClassName());
}
else
{
printf("<%s, %u>", layout->GetClassName(), layout->GetSize());
}
}
/*****************************************************************************/
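// gtDispConst: Print the value of the given constant node, including any handle
// kind, field sequence, or "reuse reg val" annotations that apply.
//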
void Compiler::gtDispConst(GenTree* tree)
{
assert(tree->OperIsConst());
switch (tree->gtOper)
{
case GT_CNS_INT:
if (tree->IsIconHandle(GTF_ICON_STR_HDL))
{
const WCHAR* str = eeGetCPString(tree->AsIntCon()->gtIconVal);
// If *str points to a '\0' then don't print the string's values
if ((str != nullptr) && (*str != '\0'))
{
printf(" 0x%X \"%S\"", dspPtr(tree->AsIntCon()->gtIconVal), str);
}
else // We can't print the value of the string
{
// Note that eeGetCPString isn't currently implemented on Linux/ARM
// and instead always returns nullptr
printf(" 0x%X [ICON_STR_HDL]", dspPtr(tree->AsIntCon()->gtIconVal));
}
}
else
{
ssize_t dspIconVal =
tree->IsIconHandle() ? dspPtr(tree->AsIntCon()->gtIconVal) : tree->AsIntCon()->gtIconVal;
if (tree->TypeGet() == TYP_REF)
{
assert(tree->AsIntCon()->gtIconVal == 0);
printf(" null");
}
else if ((tree->AsIntCon()->gtIconVal > -1000) && (tree->AsIntCon()->gtIconVal < 1000))
{
printf(" %ld", dspIconVal);
}
#ifdef TARGET_64BIT
else if ((tree->AsIntCon()->gtIconVal & 0xFFFFFFFF00000000LL) != 0)
{
if (dspIconVal >= 0)
{
printf(" 0x%llx", dspIconVal);
}
else
{
printf(" -0x%llx", -dspIconVal);
}
}
#endif
else
{
if (dspIconVal >= 0)
{
printf(" 0x%X", dspIconVal);
}
else
{
printf(" -0x%X", -dspIconVal);
}
}
if (tree->IsIconHandle())
{
switch (tree->GetIconHandleFlag())
{
case GTF_ICON_SCOPE_HDL:
printf(" scope");
break;
case GTF_ICON_CLASS_HDL:
printf(" class");
break;
case GTF_ICON_METHOD_HDL:
printf(" method");
break;
case GTF_ICON_FIELD_HDL:
printf(" field");
break;
case GTF_ICON_STATIC_HDL:
printf(" static");
break;
case GTF_ICON_STR_HDL:
unreached(); // This case is handled above
break;
case GTF_ICON_CONST_PTR:
printf(" const ptr");
break;
case GTF_ICON_GLOBAL_PTR:
printf(" global ptr");
break;
case GTF_ICON_VARG_HDL:
printf(" vararg");
break;
case GTF_ICON_PINVKI_HDL:
printf(" pinvoke");
break;
case GTF_ICON_TOKEN_HDL:
printf(" token");
break;
case GTF_ICON_TLS_HDL:
printf(" tls");
break;
case GTF_ICON_FTN_ADDR:
printf(" ftn");
break;
case GTF_ICON_CIDMID_HDL:
printf(" cid/mid");
break;
case GTF_ICON_BBC_PTR:
printf(" bbc");
break;
case GTF_ICON_STATIC_BOX_PTR:
printf(" static box ptr");
break;
default:
printf(" UNKNOWN");
break;
}
}
if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf(" field offset");
}
#ifdef FEATURE_SIMD
if ((tree->gtFlags & GTF_ICON_SIMD_COUNT) != 0)
{
printf(" vector element count");
}
#endif
                if (tree->IsReuseRegVal())
{
printf(" reuse reg val");
}
}
gtDispFieldSeq(tree->AsIntCon()->gtFieldSeq);
break;
case GT_CNS_LNG:
printf(" 0x%016I64x", tree->AsLngCon()->gtLconVal);
break;
case GT_CNS_DBL:
if (*((__int64*)&tree->AsDblCon()->gtDconVal) == (__int64)I64(0x8000000000000000))
{
printf(" -0.00000");
}
else
{
printf(" %#.17g", tree->AsDblCon()->gtDconVal);
}
break;
case GT_CNS_STR:
printf("<string constant>");
break;
default:
assert(!"unexpected constant node");
}
}
//------------------------------------------------------------------------
// gtDispAnyFieldSeq: "gtDispFieldSeq" that also prints "<NotAField>".
//
// Useful for printing zero-offset field sequences.
//
void Compiler::gtDispAnyFieldSeq(FieldSeqNode* fieldSeq)
{
if (fieldSeq == FieldSeqStore::NotAField())
{
printf(" Fseq<NotAField>");
return;
}
gtDispFieldSeq(fieldSeq);
}
//------------------------------------------------------------------------
// gtDispFieldSeq: Print out the fields in this field sequence.
//
void Compiler::gtDispFieldSeq(FieldSeqNode* pfsn)
{
if ((pfsn == nullptr) || (pfsn == FieldSeqStore::NotAField()))
{
return;
}
// Otherwise...
printf(" Fseq[");
while (pfsn != nullptr)
{
assert(pfsn != FieldSeqStore::NotAField()); // Can't exist in a field sequence list except alone
CORINFO_FIELD_HANDLE fldHnd = pfsn->m_fieldHnd;
// First check the "pseudo" field handles...
if (fldHnd == FieldSeqStore::FirstElemPseudoField)
{
printf("#FirstElem");
}
else if (fldHnd == FieldSeqStore::ConstantIndexPseudoField)
{
printf("#ConstantIndex");
}
else
{
printf("%s", eeGetFieldName(fldHnd));
}
pfsn = pfsn->m_next;
if (pfsn != nullptr)
{
printf(", ");
}
}
printf("]");
}
//------------------------------------------------------------------------
// gtDispLeaf: Print a single leaf node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack)
{
if (tree->OperIsConst())
{
gtDispConst(tree);
return;
}
bool isLclFld = false;
switch (tree->gtOper)
{
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
isLclFld = true;
FALLTHROUGH;
case GT_PHI_ARG:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_STORE_LCL_VAR:
{
printf(" ");
const unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
const LclVarDsc* varDsc = lvaGetDesc(varNum);
gtDispLclVar(varNum);
if (tree->AsLclVarCommon()->HasSsaName())
{
if (tree->gtFlags & GTF_VAR_USEASG)
{
assert(tree->gtFlags & GTF_VAR_DEF);
printf("ud:%d->%d", tree->AsLclVarCommon()->GetSsaNum(), GetSsaNumForLocalVarDef(tree));
}
else
{
printf("%s:%d", (tree->gtFlags & GTF_VAR_DEF) ? "d" : "u", tree->AsLclVarCommon()->GetSsaNum());
}
}
if (isLclFld)
{
printf("[+%u]", tree->AsLclFld()->GetLclOffs());
gtDispFieldSeq(tree->AsLclFld()->GetFieldSeq());
}
if (varDsc->lvRegister)
{
printf(" ");
varDsc->PrintVarReg();
}
else if (tree->InReg())
{
printf(" %s", compRegVarName(tree->GetRegNum()));
}
if (varDsc->lvPromoted)
{
if (!varTypeIsPromotable(varDsc) && !varDsc->lvUnusedStruct)
{
// Promoted implicit byrefs can get in this state while they are being rewritten
// in global morph.
}
else
{
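                    // Print a line for each promoted field, showing its type, name,
                    // offset, and register or last-use information when available.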
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(i);
const char* fieldName;
#if !defined(TARGET_64BIT)
if (varTypeIsLong(varDsc))
{
fieldName = (i == 0) ? "lo" : "hi";
}
else
#endif // !defined(TARGET_64BIT)
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
CORINFO_FIELD_HANDLE fldHnd =
info.compCompHnd->getFieldInClass(typeHnd, fieldVarDsc->lvFldOrdinal);
fieldName = eeGetFieldName(fldHnd);
}
printf("\n");
printf(" ");
printIndent(indentStack);
printf(" %-6s V%02u.%s (offs=0x%02x) -> ", varTypeName(fieldVarDsc->TypeGet()),
tree->AsLclVarCommon()->GetLclNum(), fieldName, fieldVarDsc->lvFldOffset);
gtDispLclVar(i);
if (fieldVarDsc->lvRegister)
{
printf(" ");
fieldVarDsc->PrintVarReg();
}
if (fieldVarDsc->lvTracked && fgLocalVarLivenessDone && tree->IsMultiRegLclVar() &&
tree->AsLclVar()->IsLastUse(i - varDsc->lvFieldLclStart))
{
printf(" (last use)");
}
}
}
}
else // a normal not-promoted lclvar
{
if (varDsc->lvTracked && fgLocalVarLivenessDone && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
printf(" (last use)");
}
}
}
break;
case GT_JMP:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsVal()->gtVal1, &className);
printf(" %s.%s\n", className, methodName);
}
break;
case GT_CLS_VAR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
gtDispFieldSeq(tree->AsClsVar()->gtFieldSeq);
break;
case GT_CLS_VAR_ADDR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
break;
case GT_LABEL:
break;
case GT_FTN_ADDR:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsFptrVal()->gtFptrMethod, &className);
printf(" %s.%s\n", className, methodName);
}
break;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
printf(" endNstLvl=%d", tree->AsVal()->gtVal1);
break;
#endif // !FEATURE_EH_FUNCLETS
// Vanilla leaves. No qualifying information available. So do nothing
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
case GT_CATCH_ARG:
case GT_MEMORYBARRIER:
case GT_ARGPLACE:
case GT_PINVOKE_PROLOG:
case GT_JMPTABLE:
break;
case GT_RET_EXPR:
{
GenTree* const associatedTree = tree->AsRetExpr()->gtInlineCandidate;
printf("(inl return %s ", tree->IsCall() ? " from call" : "expr");
printTreeID(associatedTree);
printf(")");
}
break;
case GT_PHYSREG:
printf(" %s", getRegName(tree->AsPhysReg()->gtSrcReg));
break;
case GT_IL_OFFSET:
printf(" ");
tree->AsILOffset()->gtStmtDI.Dump(true);
break;
case GT_JCC:
case GT_SETCC:
printf(" cond=%s", tree->AsCC()->gtCondition.Name());
break;
case GT_JCMP:
printf(" cond=%s%s", (tree->gtFlags & GTF_JCMP_TST) ? "TEST_" : "",
(tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
break;
default:
assert(!"don't know how to display tree leaf node");
}
}
//------------------------------------------------------------------------
// gtDispChild: Print a child node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
// arcType - the type of arc to use for this child
//    msg         - a contextual message (i.e. from the parent) to print
// topOnly - a boolean indicating whether to print the children, or just the top node
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' has a default value of null
// 'topOnly' is an optional argument that defaults to false
void Compiler::gtDispChild(GenTree* child,
IndentStack* indentStack,
IndentInfo arcType,
_In_opt_ const char* msg, /* = nullptr */
bool topOnly) /* = false */
{
indentStack->Push(arcType);
gtDispTree(child, indentStack, msg, topOnly);
indentStack->Pop();
}
#ifdef FEATURE_SIMD
// Intrinsic Id to name map
extern const char* const simdIntrinsicNames[] = {
#define SIMD_INTRINSIC(mname, inst, id, name, r, ac, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) name,
#include "simdintrinsiclist.h"
};
#endif // FEATURE_SIMD
/*****************************************************************************/
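// gtDispTree: Print the given tree and, unless 'topOnly' is set, all of its
// children, using 'indentStack' to draw the connecting arcs.
//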
void Compiler::gtDispTree(GenTree* tree,
IndentStack* indentStack, /* = nullptr */
_In_ _In_opt_z_ const char* msg, /* = nullptr */
bool topOnly, /* = false */
bool isLIR) /* = false */
{
if (tree == nullptr)
{
printf(" [%08X] <NULL>\n", tree);
printf(""); // null string means flush
return;
}
if (indentStack == nullptr)
{
indentStack = new (this, CMK_DebugOnly) IndentStack(this);
}
if (IsUninitialized(tree))
{
        /* Value used to initialize nodes */
printf("Uninitialized tree node!\n");
return;
}
if (tree->gtOper >= GT_COUNT)
{
gtDispNode(tree, indentStack, msg, isLIR);
printf("Bogus operator!\n");
return;
}
/* Is tree a leaf node? */
if (tree->OperIsLeaf() || tree->OperIsLocalStore()) // local stores used to be leaves
{
gtDispNode(tree, indentStack, msg, isLIR);
gtDispLeaf(tree, indentStack);
gtDispCommonEndLine(tree);
if (tree->OperIsLocalStore() && !topOnly)
{
gtDispChild(tree->AsOp()->gtOp1, indentStack, IINone);
}
return;
}
// Determine what kind of arc to propagate.
IndentInfo myArc = IINone;
IndentInfo lowerArc = IINone;
if (indentStack->Depth() > 0)
{
myArc = indentStack->Pop();
switch (myArc)
{
case IIArcBottom:
indentStack->Push(IIArc);
lowerArc = IINone;
break;
case IIArc:
indentStack->Push(IIArc);
lowerArc = IIArc;
break;
case IIArcTop:
indentStack->Push(IINone);
lowerArc = IIArc;
break;
case IINone:
indentStack->Push(IINone);
lowerArc = IINone;
break;
default:
unreached();
break;
}
}
/* Is it a 'simple' unary/binary operator? */
const char* childMsg = nullptr;
if (tree->OperIsSimple())
{
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
if (tree->gtOper == GT_CAST)
{
/* Format a message that explains the effect of this GT_CAST */
var_types fromType = genActualType(tree->AsCast()->CastOp()->TypeGet());
var_types toType = tree->CastToType();
var_types finalType = tree->TypeGet();
/* if GTF_UNSIGNED is set then force fromType to an unsigned type */
if (tree->gtFlags & GTF_UNSIGNED)
{
fromType = varTypeToUnsigned(fromType);
}
if (finalType != toType)
{
printf(" %s <-", varTypeName(finalType));
}
printf(" %s <- %s", varTypeName(toType), varTypeName(fromType));
}
if (tree->OperIsBlkOp())
{
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
if (tree->OperIsStoreBlk() && (tree->AsBlk()->gtBlkOpKind != GenTreeBlk::BlkOpKindInvalid))
{
switch (tree->AsBlk()->gtBlkOpKind)
{
#ifdef TARGET_XARCH
case GenTreeBlk::BlkOpKindRepInstr:
printf(" (RepInstr)");
break;
#endif
case GenTreeBlk::BlkOpKindUnroll:
printf(" (Unroll)");
break;
#ifndef TARGET_X86
case GenTreeBlk::BlkOpKindHelper:
printf(" (Helper)");
break;
#endif
default:
unreached();
}
}
}
#if FEATURE_PUT_STRUCT_ARG_STK
else if (tree->OperGet() == GT_PUTARG_STK)
{
const GenTreePutArgStk* putArg = tree->AsPutArgStk();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d slot), (%d byteOffset)", putArg->gtNumSlots,
putArg->GetStackByteSize(), putArg->gtSlotNum, putArg->getArgOffset());
}
#endif
if (putArg->gtPutArgStkKind != GenTreePutArgStk::Kind::Invalid)
{
switch (putArg->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
printf(" (RepInstr)");
break;
case GenTreePutArgStk::Kind::PartialRepInstr:
printf(" (PartialRepInstr)");
break;
case GenTreePutArgStk::Kind::Unroll:
printf(" (Unroll)");
break;
case GenTreePutArgStk::Kind::Push:
printf(" (Push)");
break;
case GenTreePutArgStk::Kind::PushAllSlots:
printf(" (PushAllSlots)");
break;
default:
unreached();
}
}
}
#if FEATURE_ARG_SPLIT
else if (tree->OperGet() == GT_PUTARG_SPLIT)
{
const GenTreePutArgSplit* putArg = tree->AsPutArgSplit();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d numRegs)", putArg->gtNumSlots, putArg->GetStackByteSize(),
putArg->gtNumRegs);
}
#endif
}
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
if (tree->OperIs(GT_FIELD))
{
if (FieldSeqStore::IsPseudoField(tree->AsField()->gtFldHnd))
{
printf(" #PseudoField:0x%x", tree->AsField()->gtFldOffset);
}
else
{
printf(" %s", eeGetFieldName(tree->AsField()->gtFldHnd), 0);
}
}
if (tree->gtOper == GT_INTRINSIC)
{
GenTreeIntrinsic* intrinsic = tree->AsIntrinsic();
switch (intrinsic->gtIntrinsicName)
{
case NI_System_Math_Abs:
printf(" abs");
break;
case NI_System_Math_Acos:
printf(" acos");
break;
case NI_System_Math_Acosh:
printf(" acosh");
break;
case NI_System_Math_Asin:
printf(" asin");
break;
case NI_System_Math_Asinh:
printf(" asinh");
break;
case NI_System_Math_Atan:
printf(" atan");
break;
case NI_System_Math_Atanh:
printf(" atanh");
break;
case NI_System_Math_Atan2:
printf(" atan2");
break;
case NI_System_Math_Cbrt:
printf(" cbrt");
break;
case NI_System_Math_Ceiling:
printf(" ceiling");
break;
case NI_System_Math_Cos:
printf(" cos");
break;
case NI_System_Math_Cosh:
printf(" cosh");
break;
case NI_System_Math_Exp:
printf(" exp");
break;
case NI_System_Math_Floor:
printf(" floor");
break;
case NI_System_Math_FMod:
printf(" fmod");
break;
case NI_System_Math_FusedMultiplyAdd:
printf(" fma");
break;
case NI_System_Math_ILogB:
printf(" ilogb");
break;
case NI_System_Math_Log:
printf(" log");
break;
case NI_System_Math_Log2:
printf(" log2");
break;
case NI_System_Math_Log10:
printf(" log10");
break;
case NI_System_Math_Max:
printf(" max");
break;
case NI_System_Math_Min:
printf(" min");
break;
case NI_System_Math_Pow:
printf(" pow");
break;
case NI_System_Math_Round:
printf(" round");
break;
case NI_System_Math_Sin:
printf(" sin");
break;
case NI_System_Math_Sinh:
printf(" sinh");
break;
case NI_System_Math_Sqrt:
printf(" sqrt");
break;
case NI_System_Math_Tan:
printf(" tan");
break;
case NI_System_Math_Tanh:
printf(" tanh");
break;
case NI_System_Math_Truncate:
printf(" truncate");
break;
case NI_System_Object_GetType:
printf(" objGetType");
break;
case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant:
printf(" isKnownConst");
break;
default:
unreached();
}
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
if (tree->AsOp()->gtOp1 != nullptr)
{
// Label the child of the GT_COLON operator
// op1 is the else part
if (tree->gtOper == GT_COLON)
{
childMsg = "else";
}
else if (tree->gtOper == GT_QMARK)
{
childMsg = " if";
}
gtDispChild(tree->AsOp()->gtOp1, indentStack,
(tree->gtGetOp2IfPresent() == nullptr) ? IIArcBottom : IIArc, childMsg, topOnly);
}
if (tree->gtGetOp2IfPresent())
{
                // Label the child of the GT_COLON operator
// op2 is the then part
if (tree->gtOper == GT_COLON)
{
childMsg = "then";
}
gtDispChild(tree->AsOp()->gtOp2, indentStack, IIArcBottom, childMsg, topOnly);
}
}
return;
}
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
// See what kind of a special operator we have here, and handle its special children.
switch (tree->gtOper)
{
case GT_FIELD_LIST:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
char offset[32];
sprintf_s(offset, sizeof(offset), "ofs %u", use.GetOffset());
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, offset);
}
}
break;
case GT_PHI:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
char block[32];
sprintf_s(block, sizeof(block), "pred " FMT_BB, use.GetNode()->AsPhiArg()->gtPredBB->bbNum);
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, block);
}
}
break;
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
GenTree* lastChild = nullptr;
call->VisitOperands([&lastChild](GenTree* operand) -> GenTree::VisitResult {
lastChild = operand;
return GenTree::VisitResult::Continue;
});
if (call->gtCallType != CT_INDIRECT)
{
const char* methodName;
const char* className;
methodName = eeGetMethodName(call->gtCallMethHnd, &className);
printf(" %s.%s", className, methodName);
}
if ((call->gtFlags & GTF_CALL_UNMANAGED) && (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH))
{
printf(" (FramesRoot last use)");
}
if (((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0) && (call->gtInlineCandidateInfo != nullptr) &&
(call->gtInlineCandidateInfo->exactContextHnd != nullptr))
{
printf(" (exactContextHnd=0x%p)", dspPtr(call->gtInlineCandidateInfo->exactContextHnd));
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
char buf[64];
char* bufp;
bufp = &buf[0];
if ((call->gtCallThisArg != nullptr) && !call->gtCallThisArg->GetNode()->OperIs(GT_NOP, GT_ARGPLACE))
{
if (call->gtCallThisArg->GetNode()->OperIs(GT_ASG))
{
sprintf_s(bufp, sizeof(buf), "this SETUP%c", 0);
}
else
{
sprintf_s(bufp, sizeof(buf), "this in %s%c", compRegVarName(REG_ARG_0), 0);
}
gtDispChild(call->gtCallThisArg->GetNode(), indentStack,
(call->gtCallThisArg->GetNode() == lastChild) ? IIArcBottom : IIArc, bufp, topOnly);
}
if (call->gtCallArgs)
{
gtDispArgList(call, lastChild, indentStack);
}
if (call->gtCallType == CT_INDIRECT)
{
gtDispChild(call->gtCallAddr, indentStack, (call->gtCallAddr == lastChild) ? IIArcBottom : IIArc,
"calli tgt", topOnly);
}
if (call->gtControlExpr != nullptr)
{
gtDispChild(call->gtControlExpr, indentStack,
(call->gtControlExpr == lastChild) ? IIArcBottom : IIArc, "control expr", topOnly);
}
int lateArgIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
IndentInfo arcType = (use.GetNext() == nullptr) ? IIArcBottom : IIArc;
gtGetLateArgMsg(call, use.GetNode(), lateArgIndex, bufp, sizeof(buf));
gtDispChild(use.GetNode(), indentStack, arcType, bufp, topOnly);
lateArgIndex++;
}
}
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD)
if (tree->OperIs(GT_SIMD))
{
printf(" %s %s", varTypeName(tree->AsSIMD()->GetSimdBaseType()),
simdIntrinsicNames[tree->AsSIMD()->GetSIMDIntrinsicId()]);
}
#endif // defined(FEATURE_SIMD)
#if defined(FEATURE_HW_INTRINSICS)
if (tree->OperIs(GT_HWINTRINSIC))
{
printf(" %s %s", tree->AsHWIntrinsic()->GetSimdBaseType() == TYP_UNKNOWN
? ""
: varTypeName(tree->AsHWIntrinsic()->GetSimdBaseType()),
HWIntrinsicInfo::lookupName(tree->AsHWIntrinsic()->GetHWIntrinsicId()));
}
#endif // defined(FEATURE_HW_INTRINSICS)
gtDispCommonEndLine(tree);
if (!topOnly)
{
size_t index = 0;
size_t count = tree->AsMultiOp()->GetOperandCount();
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
gtDispChild(operand, indentStack, ++index < count ? IIArc : IIArcBottom, nullptr, topOnly);
}
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrElem()->gtArrObj, indentStack, IIArc, nullptr, topOnly);
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
IndentInfo arcType = ((dim + 1) == tree->AsArrElem()->gtArrRank) ? IIArcBottom : IIArc;
gtDispChild(tree->AsArrElem()->gtArrInds[dim], indentStack, arcType, nullptr, topOnly);
}
}
break;
case GT_ARR_OFFSET:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrOffs()->gtOffset, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtIndex, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtArrObj, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_CMPXCHG:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsCmpXchg()->gtOpLocation, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpValue, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpComparand, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_STORE_DYN_BLK:
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsStoreDynBlk()->Addr(), indentStack, IIArc, nullptr, topOnly);
if (tree->AsStoreDynBlk()->Data() != nullptr)
{
gtDispChild(tree->AsStoreDynBlk()->Data(), indentStack, IIArc, nullptr, topOnly);
}
gtDispChild(tree->AsStoreDynBlk()->gtDynamicSize, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
default:
printf("<DON'T KNOW HOW TO DISPLAY THIS NODE> :");
printf(""); // null string means flush
break;
}
}
//------------------------------------------------------------------------
// gtGetArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// arg - The argument for which a message should be constructed
// argNum - The ordinal number of the arg in the argument list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
void Compiler::gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength)
{
if (call->gtCallLateArgs != nullptr)
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(call, argNum);
assert(curArgTabEntry);
if (arg->gtFlags & GTF_LATE_ARG)
{
sprintf_s(bufp, bufLength, "arg%d SETUP%c", argNum, 0);
}
else
{
#ifdef TARGET_ARM
if (curArgTabEntry->IsSplit())
{
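                // A split argument is passed partly in registers and partly in the outgoing
                // arg area; print the register (or register range) together with the stack offset.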
regNumber firstReg = curArgTabEntry->GetRegNum();
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
#if FEATURE_FIXED_OUT_ARGS
sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->GetByteOffset(), 0);
#else
sprintf_s(bufp, bufLength, "arg%d on STK%c", argNum, 0);
#endif
}
}
else
{
sprintf_s(bufp, bufLength, "arg%d%c", argNum, 0);
}
}
//------------------------------------------------------------------------
// gtGetLateArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// argx - The argument for which a message should be constructed
//    lateArgIndex - The ordinal number of the arg in the late arg list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
void Compiler::gtGetLateArgMsg(GenTreeCall* call, GenTree* argx, int lateArgIndex, char* bufp, unsigned bufLength)
{
    assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs
fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(call, lateArgIndex);
assert(curArgTabEntry);
regNumber argReg = curArgTabEntry->GetRegNum();
#if FEATURE_FIXED_OUT_ARGS
if (argReg == REG_STK)
{
sprintf_s(bufp, bufLength, "arg%d in out+%02x%c", curArgTabEntry->argNum, curArgTabEntry->GetByteOffset(), 0);
}
else
#endif
{
if (curArgTabEntry->use == call->gtCallThisArg)
{
sprintf_s(bufp, bufLength, "this in %s%c", compRegVarName(argReg), 0);
}
#ifdef TARGET_ARM
else if (curArgTabEntry->IsSplit())
{
regNumber firstReg = curArgTabEntry->GetRegNum();
unsigned argNum = curArgTabEntry->argNum;
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
else
{
#if FEATURE_MULTIREG_ARGS
if (curArgTabEntry->numRegs >= 2)
{
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
sprintf_s(bufp, bufLength, "arg%d %s%c%s%c", curArgTabEntry->argNum, compRegVarName(argReg), separator,
compRegVarName(curArgTabEntry->GetRegNum(curArgTabEntry->numRegs - 1)), 0);
}
else
#endif
{
sprintf_s(bufp, bufLength, "arg%d in %s%c", curArgTabEntry->argNum, compRegVarName(argReg), 0);
}
}
}
}
//------------------------------------------------------------------------
// gtDispArgList: Dump the tree for a call arg list
//
// Arguments:
// call - the call to dump arguments for
// lastCallOperand - the call's last operand (to determine the arc types)
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
void Compiler::gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack)
{
unsigned argnum = 0;
if (call->gtCallThisArg != nullptr)
{
argnum++;
}
for (GenTreeCall::Use& use : call->Args())
{
GenTree* argNode = use.GetNode();
if (!argNode->IsNothingNode() && !argNode->IsArgPlaceHolderNode())
{
char buf[256];
gtGetArgMsg(call, argNode, argnum, buf, sizeof(buf));
gtDispChild(argNode, indentStack, (argNode == lastCallOperand) ? IIArcBottom : IIArc, buf, false);
}
argnum++;
}
}
//------------------------------------------------------------------------
// gtDispStmt: Print a statement to jitstdout.
//
// Arguments:
// stmt - the statement to be printed;
// msg - an additional message to print before the statement.
//
void Compiler::gtDispStmt(Statement* stmt, const char* msg /* = nullptr */)
{
if (opts.compDbgInfo)
{
if (msg != nullptr)
{
printf("%s ", msg);
}
printStmtID(stmt);
printf(" ( ");
const DebugInfo& di = stmt->GetDebugInfo();
// For statements in the root we display just the location without the
// inline context info.
if (di.GetInlineContext() == nullptr || di.GetInlineContext()->IsRoot())
{
di.GetLocation().Dump();
}
else
{
stmt->GetDebugInfo().Dump(false);
}
printf(" ... ");
IL_OFFSET lastILOffs = stmt->GetLastILOffset();
if (lastILOffs == BAD_IL_OFFSET)
{
printf("???");
}
else
{
printf("0x%03X", lastILOffs);
}
printf(" )");
DebugInfo par;
if (stmt->GetDebugInfo().GetParent(&par))
{
printf(" <- ");
par.Dump(true);
}
printf("\n");
}
gtDispTree(stmt->GetRootNode());
}
//------------------------------------------------------------------------
// gtDispBlockStmts: dumps all statements inside `block`.
//
// Arguments:
// block - the block to display statements for.
//
void Compiler::gtDispBlockStmts(BasicBlock* block)
{
for (Statement* const stmt : block->Statements())
{
gtDispStmt(stmt);
printf("\n");
}
}
//------------------------------------------------------------------------
// Compiler::gtDispRange: dumps a range of LIR.
//
// Arguments:
// range - the range of LIR to display.
//
void Compiler::gtDispRange(LIR::ReadOnlyRange const& range)
{
for (GenTree* node : range)
{
gtDispLIRNode(node);
}
}
//------------------------------------------------------------------------
// Compiler::gtDispTreeRange: dumps the LIR range that contains all of the
// nodes in the dataflow tree rooted at a given
// node.
//
// Arguments:
// containingRange - the LIR range that contains the root node.
// tree - the root of the dataflow tree.
//
void Compiler::gtDispTreeRange(LIR::Range& containingRange, GenTree* tree)
{
bool unused;
gtDispRange(containingRange.GetTreeRange(tree, &unused));
}
//------------------------------------------------------------------------
// Compiler::gtDispLIRNode: dumps a single LIR node.
//
// Arguments:
// node - the LIR node to dump.
// prefixMsg - an optional prefix for each line of output.
//
void Compiler::gtDispLIRNode(GenTree* node, const char* prefixMsg /* = nullptr */)
{
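    // displayOperand: print a single operand line, aligned under the node dump,
    // with the given arc and annotation message.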
auto displayOperand = [](GenTree* operand, const char* message, IndentInfo operandArc, IndentStack& indentStack,
size_t prefixIndent) {
assert(operand != nullptr);
assert(message != nullptr);
if (prefixIndent != 0)
{
printf("%*s", (int)prefixIndent, "");
}
// 50 spaces for alignment
printf("%-50s", "");
#if FEATURE_SET_FLAGS
// additional flag enlarges the flag field by one character
printf(" ");
#endif
indentStack.Push(operandArc);
indentStack.print();
indentStack.Pop();
operandArc = IIArc;
printf(" t%-5d %-6s %s\n", operand->gtTreeID, varTypeName(operand->TypeGet()), message);
};
IndentStack indentStack(this);
size_t prefixIndent = 0;
if (prefixMsg != nullptr)
{
prefixIndent = strlen(prefixMsg);
}
const int bufLength = 256;
char buf[bufLength];
const bool nodeIsCall = node->IsCall();
// Visit operands
IndentInfo operandArc = IIArcTop;
for (GenTree* operand : node->Operands())
{
if (operand->IsArgPlaceHolderNode() || !operand->IsValue())
{
// Either of these situations may happen with calls.
continue;
}
if (nodeIsCall)
{
GenTreeCall* call = node->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
sprintf_s(buf, sizeof(buf), "this in %s", compRegVarName(REG_ARG_0));
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallAddr)
{
displayOperand(operand, "calli tgt", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtControlExpr)
{
displayOperand(operand, "control expr", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallCookie)
{
displayOperand(operand, "cookie", operandArc, indentStack, prefixIndent);
}
else
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByNode(call, operand);
assert(curArgTabEntry);
if (!curArgTabEntry->isLateArg())
{
gtGetArgMsg(call, operand, curArgTabEntry->argNum, buf, sizeof(buf));
}
else
{
gtGetLateArgMsg(call, operand, curArgTabEntry->GetLateArgInx(), buf, sizeof(buf));
}
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_STORE_DYN_BLK))
{
if (operand == node->AsBlk()->Addr())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else if (operand == node->AsBlk()->Data())
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
else
{
assert(operand == node->AsStoreDynBlk()->gtDynamicSize);
displayOperand(operand, "size", operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_ASG))
{
if (operand == node->gtGetOp1())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
}
else
{
displayOperand(operand, "", operandArc, indentStack, prefixIndent);
}
operandArc = IIArc;
}
// Visit the operator
if (prefixMsg != nullptr)
{
printf("%s", prefixMsg);
}
const bool topOnly = true;
const bool isLIR = true;
gtDispTree(node, &indentStack, nullptr, topOnly, isLIR);
}
/*****************************************************************************/
#endif // DEBUG
/*****************************************************************************
*
* Check if the given node can be folded,
* and call the methods to perform the folding
*/
GenTree* Compiler::gtFoldExpr(GenTree* tree)
{
unsigned kind = tree->OperKind();
/* We must have a simple operation to fold */
    // If we're in the CSE phase, it's not safe to perform tree
    // folding, given that it could potentially
    // change the set of considered CSE candidates.
if (optValnumCSE_phase)
{
return tree;
}
if (!(kind & GTK_SMPOP))
{
return tree;
}
GenTree* op1 = tree->AsOp()->gtOp1;
/* Filter out non-foldable trees that can have constant children */
assert(kind & (GTK_UNOP | GTK_BINOP));
switch (tree->gtOper)
{
case GT_RETFILT:
case GT_RETURN:
case GT_IND:
return tree;
default:
break;
}
/* try to fold the current node */
if ((kind & GTK_UNOP) && op1)
{
if (op1->OperIsConst())
{
return gtFoldExprConst(tree);
}
}
else if ((kind & GTK_BINOP) && op1 && tree->AsOp()->gtOp2 &&
// Don't take out conditionals for debugging
(opts.OptimizationEnabled() || !tree->OperIsCompare()))
{
GenTree* op2 = tree->AsOp()->gtOp2;
// The atomic operations are exempted here because they are never computable statically;
// one of their arguments is an address.
if (op1->OperIsConst() && op2->OperIsConst() && !tree->OperIsAtomicOp())
{
/* both nodes are constants - fold the expression */
return gtFoldExprConst(tree);
}
else if (op1->OperIsConst() || op2->OperIsConst())
{
/* at least one is a constant - see if we have a
* special operator that can use only one constant
* to fold - e.g. booleans */
return gtFoldExprSpecial(tree);
}
else if (tree->OperIsCompare())
{
/* comparisons of two local variables can sometimes be folded */
return gtFoldExprCompare(tree);
}
}
/* Return the original node (folded/bashed or not) */
return tree;
}
//------------------------------------------------------------------------
// gtFoldExprCall: see if a call is foldable
//
// Arguments:
// call - call to examine
//
// Returns:
// The original call if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
// Checks for calls to Type.op_Equality, Type.op_Inequality, and
// Enum.HasFlag, and if the call is to one of these,
// attempts to optimize.
GenTree* Compiler::gtFoldExprCall(GenTreeCall* call)
{
// Can only fold calls to special intrinsics.
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0)
{
return call;
}
// Defer folding if not optimizing.
if (opts.OptimizationDisabled())
{
return call;
}
// Check for a new-style jit intrinsic.
const NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
switch (ni)
{
case NI_System_Enum_HasFlag:
{
GenTree* thisOp = call->gtCallThisArg->GetNode();
GenTree* flagOp = call->gtCallArgs->GetNode();
GenTree* result = gtOptimizeEnumHasFlag(thisOp, flagOp);
if (result != nullptr)
{
return result;
}
break;
}
case NI_System_Type_op_Equality:
case NI_System_Type_op_Inequality:
{
noway_assert(call->TypeGet() == TYP_INT);
GenTree* op1 = call->gtCallArgs->GetNode();
GenTree* op2 = call->gtCallArgs->GetNext()->GetNode();
// If either operand is known to be a RuntimeType, this can be folded
GenTree* result = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2);
if (result != nullptr)
{
return result;
}
break;
}
default:
break;
}
return call;
}
//------------------------------------------------------------------------
// gtFoldTypeEqualityCall: see if a (potential) type equality call is foldable
//
// Arguments:
// isEq -- is it == or != operator
// op1 -- first argument to call
// op2 -- second argument to call
//
// Returns:
//    nullptr if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
//    If either operand is known to be a RuntimeType, then the type
// equality methods will simply check object identity and so we can
// fold the call into a simple compare of the call's operands.
GenTree* Compiler::gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2)
{
if ((gtGetTypeProducerKind(op1) == TPK_Unknown) && (gtGetTypeProducerKind(op2) == TPK_Unknown))
{
return nullptr;
}
const genTreeOps simpleOp = isEq ? GT_EQ : GT_NE;
JITDUMP("\nFolding call to Type:op_%s to a simple compare via %s\n", isEq ? "Equality" : "Inequality",
GenTree::OpName(simpleOp));
GenTree* compare = gtNewOperNode(simpleOp, TYP_INT, op1, op2);
return compare;
}
/*****************************************************************************
*
* Some comparisons can be folded:
*
* locA == locA
* classVarA == classVarA
* locA + locB == locB + locA
*
*/
GenTree* Compiler::gtFoldExprCompare(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
assert(tree->OperIsCompare());
/* Filter out cases that cannot be folded here */
    /* Do not fold floats or doubles (e.g. NaN != NaN) */
if (varTypeIsFloating(op1->TypeGet()))
{
return tree;
}
// Currently we can only fold when the two subtrees exactly match
// and everything is side effect free.
//
if (((tree->gtFlags & GTF_SIDE_EFFECT) != 0) || !GenTree::Compare(op1, op2, true))
{
// No folding.
//
return tree;
}
// GTF_ORDER_SIDEEFF here may indicate volatile subtrees.
// Or it may indicate a non-null assertion prop into an indir subtree.
//
// Check the operands.
//
if ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0)
{
        // If op1 is "volatile" and op2 is not, we can still fold.
//
const bool op1MayBeVolatile = (op1->gtFlags & GTF_ORDER_SIDEEFF) != 0;
const bool op2MayBeVolatile = (op2->gtFlags & GTF_ORDER_SIDEEFF) != 0;
if (!op1MayBeVolatile || op2MayBeVolatile)
{
// No folding.
//
return tree;
}
}
GenTree* cons;
switch (tree->gtOper)
{
case GT_EQ:
case GT_LE:
case GT_GE:
cons = gtNewIconNode(true); /* Folds to GT_CNS_INT(true) */
break;
case GT_NE:
case GT_LT:
case GT_GT:
cons = gtNewIconNode(false); /* Folds to GT_CNS_INT(false) */
break;
default:
assert(!"Unexpected relOp");
return tree;
}
    /* The node has been folded into 'cons' */
JITDUMP("\nFolding comparison with identical operands:\n");
DISPTREE(tree);
if (fgGlobalMorph)
{
fgMorphTreeDone(cons);
}
else
{
cons->gtNext = tree->gtNext;
cons->gtPrev = tree->gtPrev;
}
JITDUMP("Bashed to %s:\n", cons->AsIntConCommon()->IconValue() ? "true" : "false");
DISPTREE(cons);
return cons;
}
//------------------------------------------------------------------------
// gtCreateHandleCompare: generate a type handle comparison
//
// Arguments:
// oper -- comparison operation (equal/not equal)
// op1 -- first operand
// op2 -- second operand
// typeCheckInliningResult -- indicates how the comparison should happen
//
// Returns:
// Type comparison tree
//
GenTree* Compiler::gtCreateHandleCompare(genTreeOps oper,
GenTree* op1,
GenTree* op2,
CorInfoInlineTypeCheck typeCheckInliningResult)
{
// If we can compare pointers directly, just emit the binary operation
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_PASS)
{
return gtNewOperNode(oper, TYP_INT, op1, op2);
}
assert(typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_USE_HELPER);
// Emit a call to a runtime helper
GenTreeCall::Use* helperArgs = gtNewCallArgs(op1, op2);
GenTree* ret = gtNewHelperCallNode(CORINFO_HELP_ARE_TYPES_EQUIVALENT, TYP_INT, helperArgs);
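    // Convert the helper's non-zero/zero result into the 0/1 relop result
    // with the polarity requested by 'oper'.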
if (oper == GT_EQ)
{
ret = gtNewOperNode(GT_NE, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
else
{
assert(oper == GT_NE);
ret = gtNewOperNode(GT_EQ, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
return ret;
}
//------------------------------------------------------------------------
// gtFoldTypeCompare: see if a type comparison can be further simplified
//
// Arguments:
// tree -- tree possibly comparing types
//
// Returns:
// An alternative tree if folding happens.
// Original tree otherwise.
//
// Notes:
// Checks for
// typeof(...) == obj.GetType()
// typeof(...) == typeof(...)
// obj1.GetType() == obj2.GetType()
//
// And potentially optimizes away the need to obtain actual
// RuntimeType objects to do the comparison.
GenTree* Compiler::gtFoldTypeCompare(GenTree* tree)
{
// Only handle EQ and NE
// (maybe relop vs null someday)
const genTreeOps oper = tree->OperGet();
if ((oper != GT_EQ) && (oper != GT_NE))
{
return tree;
}
// Screen for the right kinds of operands
GenTree* const op1 = tree->AsOp()->gtOp1;
const TypeProducerKind op1Kind = gtGetTypeProducerKind(op1);
if (op1Kind == TPK_Unknown)
{
return tree;
}
GenTree* const op2 = tree->AsOp()->gtOp2;
const TypeProducerKind op2Kind = gtGetTypeProducerKind(op2);
if (op2Kind == TPK_Unknown)
{
return tree;
}
// If both types are created via handles, we can simply compare
// handles instead of the types that they'd create.
if ((op1Kind == TPK_Handle) && (op2Kind == TPK_Handle))
{
JITDUMP("Optimizing compare of types-from-handles to instead compare handles\n");
GenTree* op1ClassFromHandle = tree->AsOp()->gtOp1->AsCall()->gtCallArgs->GetNode();
GenTree* op2ClassFromHandle = tree->AsOp()->gtOp2->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE cls1Hnd = NO_CLASS_HANDLE;
CORINFO_CLASS_HANDLE cls2Hnd = NO_CLASS_HANDLE;
// Try and find class handles from op1 and op2
cls1Hnd = gtGetHelperArgClassHandle(op1ClassFromHandle);
cls2Hnd = gtGetHelperArgClassHandle(op2ClassFromHandle);
// If we have both class handles, try and resolve the type equality test completely.
bool resolveFailed = false;
if ((cls1Hnd != NO_CLASS_HANDLE) && (cls2Hnd != NO_CLASS_HANDLE))
{
JITDUMP("Asking runtime to compare %p (%s) and %p (%s) for equality\n", dspPtr(cls1Hnd),
info.compCompHnd->getClassName(cls1Hnd), dspPtr(cls2Hnd), info.compCompHnd->getClassName(cls2Hnd));
TypeCompareState s = info.compCompHnd->compareTypesForEquality(cls1Hnd, cls2Hnd);
if (s != TypeCompareState::May)
{
// Type comparison result is known.
const bool typesAreEqual = (s == TypeCompareState::Must);
const bool operatorIsEQ = (oper == GT_EQ);
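                // The compare folds to 1 exactly when the operator's sense matches the known
                // type equality (EQ with equal types, or NE with unequal types), else to 0.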
                const int compareResult = (operatorIsEQ ^ typesAreEqual) ? 0 : 1;
JITDUMP("Runtime reports comparison is known at jit time: %u\n", compareResult);
GenTree* result = gtNewIconNode(compareResult);
return result;
}
else
{
resolveFailed = true;
}
}
if (resolveFailed)
{
JITDUMP("Runtime reports comparison is NOT known at jit time\n");
}
else
{
JITDUMP("Could not find handle for %s%s\n", (cls1Hnd == NO_CLASS_HANDLE) ? " cls1" : "",
(cls2Hnd == NO_CLASS_HANDLE) ? " cls2" : "");
}
// We can't answer the equality comparison definitively at jit
// time, but can still simplify the comparison.
//
// Find out how we can compare the two handles.
// NOTE: We're potentially passing NO_CLASS_HANDLE, but the runtime knows what to do with it here.
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(cls1Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
// If the first type needs helper, check the other type: it might be okay with a simple compare.
if (inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER)
{
inliningKind = info.compCompHnd->canInlineTypeCheck(cls2Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
}
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, op1ClassFromHandle, op2ClassFromHandle, inliningKind);
        // Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
if ((op1Kind == TPK_GetType) && (op2Kind == TPK_GetType))
{
GenTree* arg1;
if (op1->OperGet() == GT_INTRINSIC)
{
arg1 = op1->AsUnOp()->gtOp1;
}
else
{
arg1 = op1->AsCall()->gtCallThisArg->GetNode();
}
arg1 = gtNewMethodTableLookup(arg1);
GenTree* arg2;
if (op2->OperGet() == GT_INTRINSIC)
{
arg2 = op2->AsUnOp()->gtOp1;
}
else
{
arg2 = op2->AsCall()->gtCallThisArg->GetNode();
}
arg2 = gtNewMethodTableLookup(arg2);
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(nullptr, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, arg1, arg2, inliningKind);
        // Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
// If one operand creates a type from a handle and the other operand is fetching the type from an object,
// we can sometimes optimize the type compare into a simpler
// method table comparison.
//
// TODO: if other operand is null...
if (!(((op1Kind == TPK_GetType) && (op2Kind == TPK_Handle)) ||
((op1Kind == TPK_Handle) && (op2Kind == TPK_GetType))))
{
return tree;
}
GenTree* const opHandle = (op1Kind == TPK_Handle) ? op1 : op2;
GenTree* const opOther = (op1Kind == TPK_Handle) ? op2 : op1;
// Tunnel through the handle operand to get at the class handle involved.
GenTree* const opHandleArgument = opHandle->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE clsHnd = gtGetHelperArgClassHandle(opHandleArgument);
// If we couldn't find the class handle, give up.
if (clsHnd == NO_CLASS_HANDLE)
{
return tree;
}
// Ask the VM if this type can be equality tested by a simple method
// table comparison.
CorInfoInlineTypeCheck typeCheckInliningResult =
info.compCompHnd->canInlineTypeCheck(clsHnd, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_NONE)
{
return tree;
}
// We're good to go.
JITDUMP("Optimizing compare of obj.GetType()"
" and type-from-handle to compare method table pointer\n");
// opHandleArgument is the method table we're looking for.
GenTree* const knownMT = opHandleArgument;
// Fetch object method table from the object itself.
GenTree* objOp = nullptr;
// Note we may see intrinsified or regular calls to GetType
if (opOther->OperGet() == GT_INTRINSIC)
{
objOp = opOther->AsUnOp()->gtOp1;
}
else
{
objOp = opOther->AsCall()->gtCallThisArg->GetNode();
}
bool pIsExact = false;
bool pIsNonNull = false;
CORINFO_CLASS_HANDLE objCls = gtGetClassHandle(objOp, &pIsExact, &pIsNonNull);
// if both classes are "final" (e.g. System.String[]) we can replace the comparison
// with `true/false` + null check.
if ((objCls != NO_CLASS_HANDLE) && (pIsExact || impIsClassExact(objCls)))
{
TypeCompareState tcs = info.compCompHnd->compareTypesForEquality(objCls, clsHnd);
if (tcs != TypeCompareState::May)
{
const bool operatorIsEQ = oper == GT_EQ;
const bool typesAreEqual = tcs == TypeCompareState::Must;
GenTree* compareResult = gtNewIconNode((operatorIsEQ ^ typesAreEqual) ? 0 : 1);
if (!pIsNonNull)
{
// we still have to emit a null-check
// obj.GetType == typeof() -> (nullcheck) true/false
GenTree* nullcheck = gtNewNullCheck(objOp, compCurBB);
return gtNewOperNode(GT_COMMA, tree->TypeGet(), nullcheck, compareResult);
}
else if (objOp->gtFlags & GTF_ALL_EFFECT)
{
return gtNewOperNode(GT_COMMA, tree->TypeGet(), objOp, compareResult);
}
else
{
return compareResult;
}
}
}
// Fetch the method table from the object
GenTree* const objMT = gtNewMethodTableLookup(objOp);
// Compare the two method tables
GenTree* const compare = gtCreateHandleCompare(oper, objMT, knownMT, typeCheckInliningResult);
// Drop any now irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
// And we're done
return compare;
}
//------------------------------------------------------------------------
// gtGetHelperArgClassHandle: find the compile time class handle from
// a helper call argument tree
//
// Arguments:
// tree - tree that passes the handle to the helper
//
// Returns:
// The compile time class handle if known.
//
CORINFO_CLASS_HANDLE Compiler::gtGetHelperArgClassHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE result = NO_CLASS_HANDLE;
// Walk through any wrapping nop.
if ((tree->gtOper == GT_NOP) && (tree->gtType == TYP_I_IMPL))
{
tree = tree->AsOp()->gtOp1;
}
// The handle could be a literal constant
if ((tree->OperGet() == GT_CNS_INT) && (tree->TypeGet() == TYP_I_IMPL))
{
assert(tree->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)tree->AsIntCon()->gtCompileTimeHandle;
}
// Or the result of a runtime lookup
else if (tree->OperGet() == GT_RUNTIMELOOKUP)
{
result = tree->AsRuntimeLookup()->GetClassHandle();
}
// Or something reached indirectly
else if (tree->gtOper == GT_IND)
{
// The handle indirs we are looking for will be marked as non-faulting.
        // Certain others (e.g. from refanytype) may not be.
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
GenTree* handleTreeInternal = tree->AsOp()->gtOp1;
if ((handleTreeInternal->OperGet() == GT_CNS_INT) && (handleTreeInternal->TypeGet() == TYP_I_IMPL))
{
// These handle constants should be class handles.
assert(handleTreeInternal->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)handleTreeInternal->AsIntCon()->gtCompileTimeHandle;
}
}
}
return result;
}
//------------------------------------------------------------------------
// gtFoldExprSpecial -- optimize binary ops with one constant operand
//
// Arguments:
// tree - tree to optimize
//
// Return value:
// Tree (possibly modified at root or below), or a new tree
// Any new tree is fully morphed, if necessary.
//
GenTree* Compiler::gtFoldExprSpecial(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
genTreeOps oper = tree->OperGet();
GenTree* op;
GenTree* cons;
ssize_t val;
assert(tree->OperKind() & GTK_BINOP);
/* Filter out operators that cannot be folded here */
if (oper == GT_CAST)
{
return tree;
}
/* We only consider TYP_INT for folding
* Do not fold pointer arithmetic (e.g. addressing modes!) */
if (oper != GT_QMARK && !varTypeIsIntOrI(tree->gtType))
{
return tree;
}
/* Find out which is the constant node */
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
/* Get the constant value */
val = cons->AsIntConCommon()->IconValue();
// Transforms that would drop op cannot be performed if op has side effects
bool opHasSideEffects = (op->gtFlags & GTF_SIDE_EFFECT) != 0;
// Helper function that creates a new IntCon node and morphs it, if required
auto NewMorphedIntConNode = [&](int value) -> GenTreeIntCon* {
GenTreeIntCon* icon = gtNewIconNode(value);
if (fgGlobalMorph)
{
fgMorphTreeDone(icon);
}
return icon;
};
// Here `op` is the non-constant operand, `cons` is the constant operand
// and `val` is the constant value.
switch (oper)
{
case GT_LE:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 <= x) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_GE:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x >= 0) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_LT:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x < 0) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
break;
case GT_GT:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 > x) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
FALLTHROUGH;
case GT_EQ:
case GT_NE:
// Optimize boxed value classes; these are always false. This IL is
// generated when a generic value is tested against null:
// <T> ... foo(T x) { ... if ((object)x == null) ...
if ((val == 0) && op->IsBoxedValue())
{
JITDUMP("\nAttempting to optimize BOX(valueType) %s null [%06u]\n", GenTree::OpName(oper),
dspTreeID(tree));
// We don't expect GT_GT with signed compares, and we
// can't predict the result if we do see it, since the
// boxed object addr could have its high bit set.
if ((oper == GT_GT) && !tree->IsUnsigned())
{
JITDUMP(" bailing; unexpected signed compare via GT_GT\n");
}
else
{
// The tree under the box must be side effect free
// since we will drop it if we optimize.
assert(!gtTreeHasSideEffects(op->AsBox()->BoxOp(), GTF_SIDE_EFFECT));
// See if we can optimize away the box and related statements.
GenTree* boxSourceTree = gtTryRemoveBoxUpstreamEffects(op);
bool didOptimize = (boxSourceTree != nullptr);
// If optimization succeeded, remove the box.
if (didOptimize)
{
// Set up the result of the compare.
int compareResult = 0;
if (oper == GT_GT)
{
// GT_GT(null, box) == false
// GT_GT(box, null) == true
compareResult = (op1 == op);
}
else if (oper == GT_EQ)
{
// GT_EQ(box, null) == false
// GT_EQ(null, box) == false
compareResult = 0;
}
else
{
assert(oper == GT_NE);
// GT_NE(box, null) == true
// GT_NE(null, box) == true
compareResult = 1;
}
JITDUMP("\nSuccess: replacing BOX(valueType) %s null with %d\n", GenTree::OpName(oper),
compareResult);
return NewMorphedIntConNode(compareResult);
}
}
}
else
{
return gtFoldBoxNullable(tree);
}
break;
case GT_ADD:
if (val == 0)
{
goto DONE_FOLD;
}
break;
case GT_MUL:
if (val == 1)
{
goto DONE_FOLD;
}
else if (val == 0)
{
/* Multiply by zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_DIV:
case GT_UDIV:
if ((op2 == cons) && (val == 1) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_SUB:
if ((op2 == cons) && (val == 0) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_AND:
if (val == 0)
{
/* AND with zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
else
{
/* The GTF_BOOLEAN flag is set for nodes that are part
* of a boolean expression, thus all their children
* are known to evaluate to only 0 or 1 */
if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1
* AND with 1 stays the same */
assert(val == 1);
goto DONE_FOLD;
}
}
break;
case GT_OR:
if (val == 0)
{
goto DONE_FOLD;
}
else if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1 - OR with 1 is 1 */
assert(val == 1);
/* OR with one - return the 'one' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
if (val == 0)
{
if (op2 == cons)
{
goto DONE_FOLD;
}
else if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_QMARK:
{
assert(op1 == cons && op2 == op && op2->gtOper == GT_COLON);
assert(op2->AsOp()->gtOp1 && op2->AsOp()->gtOp2);
assert(val == 0 || val == 1);
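// Select the "then" or "else" arm of the colon based on the constant condition value.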
if (val)
{
op = op2->AsColon()->ThenNode();
}
else
{
op = op2->AsColon()->ElseNode();
}
// Clear colon flags only if the qmark itself is not conditionally executed
if ((tree->gtFlags & GTF_COLON_COND) == 0)
{
fgWalkTreePre(&op, gtClearColonCond);
}
}
goto DONE_FOLD;
default:
break;
}
/* The node is not foldable */
return tree;
DONE_FOLD:
/* The node has been folded into 'op' */
// If there was an assignment update, we just morphed it into
// a use; update the flags appropriately
if (op->gtOper == GT_LCL_VAR)
{
assert(tree->OperIs(GT_ASG) || (op->gtFlags & (GTF_VAR_USEASG | GTF_VAR_DEF)) == 0);
op->gtFlags &= ~(GTF_VAR_USEASG | GTF_VAR_DEF);
}
JITDUMP("\nFolding binary operator with a constant operand:\n");
DISPTREE(tree);
JITDUMP("Transformed into:\n");
DISPTREE(op);
return op;
}
//------------------------------------------------------------------------
// gtFoldBoxNullable -- optimize a boxed nullable feeding a compare to zero
//
// Arguments:
// tree - binop tree to potentially optimize, must be
// GT_GT, GT_EQ, or GT_NE
//
// Return value:
// Tree (possibly modified below the root).
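//
// Notes:
//    In effect, the transform performed here is:
//        BOX_NULLABLE(&x) cmp 0  ==>  x.hasValue cmp 0
//    i.e. the call to the nullable boxing helper is replaced by a read of
//    the nullable's 'hasValue' field, and the constant is retyped to TYP_INT.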
//
GenTree* Compiler::gtFoldBoxNullable(GenTree* tree)
{
assert(tree->OperKind() & GTK_BINOP);
assert(tree->OperIs(GT_GT, GT_EQ, GT_NE));
genTreeOps const oper = tree->OperGet();
if ((oper == GT_GT) && !tree->IsUnsigned())
{
return tree;
}
GenTree* const op1 = tree->AsOp()->gtOp1;
GenTree* const op2 = tree->AsOp()->gtOp2;
GenTree* op;
GenTree* cons;
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
ssize_t const val = cons->AsIntConCommon()->IconValue();
if (val != 0)
{
return tree;
}
if (!op->IsCall())
{
return tree;
}
GenTreeCall* const call = op->AsCall();
if (!call->IsHelperCall(this, CORINFO_HELP_BOX_NULLABLE))
{
return tree;
}
JITDUMP("\nAttempting to optimize BOX_NULLABLE(&x) %s null [%06u]\n", GenTree::OpName(oper), dspTreeID(tree));
// Get the address of the struct being boxed
GenTree* const arg = call->gtCallArgs->GetNext()->GetNode();
if (arg->OperIs(GT_ADDR) && ((arg->gtFlags & GTF_LATE_ARG) == 0))
{
CORINFO_CLASS_HANDLE nullableHnd = gtGetStructHandle(arg->AsOp()->gtOp1);
CORINFO_FIELD_HANDLE fieldHnd = info.compCompHnd->getFieldInClass(nullableHnd, 0);
// Replace the box with an access of the nullable 'hasValue' field.
JITDUMP("\nSuccess: replacing BOX_NULLABLE(&x) [%06u] with x.hasValue\n", dspTreeID(op));
GenTree* newOp = gtNewFieldRef(TYP_BOOL, fieldHnd, arg, 0);
if (op == op1)
{
tree->AsOp()->gtOp1 = newOp;
}
else
{
tree->AsOp()->gtOp2 = newOp;
}
cons->gtType = TYP_INT;
}
return tree;
}
//------------------------------------------------------------------------
// gtTryRemoveBoxUpstreamEffects: given an unused value type box,
// try and remove the upstream allocation and unnecessary parts of
// the copy.
//
// Arguments:
// op - the box node to optimize
// options - controls whether and how trees are modified
// (see notes)
//
// Return Value:
// A tree representing the original value to box, if removal
// is successful/possible (but see note). nullptr if removal fails.
//
// Notes:
// Value typed box gets special treatment because it has associated
// side effects that can be removed if the box result is not used.
//
// By default (options == BR_REMOVE_AND_NARROW) this method will
// try to remove unnecessary trees and to reduce the remaining
// operations to the minimal set, possibly narrowing the width of
// loads from the box source if it is a struct.
//
// To perform a trial removal, pass BR_DONT_REMOVE. This can be
// useful to determine if this optimization should only be
// performed if some other conditions hold true.
//
// To remove but not alter the access to the box source, pass
// BR_REMOVE_BUT_NOT_NARROW.
//
// To remove and return the tree for the type handle used for
// the boxed newobj, pass BR_REMOVE_BUT_NOT_NARROW_WANT_TYPE_HANDLE.
// This can be useful when the only part of the box that is "live"
// is its type.
//
// If removal fails, it is possible that a subsequent pass may be
// able to optimize. Blocking side effects may now be minimized
// (null or bounds checks might have been removed) or might be
// better known (inline return placeholder updated with the actual
// return expression). So the box is perhaps best left as is to
// help trigger this re-examination.
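//
//    As a rough sketch, the upstream statements produced for an inlined box
//    (see impImportAndPushBox) have the shape:
//        asgStmt:  boxTemp = ALLOCOBJ(typeHandle)       (or a newobj helper call)
//        copyStmt: IND(boxTemp + TARGET_POINTER_SIZE) = <value being boxed>
//    and this method removes or narrows those statements as described above.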
GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions options)
{
assert(op->IsBoxedValue());
// grab related parts for the optimization
GenTreeBox* box = op->AsBox();
Statement* asgStmt = box->gtAsgStmtWhenInlinedBoxValue;
Statement* copyStmt = box->gtCopyStmtWhenInlinedBoxValue;
JITDUMP("gtTryRemoveBoxUpstreamEffects: %s to %s of BOX (valuetype)"
" [%06u] (assign/newobj " FMT_STMT " copy " FMT_STMT "\n",
(options == BR_DONT_REMOVE) ? "checking if it is possible" : "attempting",
(options == BR_MAKE_LOCAL_COPY) ? "make local unboxed version" : "remove side effects", dspTreeID(op),
asgStmt->GetID(), copyStmt->GetID());
// If we don't recognize the form of the assign, bail.
GenTree* asg = asgStmt->GetRootNode();
if (asg->gtOper != GT_ASG)
{
JITDUMP(" bailing; unexpected assignment op %s\n", GenTree::OpName(asg->gtOper));
return nullptr;
}
// If we're eventually going to return the type handle, remember it now.
GenTree* boxTypeHandle = nullptr;
if ((options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE) || (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE))
{
GenTree* asgSrc = asg->AsOp()->gtOp2;
genTreeOps asgSrcOper = asgSrc->OperGet();
// Allocation may be via AllocObj or via helper call, depending
// on when this is invoked and whether the jit is using AllocObj
// for R2R allocations.
if (asgSrcOper == GT_ALLOCOBJ)
{
GenTreeAllocObj* allocObj = asgSrc->AsAllocObj();
boxTypeHandle = allocObj->AsOp()->gtOp1;
}
else if (asgSrcOper == GT_CALL)
{
GenTreeCall* newobjCall = asgSrc->AsCall();
GenTreeCall::Use* newobjArgs = newobjCall->gtCallArgs;
// In R2R expansions the handle may not be an explicit operand to the helper,
// so we can't remove the box.
if (newobjArgs == nullptr)
{
assert(newobjCall->IsHelperCall(this, CORINFO_HELP_READYTORUN_NEW));
JITDUMP(" bailing; newobj via R2R helper\n");
return nullptr;
}
boxTypeHandle = newobjArgs->GetNode();
}
else
{
unreached();
}
assert(boxTypeHandle != nullptr);
}
// If we don't recognize the form of the copy, bail.
GenTree* copy = copyStmt->GetRootNode();
if (copy->gtOper != GT_ASG)
{
// GT_RET_EXPR is a tolerable temporary failure.
// The jit will revisit this optimization after
// inlining is done.
if (copy->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy %s\n", GenTree::OpName(copy->gtOper));
}
else
{
// Anything else is a missed case we should
// figure out how to handle. One known case
// is GT_COMMAs enclosing the GT_ASG we are
// looking for.
JITDUMP(" bailing; unexpected copy op %s\n", GenTree::OpName(copy->gtOper));
}
return nullptr;
}
// Handle case where we are optimizing the box into a local copy
if (options == BR_MAKE_LOCAL_COPY)
{
// Drill into the box to get at the box temp local and the box type
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
assert(lvaTable[boxTempLcl].lvType == TYP_REF);
CORINFO_CLASS_HANDLE boxClass = lvaTable[boxTempLcl].lvClassHnd;
assert(boxClass != nullptr);
// Verify that the copyDst has the expected shape
// (blk|obj|ind (add (boxTempLcl, ptr-size)))
//
// The shape here is constrained to the patterns we produce
// over in impImportAndPushBox for the inlined box case.
GenTree* copyDst = copy->AsOp()->gtOp1;
if (!copyDst->OperIs(GT_BLK, GT_IND, GT_OBJ))
{
JITDUMP("Unexpected copy dest operator %s\n", GenTree::OpName(copyDst->gtOper));
return nullptr;
}
GenTree* copyDstAddr = copyDst->AsOp()->gtOp1;
if (copyDstAddr->OperGet() != GT_ADD)
{
JITDUMP("Unexpected copy dest address tree\n");
return nullptr;
}
GenTree* copyDstAddrOp1 = copyDstAddr->AsOp()->gtOp1;
if ((copyDstAddrOp1->OperGet() != GT_LCL_VAR) || (copyDstAddrOp1->AsLclVarCommon()->GetLclNum() != boxTempLcl))
{
JITDUMP("Unexpected copy dest address 1st addend\n");
return nullptr;
}
GenTree* copyDstAddrOp2 = copyDstAddr->AsOp()->gtOp2;
if (!copyDstAddrOp2->IsIntegralConst(TARGET_POINTER_SIZE))
{
JITDUMP("Unexpected copy dest address 2nd addend\n");
return nullptr;
}
// Screening checks have all passed. Do the transformation.
//
// Retype the box temp to be a struct
JITDUMP("Retyping box temp V%02u to struct %s\n", boxTempLcl, eeGetClassName(boxClass));
lvaTable[boxTempLcl].lvType = TYP_UNDEF;
const bool isUnsafeValueClass = false;
lvaSetStruct(boxTempLcl, boxClass, isUnsafeValueClass);
var_types boxTempType = lvaTable[boxTempLcl].lvType;
// Remove the newobj and assignment to the box temp
JITDUMP("Bashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Update the copy from the value to be boxed to the box temp
GenTree* newDst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
copyDst->AsOp()->gtOp1 = newDst;
// Return the address of the now-struct typed box temp
GenTree* retValue = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
return retValue;
}
// If the copy is a struct copy, make sure we know how to isolate
// any source side effects.
GenTree* copySrc = copy->AsOp()->gtOp2;
// If the copy source is from a pending inline, wait for it to resolve.
if (copySrc->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy source %s\n", GenTree::OpName(copySrc->gtOper));
return nullptr;
}
bool hasSrcSideEffect = false;
bool isStructCopy = false;
if (gtTreeHasSideEffects(copySrc, GTF_SIDE_EFFECT))
{
hasSrcSideEffect = true;
if (varTypeIsStruct(copySrc->gtType))
{
isStructCopy = true;
if ((copySrc->gtOper != GT_OBJ) && (copySrc->gtOper != GT_IND) && (copySrc->gtOper != GT_FIELD))
{
// We don't know how to handle other cases, yet.
JITDUMP(" bailing; unexpected copy source struct op with side effect %s\n",
GenTree::OpName(copySrc->gtOper));
return nullptr;
}
}
}
// If this was a trial removal, we're done.
if (options == BR_DONT_REMOVE)
{
return copySrc;
}
if (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
// Otherwise, proceed with the optimization.
//
// Change the assignment expression to a NOP.
JITDUMP("\nBashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Change the copy expression so it preserves key
// source side effects.
JITDUMP("\nBashing COPY [%06u]", dspTreeID(copy));
if (!hasSrcSideEffect)
{
// If there were no copy source side effects just bash
// the copy to a NOP.
copy->gtBashToNOP();
JITDUMP(" to NOP; no source side effects.\n");
}
else if (!isStructCopy)
{
// For scalar types, go ahead and produce the
// value as the copy is fairly cheap and likely
// the optimizer can trim things down to just the
// minimal side effect parts.
copyStmt->SetRootNode(copySrc);
JITDUMP(" to scalar read via [%06u]\n", dspTreeID(copySrc));
}
else
{
// For struct types read the first byte of the
// source struct; there's no need to read the
// entire thing, and no place to put it.
assert(copySrc->OperIs(GT_OBJ, GT_IND, GT_FIELD));
copyStmt->SetRootNode(copySrc);
if (options == BR_REMOVE_AND_NARROW || options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
JITDUMP(" to read first byte of struct via modified [%06u]\n", dspTreeID(copySrc));
gtChangeOperToNullCheck(copySrc, compCurBB);
}
else
{
JITDUMP(" to read entire struct via modified [%06u]\n", dspTreeID(copySrc));
}
}
if (fgStmtListThreaded)
{
fgSetStmtSeq(asgStmt);
fgSetStmtSeq(copyStmt);
}
// Box effects were successfully optimized.
if (options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
else
{
return copySrc;
}
}
//------------------------------------------------------------------------
// gtOptimizeEnumHasFlag: given the operands for a call to Enum.HasFlag,
// try and optimize the call to a simple and/compare tree.
//
// Arguments:
// thisOp - first argument to the call
// flagOp - second argument to the call
//
// Return Value:
// A new cmp/and tree if successful; nullptr on failure.
//
// Notes:
// If successful, may allocate new temps and modify connected
// statements.
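//
//    The rewrite produced on success is, in effect:
//        thisOp.HasFlag(flagOp)  ==>  (thisVal & flagVal) == flagVal
//    where thisVal and flagVal are the pre-boxed values (spilled to temps
//    when they are not integral constants).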
GenTree* Compiler::gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp)
{
JITDUMP("Considering optimizing call to Enum.HasFlag....\n");
// Operands must be boxes
if (!thisOp->IsBoxedValue() || !flagOp->IsBoxedValue())
{
JITDUMP("bailing, need both inputs to be BOXes\n");
return nullptr;
}
// Operands must have same type
bool isExactThis = false;
bool isNonNullThis = false;
CORINFO_CLASS_HANDLE thisHnd = gtGetClassHandle(thisOp, &isExactThis, &isNonNullThis);
if (thisHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'this' operand\n");
return nullptr;
}
// A boxed thisOp should have exact type and non-null instance
assert(isExactThis);
assert(isNonNullThis);
bool isExactFlag = false;
bool isNonNullFlag = false;
CORINFO_CLASS_HANDLE flagHnd = gtGetClassHandle(flagOp, &isExactFlag, &isNonNullFlag);
if (flagHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'flag' operand\n");
return nullptr;
}
// A boxed flagOp should have exact type and non-null instance
assert(isExactFlag);
assert(isNonNullFlag);
if (flagHnd != thisHnd)
{
JITDUMP("bailing, operand types differ\n");
return nullptr;
}
// If we have a shared type instance we can't safely check type
// equality, so bail.
DWORD classAttribs = info.compCompHnd->getClassAttribs(thisHnd);
if (classAttribs & CORINFO_FLG_SHAREDINST)
{
JITDUMP("bailing, have shared instance type\n");
return nullptr;
}
// Simulate removing the box for thisOP. We need to know that it can
// be safely removed before we can optimize.
GenTree* thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_DONT_REMOVE);
if (thisVal == nullptr)
{
// Note we may fail here if the this operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'this' operand\n");
return nullptr;
}
// Do likewise with flagOp.
GenTree* flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_DONT_REMOVE);
if (flagVal == nullptr)
{
// Note we may fail here if the flag operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'flag' operand\n");
return nullptr;
}
// Only proceed when both box sources have the same actual type.
// (this rules out long/int mismatches)
if (genActualType(thisVal->TypeGet()) != genActualType(flagVal->TypeGet()))
{
JITDUMP("bailing, pre-boxed values have different types\n");
return nullptr;
}
// Yes, both boxes can be cleaned up. Optimize.
JITDUMP("Optimizing call to Enum.HasFlag\n");
// Undo the boxing of the Ops and prepare to operate directly
// on the pre-boxed values.
thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_REMOVE_BUT_NOT_NARROW);
flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_REMOVE_BUT_NOT_NARROW);
// Our trial removals above should guarantee successful removals here.
assert(thisVal != nullptr);
assert(flagVal != nullptr);
assert(genActualType(thisVal->TypeGet()) == genActualType(flagVal->TypeGet()));
// Type to use for optimized check
var_types type = genActualType(thisVal->TypeGet());
// The thisVal and flagVal trees come from earlier statements.
//
// Unless they are invariant values, we need to evaluate them both
// to temps at those points to safely transmit the values here.
//
// Also we need to use the flag twice, so we need two trees for it.
GenTree* thisValOpt = nullptr;
GenTree* flagValOpt = nullptr;
GenTree* flagValOptCopy = nullptr;
if (thisVal->IsIntegralConst())
{
thisValOpt = gtClone(thisVal);
assert(thisValOpt != nullptr);
}
else
{
const unsigned thisTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag this temp"));
GenTree* thisAsg = gtNewTempAssign(thisTmp, thisVal);
Statement* thisAsgStmt = thisOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
thisAsgStmt->SetRootNode(thisAsg);
thisValOpt = gtNewLclvNode(thisTmp, type);
}
if (flagVal->IsIntegralConst())
{
flagValOpt = gtClone(flagVal);
assert(flagValOpt != nullptr);
flagValOptCopy = gtClone(flagVal);
assert(flagValOptCopy != nullptr);
}
else
{
const unsigned flagTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag flag temp"));
GenTree* flagAsg = gtNewTempAssign(flagTmp, flagVal);
Statement* flagAsgStmt = flagOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
flagAsgStmt->SetRootNode(flagAsg);
flagValOpt = gtNewLclvNode(flagTmp, type);
flagValOptCopy = gtNewLclvNode(flagTmp, type);
}
// Turn the call into (thisValTmp & flagTmp) == flagTmp.
GenTree* andTree = gtNewOperNode(GT_AND, type, thisValOpt, flagValOpt);
GenTree* cmpTree = gtNewOperNode(GT_EQ, TYP_INT, andTree, flagValOptCopy);
JITDUMP("Optimized call to Enum.HasFlag\n");
return cmpTree;
}
/*****************************************************************************
*
* Fold the given constant tree.
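* Operands are expected to already be constant nodes. If the tree cannot
* be folded (e.g. division by zero, or an unhandled oper/type), the
* original tree is returned unchanged; overflowing checked operations are
* morphed into a comma-throw during global morph.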
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTree* Compiler::gtFoldExprConst(GenTree* tree)
{
SSIZE_T i1, i2, itemp;
INT64 lval1, lval2, ltemp;
float f1, f2;
double d1, d2;
var_types switchType;
FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); // default unless we override it when folding
assert(tree->OperIsUnary() || tree->OperIsBinary());
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2IfPresent();
if (!opts.OptEnabled(CLFLG_CONSTANTFOLD))
{
return tree;
}
if (tree->OperIs(GT_NOP, GT_ALLOCOBJ, GT_RUNTIMELOOKUP))
{
return tree;
}
// This condition exists to preserve previous behavior.
// TODO-CQ: enable folding for bounds check nodes.
if (tree->OperIs(GT_BOUNDS_CHECK))
{
return tree;
}
#ifdef FEATURE_SIMD
if (tree->OperIs(GT_SIMD))
{
return tree;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (tree->OperIs(GT_HWINTRINSIC))
{
return tree;
}
#endif
if (tree->OperIsUnary())
{
assert(op1->OperIsConst());
switch (op1->TypeGet())
{
case TYP_INT:
// Fold constant INT unary operator.
if (!op1->AsIntCon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = (INT32)op1->AsIntCon()->IconValue();
// If we fold a unary oper, then the folded constant
// is considered a ConstantIndexField if op1 was one.
if ((op1->AsIntCon()->gtFieldSeq != nullptr) && op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
switch (tree->OperGet())
{
case GT_NOT:
i1 = ~i1;
break;
case GT_NEG:
i1 = -i1;
break;
case GT_BSWAP:
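// Reverse the byte order of the 32-bit value, e.g. 0x11223344 -> 0x44332211.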
i1 = ((i1 >> 24) & 0xFF) | ((i1 >> 8) & 0xFF00) | ((i1 << 8) & 0xFF0000) |
((i1 << 24) & 0xFF000000);
break;
case GT_BSWAP16:
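// Swap the two low-order bytes, e.g. 0x1122 -> 0x2211; the upper 16 bits of the result are cleared.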
i1 = ((i1 >> 8) & 0xFF) | ((i1 << 8) & 0xFF00);
break;
case GT_CAST:
// assert (genActualType(tree->CastToType()) == tree->TypeGet());
if (tree->gtOverflow() &&
CheckedOps::CastFromIntOverflows((INT32)i1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(i1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(i1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(i1));
goto CNS_INT;
case TYP_BOOL:
case TYP_UBYTE:
i1 = INT32(UINT8(i1));
goto CNS_INT;
case TYP_UINT:
case TYP_INT:
goto CNS_INT;
case TYP_ULONG:
if (tree->IsUnsigned())
{
lval1 = UINT64(UINT32(i1));
}
else
{
lval1 = UINT64(INT32(i1));
}
goto CNS_LONG;
case TYP_LONG:
if (tree->IsUnsigned())
{
lval1 = INT64(UINT32(i1));
}
else
{
lval1 = INT64(INT32(i1));
}
goto CNS_LONG;
case TYP_FLOAT:
if (tree->IsUnsigned())
{
f1 = forceCastToFloat(UINT32(i1));
}
else
{
f1 = forceCastToFloat(INT32(i1));
}
d1 = f1;
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (tree->IsUnsigned())
{
d1 = (double)UINT32(i1);
}
else
{
d1 = (double)INT32(i1);
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from int");
return tree;
}
default:
return tree;
}
goto CNS_INT;
case TYP_LONG:
// Fold constant LONG unary operator.
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
switch (tree->OperGet())
{
case GT_NOT:
lval1 = ~lval1;
break;
case GT_NEG:
lval1 = -lval1;
break;
case GT_BSWAP:
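// Reverse the byte order of the 64-bit value.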
lval1 = ((lval1 >> 56) & 0xFF) | ((lval1 >> 40) & 0xFF00) | ((lval1 >> 24) & 0xFF0000) |
((lval1 >> 8) & 0xFF000000) | ((lval1 << 8) & 0xFF00000000) |
((lval1 << 24) & 0xFF0000000000) | ((lval1 << 40) & 0xFF000000000000) |
((lval1 << 56) & 0xFF00000000000000);
break;
case GT_CAST:
assert(tree->TypeIs(genActualType(tree->CastToType())));
if (tree->gtOverflow() &&
CheckedOps::CastFromLongOverflows(lval1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(lval1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(lval1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(lval1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(lval1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(lval1);
goto CNS_INT;
case TYP_UINT:
i1 = UINT32(lval1);
goto CNS_INT;
case TYP_ULONG:
case TYP_LONG:
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->IsUnsigned() && (lval1 < 0))
{
d1 = FloatingPointUtils::convertUInt64ToDouble((unsigned __int64)lval1);
}
else
{
d1 = (double)lval1;
}
if (tree->CastToType() == TYP_FLOAT)
{
f1 = forceCastToFloat(d1); // truncate precision
d1 = f1;
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from long");
return tree;
}
default:
return tree;
}
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
assert(op1->OperIs(GT_CNS_DBL));
// Fold constant DOUBLE unary operator.
d1 = op1->AsDblCon()->gtDconVal;
switch (tree->OperGet())
{
case GT_NEG:
d1 = -d1;
break;
case GT_CAST:
f1 = forceCastToFloat(d1);
if ((op1->TypeIs(TYP_DOUBLE) && CheckedOps::CastFromDoubleOverflows(d1, tree->CastToType())) ||
(op1->TypeIs(TYP_FLOAT) && CheckedOps::CastFromFloatOverflows(f1, tree->CastToType())))
{
// The conversion overflows. The ECMA spec says, in III 3.27, that
// "...if overflow occurs converting a floating point type to an integer, ...,
// the value returned is unspecified." However, it would at least be
// desirable to have the same value returned for casting an overflowing
// constant to an int as would be obtained by passing that constant as
// a parameter and then casting that parameter to an int type.
// Don't fold overflowing conversions, as the value returned by
// JIT's codegen doesn't always match with the C compiler's cast result.
// We want the behavior to be the same with or without folding.
return tree;
}
assert(tree->TypeIs(genActualType(tree->CastToType())));
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(d1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(d1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(d1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(d1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(d1);
goto CNS_INT;
case TYP_UINT:
i1 = forceCastToUInt32(d1);
goto CNS_INT;
case TYP_LONG:
lval1 = INT64(d1);
goto CNS_LONG;
case TYP_ULONG:
lval1 = FloatingPointUtils::convertDoubleToUInt64(d1);
goto CNS_LONG;
case TYP_FLOAT:
d1 = forceCastToFloat(d1);
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (op1->TypeIs(TYP_FLOAT))
{
d1 = forceCastToFloat(d1); // Truncate precision.
}
goto CNS_DOUBLE; // Redundant cast.
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from double/float");
break;
}
return tree;
default:
return tree;
}
goto CNS_DOUBLE;
default:
// Not a foldable type - e.g. RET const.
return tree;
}
}
// We have a binary operator.
assert(tree->OperIsBinary());
assert(op2 != nullptr);
assert(op1->OperIsConst());
assert(op2->OperIsConst());
if (tree->OperIs(GT_COMMA))
{
return op2;
}
switchType = op1->TypeGet();
// Normally we will just switch on op1 types, but for the case where
// only op2 is a GC type and op1 is not a GC type, we use the op2 type.
// This makes us handle this as a case of folding for GC type.
if (varTypeIsGC(op2->gtType) && !varTypeIsGC(op1->gtType))
{
switchType = op2->TypeGet();
}
switch (switchType)
{
// Fold constant REF of BYREF binary operator.
// These can only be comparisons or null pointers.
case TYP_REF:
// String nodes are an RVA at this point.
if (op1->OperIs(GT_CNS_STR) || op2->OperIs(GT_CNS_STR))
{
// Fold "ldstr" ==/!= null.
if (op2->IsIntegralConst(0))
{
if (tree->OperIs(GT_EQ))
{
i1 = 0;
goto FOLD_COND;
}
if (tree->OperIs(GT_NE) || (tree->OperIs(GT_GT) && tree->IsUnsigned()))
{
i1 = 1;
goto FOLD_COND;
}
}
return tree;
}
FALLTHROUGH;
case TYP_BYREF:
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (i1 == i2);
goto FOLD_COND;
case GT_NE:
i1 = (i1 != i2);
goto FOLD_COND;
case GT_ADD:
noway_assert(!tree->TypeIs(TYP_REF));
// We only fold a GT_ADD that involves a null reference.
if ((op1->TypeIs(TYP_REF) && (i1 == 0)) || (op2->TypeIs(TYP_REF) && (i2 == 0)))
{
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Fold into GT_IND of null byref.
tree->BashToConst(0, TYP_BYREF);
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("\nFolded to null byref:\n");
DISPTREE(tree);
goto DONE;
}
break;
default:
break;
}
return tree;
// Fold constant INT binary operator.
case TYP_INT:
assert(tree->TypeIs(TYP_INT) || varTypeIsGC(tree) || tree->OperIs(GT_MKREFANY));
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (INT32(i1) == INT32(i2));
break;
case GT_NE:
i1 = (INT32(i1) != INT32(i2));
break;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) < UINT32(i2));
}
else
{
i1 = (INT32(i1) < INT32(i2));
}
break;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) <= UINT32(i2));
}
else
{
i1 = (INT32(i1) <= INT32(i2));
}
break;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) >= UINT32(i2));
}
else
{
i1 = (INT32(i1) >= INT32(i2));
}
break;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) > UINT32(i2));
}
else
{
i1 = (INT32(i1) > INT32(i2));
}
break;
case GT_ADD:
itemp = i1 + i2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
break;
case GT_SUB:
itemp = i1 - i2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
break;
case GT_MUL:
itemp = i1 * i2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
// For the very particular case of the "constant array index" pseudo-field, we
// assume that multiplication is by the field width, and preserves that field.
// This could obviously be made more robust by a more complicated set of annotations...
if ((op1->AsIntCon()->gtFieldSeq != nullptr) &&
op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
else if ((op2->AsIntCon()->gtFieldSeq != nullptr) &&
op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op1->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op2->AsIntCon()->gtFieldSeq;
}
i1 = itemp;
break;
case GT_OR:
i1 |= i2;
break;
case GT_XOR:
i1 ^= i2;
break;
case GT_AND:
i1 &= i2;
break;
case GT_LSH:
i1 <<= (i2 & 0x1f);
break;
case GT_RSH:
i1 >>= (i2 & 0x1f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
i1 = UINT32(i1) >> (i2 & 0x1f);
break;
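// Rotate operations: bits shifted out of one end re-enter at the other.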
case GT_ROL:
i1 = (i1 << (i2 & 0x1f)) | (UINT32(i1) >> ((32 - i2) & 0x1f));
break;
case GT_ROR:
i1 = (i1 << ((32 - i2) & 0x1f)) | (UINT32(i1) >> (i2 & 0x1f));
break;
// DIV and MOD can throw an exception - if the division is by 0
// or there is overflow - when dividing MIN by -1.
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (INT32(i2) == 0)
{
// Division by zero.
// We have to evaluate this expression and throw an exception.
return tree;
}
else if ((INT32(i2) == -1) && (UINT32(i1) == 0x80000000))
{
// Overflow Division.
// We have to evaluate this expression and throw an exception.
return tree;
}
if (tree->OperIs(GT_DIV))
{
i1 = INT32(i1) / INT32(i2);
}
else if (tree->OperIs(GT_MOD))
{
i1 = INT32(i1) % INT32(i2);
}
else if (tree->OperIs(GT_UDIV))
{
i1 = UINT32(i1) / UINT32(i2);
}
else
{
assert(tree->OperIs(GT_UMOD));
i1 = UINT32(i1) % UINT32(i2);
}
break;
default:
return tree;
}
// We get here after folding to a GT_CNS_INT type.
// Change the node to the new type / value and make sure the node sizes are OK.
CNS_INT:
FOLD_COND:
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Also all conditional folding jumps here since the node hanging from
// GT_JTRUE has to be a GT_CNS_INT - value 0 or 1.
// Some operations are performed as 64 bit instead of 32 bit so the upper 32 bits
// need to be discarded. Since constant values are stored as ssize_t and the node
// has TYP_INT the result needs to be sign extended rather than zero extended.
tree->BashToConst(static_cast<int>(i1));
tree->AsIntCon()->gtFieldSeq = fieldSeq;
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to int constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant LONG binary operator.
case TYP_LONG:
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
// op1 is known to be a TYP_LONG, op2 is normally a TYP_LONG, unless we have a shift operator in which case
// it is a TYP_INT.
assert(op2->TypeIs(TYP_LONG, TYP_INT));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
// For the shift operators we can have a op2 that is a TYP_INT.
// Thus we cannot just use LngValue(), as it will assert on 32 bit if op2 is not GT_CNS_LNG.
lval2 = op2->AsIntConCommon()->IntegralValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (lval1 == lval2);
goto FOLD_COND;
case GT_NE:
i1 = (lval1 != lval2);
goto FOLD_COND;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) < UINT64(lval2));
}
else
{
i1 = (lval1 < lval2);
}
goto FOLD_COND;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) <= UINT64(lval2));
}
else
{
i1 = (lval1 <= lval2);
}
goto FOLD_COND;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) >= UINT64(lval2));
}
else
{
i1 = (lval1 >= lval2);
}
goto FOLD_COND;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) > UINT64(lval2));
}
else
{
i1 = (lval1 > lval2);
}
goto FOLD_COND;
case GT_ADD:
ltemp = lval1 + lval2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
#ifdef TARGET_64BIT
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
#endif
break;
case GT_SUB:
ltemp = lval1 - lval2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_MUL:
ltemp = lval1 * lval2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_OR:
lval1 |= lval2;
break;
case GT_XOR:
lval1 ^= lval2;
break;
case GT_AND:
lval1 &= lval2;
break;
case GT_LSH:
lval1 <<= (lval2 & 0x3f);
break;
case GT_RSH:
lval1 >>= (lval2 & 0x3f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
lval1 = UINT64(lval1) >> (lval2 & 0x3f);
break;
case GT_ROL:
lval1 = (lval1 << (lval2 & 0x3f)) | (UINT64(lval1) >> ((64 - lval2) & 0x3f));
break;
case GT_ROR:
lval1 = (lval1 << ((64 - lval2) & 0x3f)) | (UINT64(lval1) >> (lval2 & 0x3f));
break;
// Both DIV and IDIV on x86 raise an exception for min_int (and min_long) / -1. So we preserve
// that behavior here.
case GT_DIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 /= lval2;
break;
case GT_MOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 %= lval2;
break;
case GT_UDIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) / UINT64(lval2);
break;
case GT_UMOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) % UINT64(lval2);
break;
default:
return tree;
}
CNS_LONG:
#if !defined(TARGET_64BIT)
if (fieldSeq != FieldSeqStore::NotAField())
{
assert(!"Field sequences on CNS_LNG nodes!?");
return tree;
}
#endif // !defined(TARGET_64BIT)
JITDUMP("\nFolding long operator with constant nodes into a constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_NATIVELONG] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(lval1);
#ifdef TARGET_64BIT
tree->AsIntCon()->gtFieldSeq = fieldSeq;
#endif
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to long constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant FLOAT or DOUBLE binary operator
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->gtOverflowEx())
{
return tree;
}
assert(op1->OperIs(GT_CNS_DBL));
d1 = op1->AsDblCon()->gtDconVal;
assert(varTypeIsFloating(op2->TypeGet()));
assert(op2->OperIs(GT_CNS_DBL));
d2 = op2->AsDblCon()->gtDconVal;
// Special case - check if we have NaN operands.
// For ordered comparisons (GTF_RELOP_NAN_UN not set) the result is
// always false - return 0.
// For unordered comparisons (i.e. the GTF_RELOP_NAN_UN flag is set)
// the result is always true - return 1.
if (_isnan(d1) || _isnan(d2))
{
JITDUMP("Double operator(s) is NaN\n");
if (tree->OperIsCompare())
{
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
// Unordered comparison with NaN always succeeds.
i1 = 1;
goto FOLD_COND;
}
else
{
// Normal comparison with NaN always fails.
i1 = 0;
goto FOLD_COND;
}
}
}
switch (tree->OperGet())
{
case GT_EQ:
i1 = (d1 == d2);
goto FOLD_COND;
case GT_NE:
i1 = (d1 != d2);
goto FOLD_COND;
case GT_LT:
i1 = (d1 < d2);
goto FOLD_COND;
case GT_LE:
i1 = (d1 <= d2);
goto FOLD_COND;
case GT_GE:
i1 = (d1 >= d2);
goto FOLD_COND;
case GT_GT:
i1 = (d1 > d2);
goto FOLD_COND;
// Floating point arithmetic should be done in the declared
// precision while doing constant folding. For this reason, even though TYP_FLOAT
// constants are stored as double constants, they must be converted back to float
// when performing float arithmetic. Here is an example case where performing
// the arithmetic in double precision would lead to incorrect results.
//
// Example:
// float a = float.MaxValue;
// float b = a*a; This will produce +inf in single precision and 1.1579207543382391e+077 in double
// precision.
// float c = b/b; This will produce NaN in single precision and 1 in double precision.
case GT_ADD:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 + f2);
}
else
{
d1 += d2;
}
break;
case GT_SUB:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 - f2);
}
else
{
d1 -= d2;
}
break;
case GT_MUL:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 * f2);
}
else
{
d1 *= d2;
}
break;
case GT_DIV:
// We do not fold division by zero, even for floating point.
// This is because the result will be platform-dependent for an expression like 0d / 0d.
if (d2 == 0)
{
return tree;
}
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 / f2);
}
else
{
d1 /= d2;
}
break;
default:
return tree;
}
CNS_DOUBLE:
JITDUMP("\nFolding fp operator with constant nodes into a fp constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_DBL] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(d1, tree->TypeGet());
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to fp constant:\n");
DISPTREE(tree);
goto DONE;
default:
// Not a foldable type.
return tree;
}
DONE:
// Make sure no side effect flags are set on this constant node.
tree->gtFlags &= ~GTF_ALL_EFFECT;
return tree;
INTEGRAL_OVF:
// This operation is going to cause an overflow exception. Morph into
// an overflow helper. Put a dummy constant value for code generation.
//
// We could remove all subsequent trees in the current basic block,
// unless this node is a child of GT_COLON
//
// NOTE: Since the folded value is not constant we should not change the
// "tree" node - otherwise we confuse the logic that checks if the folding
// was successful - instead use one of the operands, e.g. op1.
// Don't fold overflow operations if not global morph phase.
// The reason for this is that this optimization is replacing a gentree node
// with another new gentree node. Say a GT_CALL(arglist) has one 'arg'
// involving overflow arithmetic. During assertion prop, it is possible
// that the 'arg' could be constant folded and the result could lead to an
// overflow. In such a case 'arg' will get replaced with GT_COMMA node
// but fgMorphArgs() - see the logic around "if(lateArgsComputed)" - doesn't
// update the args table. For this reason this optimization is enabled only
// for global morphing phase.
//
// TODO-CQ: Once fgMorphArgs() is fixed this restriction could be removed.
if (!fgGlobalMorph)
{
assert(tree->gtOverflow());
return tree;
}
var_types type = genActualType(tree->TypeGet());
op1 = type == TYP_LONG ? gtNewLconNode(0) : gtNewIconNode(0);
if (vnStore != nullptr)
{
op1->gtVNPair.SetBoth(vnStore->VNZeroForType(type));
}
JITDUMP("\nFolding binary operator with constant nodes into a comma throw:\n");
DISPTREE(tree);
// We will change the cast to a GT_COMMA and attach the exception helper as AsOp()->gtOp1.
// The constant expression zero becomes op2.
assert(tree->gtOverflow());
assert(tree->OperIs(GT_ADD, GT_SUB, GT_CAST, GT_MUL));
assert(op1 != nullptr);
op2 = op1;
op1 = gtNewHelperCallNode(CORINFO_HELP_OVERFLOW, TYP_VOID, gtNewCallArgs(gtNewIconNode(compCurBB->bbTryIndex)));
// op1 is a call to the JIT helper that throws an Overflow exception.
// Attach the ExcSet for VNF_OverflowExc(Void) to this call.
if (vnStore != nullptr)
{
op1->gtVNPair = vnStore->VNPWithExc(ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()),
vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc,
vnStore->VNPForVoid())));
}
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), op1, op2);
return tree;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//------------------------------------------------------------------------
// gtNewTempAssign: Create an assignment of the given value to a temp.
//
// Arguments:
// tmp - local number for a compiler temp
// val - value to assign to the temp
// pAfterStmt - statement to insert any additional statements after
// ilOffset - il offset for new statements
// block - block to insert any additional statements in
//
// Return Value:
// Normally a new assignment node.
// However may return a nop node if val is simply a reference to the temp.
//
// Notes:
// Self-assignments may be represented via NOPs.
//
// May update the type of the temp, if it was previously unknown.
//
// May set compFloatingPointUsed.
GenTree* Compiler::gtNewTempAssign(
unsigned tmp, GenTree* val, Statement** pAfterStmt, const DebugInfo& di, BasicBlock* block)
{
// Self-assignment is a nop.
if (val->OperGet() == GT_LCL_VAR && val->AsLclVarCommon()->GetLclNum() == tmp)
{
return gtNewNothingNode();
}
LclVarDsc* varDsc = lvaGetDesc(tmp);
if (varDsc->TypeGet() == TYP_I_IMPL && val->TypeGet() == TYP_BYREF)
{
impBashVarAddrsToI(val);
}
var_types valTyp = val->TypeGet();
if (val->OperGet() == GT_LCL_VAR && lvaTable[val->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
valTyp = lvaGetRealType(val->AsLclVar()->GetLclNum());
val->gtType = valTyp;
}
var_types dstTyp = varDsc->TypeGet();
/* If the variable's lvType is not yet set then set it here */
if (dstTyp == TYP_UNDEF)
{
varDsc->lvType = dstTyp = genActualType(valTyp);
#if FEATURE_SIMD
if (varTypeIsSIMD(dstTyp))
{
varDsc->lvSIMDType = 1;
}
#endif
}
#ifdef DEBUG
// Make sure the actual types match.
if (genActualType(valTyp) != genActualType(dstTyp))
{
// Plus some other exceptions that are apparently legal:
// 1) TYP_REF or BYREF = TYP_I_IMPL
bool ok = false;
if (varTypeIsGC(dstTyp) && (valTyp == TYP_I_IMPL))
{
ok = true;
}
// 2) TYP_DOUBLE = TYP_FLOAT or TYP_FLOAT = TYP_DOUBLE
else if (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))
{
ok = true;
}
// 3) TYP_BYREF = TYP_REF when object stack allocation is enabled
else if (JitConfig.JitObjectStackAllocation() && (dstTyp == TYP_BYREF) && (valTyp == TYP_REF))
{
ok = true;
}
else if (!varTypeIsGC(dstTyp) && (genTypeSize(valTyp) == genTypeSize(dstTyp)))
{
// We can have assignments that require a change of register file, e.g. for arguments
// and call returns. Lowering and Codegen will handle these.
ok = true;
}
else if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_INT))
{
// It could come from `ASG(struct, 0)` that was propagated to `RETURN struct(0)`,
// and now it is merging to a struct again.
assert(tmp == genReturnLocal);
ok = true;
}
else if (varTypeIsSIMD(dstTyp) && (valTyp == TYP_STRUCT))
{
assert(val->IsCall());
ok = true;
}
if (!ok)
{
gtDispTree(val);
assert(!"Incompatible types for gtNewTempAssign");
}
}
#endif
// Added this noway_assert for runtime\issue 44895, to protect against silent bad codegen
//
if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_REF))
{
noway_assert(!"Incompatible types for gtNewTempAssign");
}
// Floating Point assignments can be created during inlining
// see "Zero init inlinee locals:" in fgInlinePrependStatements
// thus we may need to set compFloatingPointUsed to true here.
//
if (varTypeUsesFloatReg(dstTyp) && (compFloatingPointUsed == false))
{
compFloatingPointUsed = true;
}
/* Create the assignment node */
GenTree* asg;
GenTree* dest = gtNewLclvNode(tmp, dstTyp);
dest->gtFlags |= GTF_VAR_DEF;
// With first-class structs, we should be propagating the class handle on all non-primitive
// struct types. We don't have a convenient way to do that for all SIMD temps, since some
// internal trees use SIMD types that are not used by the input IL. In this case, we allow
// a null type handle and derive the necessary information about the type from its varType.
CORINFO_CLASS_HANDLE valStructHnd = gtGetStructHandleIfPresent(val);
if (varTypeIsStruct(varDsc) && (valStructHnd == NO_CLASS_HANDLE) && !varTypeIsSIMD(valTyp))
{
// There are 2 special cases:
// 1. we have lost the classHandle from a FIELD node because the parent struct has overlapping fields,
// and the field was transformed into an IND or GT_LCL_FLD;
// 2. we are propagating `ASG(struct V01, 0)` to `RETURN(struct V01)`, and the `CNS_INT` has no `structHnd`;
// in these cases, we can use the type of the merge return for the assignment.
assert(val->gtEffectiveVal(true)->OperIs(GT_IND, GT_LCL_FLD, GT_CNS_INT));
assert(tmp == genReturnLocal);
valStructHnd = lvaGetStruct(genReturnLocal);
assert(valStructHnd != NO_CLASS_HANDLE);
}
if ((valStructHnd != NO_CLASS_HANDLE) && val->IsConstInitVal())
{
asg = gtNewAssignNode(dest, val);
}
else if (varTypeIsStruct(varDsc) && ((valStructHnd != NO_CLASS_HANDLE) || varTypeIsSIMD(valTyp)))
{
// The struct value may be a child of a GT_COMMA due to explicit null checks of indirs/fields.
GenTree* valx = val->gtEffectiveVal(/*commaOnly*/ true);
if (valStructHnd != NO_CLASS_HANDLE)
{
lvaSetStruct(tmp, valStructHnd, false);
}
else
{
assert(valx->gtOper != GT_OBJ);
}
dest->gtFlags |= GTF_DONT_CSE;
valx->gtFlags |= GTF_DONT_CSE;
asg = impAssignStruct(dest, val, valStructHnd, (unsigned)CHECK_SPILL_NONE, pAfterStmt, di, block);
}
else
{
// We may have a scalar type variable assigned a struct value, e.g. a 'genReturnLocal'
// when the ABI calls for returning a struct as a primitive type.
// TODO-1stClassStructs: When we stop "lying" about the types for ABI purposes, the
// 'genReturnLocal' should be the original struct type.
assert(!varTypeIsStruct(valTyp) || ((valStructHnd != NO_CLASS_HANDLE) &&
(typGetObjLayout(valStructHnd)->GetSize() == genTypeSize(varDsc))));
asg = gtNewAssignNode(dest, val);
}
if (compRationalIRForm)
{
Rationalizer::RewriteAssignmentIntoStoreLcl(asg->AsOp());
}
return asg;
}
/*****************************************************************************
*
* Create a helper call to access a COM field (iff 'assg' is non-null this is
* an assignment and 'assg' is the new value).
*/
GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp,
CORINFO_CLASS_HANDLE structType,
GenTree* assg)
{
assert(pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_ADDR_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDR_HELPER);
/* If we can't access it directly, we need to call a helper function */
GenTreeCall::Use* args = nullptr;
var_types helperType = TYP_BYREF;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_SET)
{
assert(assg != nullptr);
// helper needs pointer to struct, not struct itself
if (pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(structType != nullptr);
assg = impGetStructAddr(assg, structType, (unsigned)CHECK_SPILL_ALL, true);
}
else if (lclTyp == TYP_DOUBLE && assg->TypeGet() == TYP_FLOAT)
{
assg = gtNewCastNode(TYP_DOUBLE, assg, false, TYP_DOUBLE);
}
else if (lclTyp == TYP_FLOAT && assg->TypeGet() == TYP_DOUBLE)
{
assg = gtNewCastNode(TYP_FLOAT, assg, false, TYP_FLOAT);
}
args = gtNewCallArgs(assg);
helperType = TYP_VOID;
}
else if (access & CORINFO_ACCESS_GET)
{
helperType = lclTyp;
// The calling convention for the helper does not take into
// account optimization of primitive structs.
if ((pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT) && !varTypeIsStruct(lclTyp))
{
helperType = TYP_STRUCT;
}
}
}
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT || pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(pFieldInfo->structType != nullptr);
args = gtPrependNewCallArg(gtNewIconEmbClsHndNode(pFieldInfo->structType), args);
}
GenTree* fieldHnd = impTokenToHandle(pResolvedToken);
if (fieldHnd == nullptr)
{ // compDonotInline()
return nullptr;
}
args = gtPrependNewCallArg(fieldHnd, args);
// If it's a static field, we shouldn't have an object node
// If it's an instance field, we have an object node
assert((pFieldInfo->fieldAccessor != CORINFO_FIELD_STATIC_ADDR_HELPER) ^ (objPtr == nullptr));
if (objPtr != nullptr)
{
args = gtPrependNewCallArg(objPtr, args);
}
GenTreeCall* call = gtNewHelperCallNode(pFieldInfo->helper, genActualType(helperType), args);
#if FEATURE_MULTIREG_RET
if (varTypeIsStruct(call))
{
call->InitializeStructReturnType(this, structType, call->GetUnmanagedCallConv());
}
#endif // FEATURE_MULTIREG_RET
GenTree* result = call;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_GET)
{
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT)
{
if (!varTypeIsStruct(lclTyp))
{
// get the result as primitive type
result = impGetStructAddr(result, structType, (unsigned)CHECK_SPILL_ALL, true);
result = gtNewOperNode(GT_IND, lclTyp, result);
}
}
else if (varTypeIsIntegral(lclTyp) && genTypeSize(lclTyp) < genTypeSize(TYP_INT))
{
// The helper does not extend the small return types.
result = gtNewCastNode(genActualType(lclTyp), result, false, lclTyp);
}
}
}
else
{
// OK, now do the indirection
if (access & CORINFO_ACCESS_GET)
{
if (varTypeIsStruct(lclTyp))
{
result = gtNewObjNode(structType, result);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
}
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF);
}
else if (access & CORINFO_ACCESS_SET)
{
if (varTypeIsStruct(lclTyp))
{
result = impAssignStructPtr(result, assg, structType, (unsigned)CHECK_SPILL_ALL);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
result = gtNewAssignNode(result, assg);
}
}
}
return result;
}
/*****************************************************************************
*
* Return true if the given node (excluding child trees) contains side effects.
* Note that it does not recurse, and children need to be handled separately.
* It may return false even if the node has GTF_SIDE_EFFECT (because of its children).
*
* Similar to OperMayThrow() (but handles GT_CALLs specially), but considers
* assignments too.
*/
bool Compiler::gtNodeHasSideEffects(GenTree* tree, GenTreeFlags flags)
{
if (flags & GTF_ASG)
{
// TODO-Bug: This only checks for GT_ASG/GT_STORE_DYN_BLK but according to OperRequiresAsgFlag
// there are many more opers that are considered to have an assignment side effect: atomic ops
// (GT_CMPXCHG & co.), GT_MEMORYBARRIER (not classified as an atomic op) and HW intrinsic
// memory stores. Atomic ops have special handling in gtExtractSideEffList but the others
// will simply be dropped if they are ever subject to an "extract side effects" operation.
// It is possible that the reason no bugs have yet been observed in this area is that the
// other nodes are likely to always be tree roots.
if (tree->OperIs(GT_ASG, GT_STORE_DYN_BLK))
{
return true;
}
}
// Are there only GTF_CALL side effects remaining? (and no other side effect kinds)
if (flags & GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
GenTreeCall* const call = tree->AsCall();
const bool ignoreExceptions = (flags & GTF_EXCEPT) == 0;
const bool ignoreCctors = (flags & GTF_IS_IN_CSE) != 0; // We can CSE helpers that run cctors.
if (!call->HasSideEffects(this, ignoreExceptions, ignoreCctors))
{
// If this call is otherwise side effect free, check its arguments.
for (GenTreeCall::Use& use : call->Args())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// I'm a little worried that args that assign to temps that are late args will look like
// side effects...but better to be conservative for now.
for (GenTreeCall::Use& use : call->LateArgs())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// Otherwise:
return false;
}
// Otherwise the GT_CALL is considered to have side-effects.
return true;
}
}
if (flags & GTF_EXCEPT)
{
if (tree->OperMayThrow(this))
{
return true;
}
}
// Expressions declared as CSE by (e.g.) hoisting code are considered to have relevant side
// effects (if we care about GTF_MAKE_CSE).
if ((flags & GTF_MAKE_CSE) && (tree->gtFlags & GTF_MAKE_CSE))
{
return true;
}
return false;
}
/*****************************************************************************
* Returns true if the expr tree has any side effects.
*/
bool Compiler::gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags /* = GTF_SIDE_EFFECT*/)
{
// These are the side effect flags that we care about for this tree
GenTreeFlags sideEffectFlags = tree->gtFlags & flags;
// Does this tree have any Side-effect flags set that we care about?
if (sideEffectFlags == 0)
{
// No, it doesn't.
return false;
}
if (sideEffectFlags == GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
// Generally all trees that contain GT_CALL nodes are considered to have side-effects.
//
if (tree->AsCall()->gtCallType == CT_HELPER)
{
// If this node is a helper call we may not care about the side-effects.
// Note that gtNodeHasSideEffects checks the side effects of the helper itself
// as well as the side effects of its arguments.
return gtNodeHasSideEffects(tree, flags);
}
}
else if (tree->OperGet() == GT_INTRINSIC)
{
if (gtNodeHasSideEffects(tree, flags))
{
return true;
}
if (gtNodeHasSideEffects(tree->AsOp()->gtOp1, flags))
{
return true;
}
if ((tree->AsOp()->gtOp2 != nullptr) && gtNodeHasSideEffects(tree->AsOp()->gtOp2, flags))
{
return true;
}
return false;
}
}
return true;
}
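//------------------------------------------------------------------------
// gtBuildCommaList: prepend an expression to a (possibly empty) list of
//    expressions linked by GT_COMMA nodes.
//
// Arguments:
//    list - the existing comma list, or nullptr if the list has not started
//    expr - the expression to prepend
//
// Return Value:
//    GT_COMMA(expr, list) with effect flags (and value numbers, when
//    available) propagated, or simply 'expr' when 'list' is nullptr, so the
//    original execution order of the side effects is preserved.
//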
GenTree* Compiler::gtBuildCommaList(GenTree* list, GenTree* expr)
{
// 'list' starts off as null,
// and when it is null we haven't started the list yet.
//
if (list != nullptr)
{
// Create a GT_COMMA that appends 'expr' in front of the remaining set of expressions in (*list)
GenTree* result = gtNewOperNode(GT_COMMA, TYP_VOID, expr, list);
// Set the flags in the comma node
result->gtFlags |= (list->gtFlags & GTF_ALL_EFFECT);
result->gtFlags |= (expr->gtFlags & GTF_ALL_EFFECT);
DBEXEC(fgGlobalMorph, result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
// 'list' and 'expr' should have valuenumbers defined for both or for neither one (unless we are remorphing,
// in which case a prior transform involving either node may have discarded or otherwise invalidated the value
// numbers).
assert((list->gtVNPair.BothDefined() == expr->gtVNPair.BothDefined()) || !fgGlobalMorph);
// Set the ValueNumber 'gtVNPair' for the new GT_COMMA node
//
if (list->gtVNPair.BothDefined() && expr->gtVNPair.BothDefined())
{
// The result of a GT_COMMA node is op2, the normal value number is op2vnp
// But we also need to include the union of side effects from op1 and op2.
// we compute this value into exceptions_vnp.
ValueNumPair op1vnp;
ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
ValueNumPair op2vnp;
ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(expr->gtVNPair, &op1vnp, &op1Xvnp);
vnStore->VNPUnpackExc(list->gtVNPair, &op2vnp, &op2Xvnp);
ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet();
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op1Xvnp);
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp);
result->gtVNPair = vnStore->VNPWithExc(op2vnp, exceptions_vnp);
}
return result;
}
else
{
// The 'expr' will start the list of expressions
return expr;
}
}
//------------------------------------------------------------------------
// gtExtractSideEffList: Extracts side effects from the given expression.
//
// Arguments:
// expr - the expression tree to extract side effects from
// pList - pointer to a (possibly null) GT_COMMA list that
// will contain the extracted side effects
// flags - side effect flags to be considered
// ignoreRoot - ignore side effects on the expression root node
//
// Notes:
// Side effects are prepended to the GT_COMMA list such that op1 of
// each comma node holds the side effect tree and op2 points to the
// next comma node. The original side effect execution order is preserved.
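//    For example, extracting the side effects E1, E2 and E3 (in execution order) into an
//    empty list yields "COMMA(E1, COMMA(E2, E3))".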
//
void Compiler::gtExtractSideEffList(GenTree* expr,
GenTree** pList,
GenTreeFlags flags /* = GTF_SIDE_EFFECT*/,
bool ignoreRoot /* = false */)
{
class SideEffectExtractor final : public GenTreeVisitor<SideEffectExtractor>
{
public:
const GenTreeFlags m_flags;
ArrayStack<GenTree*> m_sideEffects;
enum
{
DoPreOrder = true,
UseExecutionOrder = true
};
SideEffectExtractor(Compiler* compiler, GenTreeFlags flags)
: GenTreeVisitor(compiler), m_flags(flags), m_sideEffects(compiler->getAllocator(CMK_SideEffects))
{
}
fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
GenTree* node = *use;
bool treeHasSideEffects = m_compiler->gtTreeHasSideEffects(node, m_flags);
if (treeHasSideEffects)
{
if (m_compiler->gtNodeHasSideEffects(node, m_flags))
{
PushSideEffects(node);
if (node->OperIsBlk() && !node->OperIsStoreBlk())
{
JITDUMP("Replace an unused OBJ/BLK node [%06d] with a NULLCHECK\n", dspTreeID(node));
m_compiler->gtChangeOperToNullCheck(node, m_compiler->compCurBB);
}
return Compiler::WALK_SKIP_SUBTREES;
}
// TODO-Cleanup: These have GTF_ASG set but for some reason gtNodeHasSideEffects ignores
// them. See the related gtNodeHasSideEffects comment as well.
// Also, these nodes must always be preserved, no matter what side effect flags are passed
// in. But then it should never be the case that gtExtractSideEffList gets called without
// specifying GTF_ASG so there doesn't seem to be any reason to be inconsistent with
// gtNodeHasSideEffects and make this check unconditionally.
if (node->OperIsAtomicOp())
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
if ((m_flags & GTF_EXCEPT) != 0)
{
// Special case - GT_ADDR of GT_IND nodes of TYP_STRUCT have to be kept together.
if (node->OperIs(GT_ADDR) && node->gtGetOp1()->OperIsIndir() &&
(node->gtGetOp1()->TypeGet() == TYP_STRUCT))
{
JITDUMP("Keep the GT_ADDR and GT_IND together:\n");
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
}
// Generally all GT_CALL nodes are considered to have side-effects.
// So if we get here it must be a helper call that we decided does not have
// side effects that we need to keep.
assert(!node->OperIs(GT_CALL) || (node->AsCall()->gtCallType == CT_HELPER));
}
if ((m_flags & GTF_IS_IN_CSE) != 0)
{
// If we're doing CSE then we also need to unmark CSE nodes. This will fail for CSE defs,
// those need to be extracted as if they're side effects.
if (!UnmarkCSE(node))
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
// The existence of CSE defs and uses is not propagated up the tree like side
// effects are. We need to continue visiting the tree as if it has side effects.
treeHasSideEffects = true;
}
return treeHasSideEffects ? Compiler::WALK_CONTINUE : Compiler::WALK_SKIP_SUBTREES;
}
private:
bool UnmarkCSE(GenTree* node)
{
assert(m_compiler->optValnumCSE_phase);
if (m_compiler->optUnmarkCSE(node))
{
// The call to optUnmarkCSE(node) should have cleared any CSE info.
assert(!IS_CSE_INDEX(node->gtCSEnum));
return true;
}
else
{
assert(IS_CSE_DEF(node->gtCSEnum));
#ifdef DEBUG
if (m_compiler->verbose)
{
printf("Preserving the CSE def #%02d at ", GET_CSE_INDEX(node->gtCSEnum));
m_compiler->printTreeID(node);
}
#endif
return false;
}
}
void PushSideEffects(GenTree* node)
{
// The extracted side effect will no longer be an argument, so unmark it.
// This is safe to do because the side effects will be visited in pre-order,
// aborting as soon as any tree is extracted. Thus if an argument for a call
// is being extracted, it is guaranteed that the call itself will not be.
node->gtFlags &= ~GTF_LATE_ARG;
m_sideEffects.Push(node);
}
};
SideEffectExtractor extractor(this, flags);
if (ignoreRoot)
{
for (GenTree* op : expr->Operands())
{
extractor.WalkTree(&op, nullptr);
}
}
else
{
extractor.WalkTree(&expr, nullptr);
}
GenTree* list = *pList;
// The extractor returns side effects in execution order but gtBuildCommaList prepends
// to the comma-based side effect list so we have to build the list in reverse order.
// This is also why the list cannot be built while traversing the tree.
// The number of side effects is usually small (<= 4), less than the ArrayStack's
// built-in size, so memory allocation is avoided.
while (!extractor.m_sideEffects.Empty())
{
list = gtBuildCommaList(list, extractor.m_sideEffects.Pop());
}
*pList = list;
}
/*****************************************************************************
*
* For debugging only - displays a tree node list and makes sure all the
* links are correctly set.
*/
#ifdef DEBUG
void dispNodeList(GenTree* list, bool verbose)
{
GenTree* last = nullptr;
GenTree* next;
if (!list)
{
return;
}
for (;;)
{
next = list->gtNext;
if (verbose)
{
printf("%08X -> %08X -> %08X\n", last, list, next);
}
assert(!last || last->gtNext == list);
assert(next == nullptr || next->gtPrev == list);
if (!next)
{
break;
}
last = list;
list = next;
}
printf(""); // null string means flush
}
#endif
/*****************************************************************************
* Callback to mark the nodes of a qmark-colon subtree that are conditionally
* executed.
*/
/* static */
Compiler::fgWalkResult Compiler::gtMarkColonCond(GenTree** pTree, fgWalkData* data)
{
assert(data->pCallbackData == nullptr);
(*pTree)->gtFlags |= GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
 * Callback to clear the conditionally executed flags of nodes that will no longer
 * be conditionally executed. Note that when we find another colon we must stop,
 * as the nodes below this one WILL be conditionally executed. This callback is
 * called when folding a qmark condition (i.e. the condition is constant).
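 * For example, when folding "cond ? x : y" where "cond" turns out to be always true,
 * the nodes under "x" are no longer conditionally executed, so this callback clears
 * their GTF_COLON_COND flags (stopping at any nested GT_COLON).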
*/
/* static */
Compiler::fgWalkResult Compiler::gtClearColonCond(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
assert(data->pCallbackData == nullptr);
if (tree->OperGet() == GT_COLON)
{
// Nodes below this will be conditionally executed.
return WALK_SKIP_SUBTREES;
}
tree->gtFlags &= ~GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Callback used by the tree walker to implement fgFindLink()
*/
static Compiler::fgWalkResult gtFindLinkCB(GenTree** pTree, Compiler::fgWalkData* cbData)
{
Compiler::FindLinkData* data = (Compiler::FindLinkData*)cbData->pCallbackData;
if (*pTree == data->nodeToFind)
{
data->result = pTree;
data->parent = cbData->parent;
return Compiler::WALK_ABORT;
}
return Compiler::WALK_CONTINUE;
}
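//------------------------------------------------------------------------
// gtFindLink: Find the use edge ("link") for a node within a statement's tree.
//
// Arguments:
//    stmt - the statement to search
//    node - the node whose use edge is being looked for
//
// Return Value:
//    A FindLinkData whose 'result' points at the use edge and whose 'parent' is the
//    user node, if the node was found; otherwise 'result' and 'parent' are nullptr.
//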
Compiler::FindLinkData Compiler::gtFindLink(Statement* stmt, GenTree* node)
{
FindLinkData data = {node, nullptr, nullptr};
fgWalkResult result = fgWalkTreePre(stmt->GetRootNodePointer(), gtFindLinkCB, &data);
if (result == WALK_ABORT)
{
assert(data.nodeToFind == *data.result);
return data;
}
else
{
return {node, nullptr, nullptr};
}
}
/*****************************************************************************
*
* Callback that checks if a tree node has oper type GT_CATCH_ARG
*/
static Compiler::fgWalkResult gtFindCatchArg(GenTree** pTree, Compiler::fgWalkData* /* data */)
{
return ((*pTree)->OperGet() == GT_CATCH_ARG) ? Compiler::WALK_ABORT : Compiler::WALK_CONTINUE;
}
/*****************************************************************************/
bool Compiler::gtHasCatchArg(GenTree* tree)
{
if (((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0) && (fgWalkTreePre(&tree, gtFindCatchArg) == WALK_ABORT))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// gtHasCallOnStack:
//
// Arguments:
// parentStack: a context (stack of parent nodes)
//
// Return Value:
// returns true if any of the parent nodes are a GT_CALL
//
// Assumptions:
// We have a stack of parent nodes. This generally requires that
// we are performing a recursive tree walk using struct fgWalkData
//
//------------------------------------------------------------------------
/* static */ bool Compiler::gtHasCallOnStack(GenTreeStack* parentStack)
{
for (int i = 0; i < parentStack->Height(); i++)
{
GenTree* node = parentStack->Top(i);
if (node->OperGet() == GT_CALL)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------
// gtGetTypeProducerKind: determine if a tree produces a runtime type, and
// if so, how.
//
// Arguments:
// tree - tree to examine
//
// Return Value:
// TypeProducerKind for the tree.
//
// Notes:
// Checks to see if this tree returns a RuntimeType value, and if so,
// how that value is determined.
//
// Currently handles these cases
// 1) The result of Object::GetType
// 2) The result of typeof(...)
// 3) A null reference
// 4) Tree is otherwise known to have type RuntimeType
//
// The null reference case is surprisingly common because operator
// overloading turns the otherwise innocuous
//
// Type t = ....;
// if (t == null)
//
// into a method call.
Compiler::TypeProducerKind Compiler::gtGetTypeProducerKind(GenTree* tree)
{
if (tree->gtOper == GT_CALL)
{
if (tree->AsCall()->gtCallType == CT_HELPER)
{
if (gtIsTypeHandleToRuntimeTypeHelper(tree->AsCall()))
{
return TPK_Handle;
}
}
else if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
if (lookupNamedIntrinsic(tree->AsCall()->gtCallMethHnd) == NI_System_Object_GetType)
{
return TPK_GetType;
}
}
}
else if ((tree->gtOper == GT_INTRINSIC) && (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType))
{
return TPK_GetType;
}
else if ((tree->gtOper == GT_CNS_INT) && (tree->AsIntCon()->gtIconVal == 0))
{
return TPK_Null;
}
else
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(tree, &isExact, &isNonNull);
if (clsHnd != NO_CLASS_HANDLE && clsHnd == info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE))
{
return TPK_Other;
}
}
return TPK_Unknown;
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHelper -- see if a helper call is constructing
//    a RuntimeType from a handle
//
// Arguments:
//    call - helper call to examine
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call)
{
return call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) ||
call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL);
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHandleHelper -- see if a helper call is constructing
//    a RuntimeTypeHandle from a handle
//
// Arguments:
//    call    - helper call to examine
//    pHelper - optional pointer to a variable that receives the type of the helper
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper)
{
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
}
else if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL;
}
if (pHelper != nullptr)
{
*pHelper = helper;
}
return helper != CORINFO_HELP_UNDEF;
}
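// Returns true if 'tree' is a CSE candidate (has a CSE index assigned) and we are
// currently in the value-number-based CSE phase.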
bool Compiler::gtIsActiveCSE_Candidate(GenTree* tree)
{
return (optValnumCSE_phase && IS_CSE_INDEX(tree->gtCSEnum));
}
/*****************************************************************************/
struct ComplexityStruct
{
unsigned m_numNodes;
unsigned m_nodeLimit;
ComplexityStruct(unsigned nodeLimit) : m_numNodes(0), m_nodeLimit(nodeLimit)
{
}
};
static Compiler::fgWalkResult ComplexityExceedsWalker(GenTree** pTree, Compiler::fgWalkData* data)
{
ComplexityStruct* pComplexity = (ComplexityStruct*)data->pCallbackData;
if (++pComplexity->m_numNodes > pComplexity->m_nodeLimit)
{
return Compiler::WALK_ABORT;
}
else
{
return Compiler::WALK_CONTINUE;
}
}
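//------------------------------------------------------------------------
// gtComplexityExceeds: Check whether a tree's node count exceeds a given limit.
//
// Arguments:
//    tree  - pointer to the tree to walk
//    limit - maximum number of nodes allowed
//
// Return Value:
//    True if the tree contains more than 'limit' nodes; the walk aborts as soon as
//    the limit is exceeded.
//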
bool Compiler::gtComplexityExceeds(GenTree** tree, unsigned limit)
{
ComplexityStruct complexity(limit);
if (fgWalkTreePre(tree, &ComplexityExceedsWalker, &complexity) == WALK_ABORT)
{
return true;
}
else
{
return false;
}
}
bool GenTree::IsPhiNode()
{
return (OperGet() == GT_PHI_ARG) || (OperGet() == GT_PHI) || IsPhiDefn();
}
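// Returns true if this node is a PHI definition, i.e. "ASG(LCL_VAR, PHI(...))"
// (or "STORE_LCL_VAR(PHI(...))" in LIR form).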
bool GenTree::IsPhiDefn()
{
bool res = ((OperGet() == GT_ASG) && (AsOp()->gtOp2 != nullptr) && (AsOp()->gtOp2->OperGet() == GT_PHI)) ||
((OperGet() == GT_STORE_LCL_VAR) && (AsOp()->gtOp1 != nullptr) && (AsOp()->gtOp1->OperGet() == GT_PHI));
assert(!res || OperGet() == GT_STORE_LCL_VAR || AsOp()->gtOp1->OperGet() == GT_LCL_VAR);
return res;
}
//------------------------------------------------------------------------
// IsPartialLclFld: Check for a GT_LCL_FLD whose type is a different size than the lclVar.
//
// Arguments:
// comp - the Compiler object.
//
// Return Value:
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR
bool GenTree::IsPartialLclFld(Compiler* comp)
{
return ((gtOper == GT_LCL_FLD) &&
(comp->lvaTable[this->AsLclVarCommon()->GetLclNum()].lvExactSize != genTypeSize(gtType)));
}
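//------------------------------------------------------------------------
// DefinesLocal: Check whether this tree is a store that defines a local.
//
// Arguments:
//    comp        - the Compiler object
//    pLclVarTree - [out] set to the local node being defined, if any
//    pIsEntire   - [out] (optional) set to true if the entire local is defined,
//                  false if only a part of it is
//
// Return Value:
//    True if this is an assignment (or a block store) whose destination is a local,
//    possibly through an indirection of the local's address; false otherwise.
//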
bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
GenTreeBlk* blkNode = nullptr;
if (OperIs(GT_ASG))
{
if (AsOp()->gtOp1->IsLocal())
{
GenTreeLclVarCommon* lclVarTree = AsOp()->gtOp1->AsLclVarCommon();
*pLclVarTree = lclVarTree;
if (pIsEntire != nullptr)
{
if (lclVarTree->IsPartialLclFld(comp))
{
*pIsEntire = false;
}
else
{
*pIsEntire = true;
}
}
return true;
}
else if (AsOp()->gtOp1->OperGet() == GT_IND)
{
GenTree* indArg = AsOp()->gtOp1->AsOp()->gtOp1;
return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire);
}
else if (AsOp()->gtOp1->OperIsBlk())
{
blkNode = AsOp()->gtOp1->AsBlk();
}
}
else if (OperIsBlk())
{
blkNode = this->AsBlk();
}
if (blkNode != nullptr)
{
GenTree* destAddr = blkNode->Addr();
unsigned width = blkNode->Size();
// Do we care about whether this assigns the entire variable?
if (pIsEntire != nullptr && blkNode->OperIs(GT_STORE_DYN_BLK))
{
GenTree* blockWidth = blkNode->AsStoreDynBlk()->gtDynamicSize;
if (blockWidth->IsCnsIntOrI())
{
assert(blockWidth->AsIntConCommon()->FitsInI32());
width = static_cast<unsigned>(blockWidth->AsIntConCommon()->IconValue());
if (width == 0)
{
return false;
}
}
}
return destAddr->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
// Otherwise...
return false;
}
// Returns true if this GenTree defines a result which is based on the address of a local.
bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
if (OperGet() == GT_ADDR || OperGet() == GT_LCL_VAR_ADDR)
{
GenTree* addrArg = this;
if (OperGet() == GT_ADDR)
{
addrArg = AsOp()->gtOp1;
}
if (addrArg->IsLocal() || addrArg->OperIsLocalAddr())
{
GenTreeLclVarCommon* addrArgLcl = addrArg->AsLclVarCommon();
*pLclVarTree = addrArgLcl;
if (pIsEntire != nullptr)
{
unsigned lclOffset = addrArgLcl->GetLclOffs();
if (lclOffset != 0)
{
// We aren't updating the bytes at [0..lclOffset-1] so *pIsEntire should be set to false
*pIsEntire = false;
}
else
{
unsigned lclNum = addrArgLcl->GetLclNum();
unsigned varWidth = comp->lvaLclExactSize(lclNum);
if (comp->lvaTable[lclNum].lvNormalizeOnStore())
{
// It's normalize on store, so use the full storage width -- writing to low bytes won't
// necessarily yield a normalized value.
varWidth = genTypeStSz(var_types(comp->lvaTable[lclNum].lvType)) * sizeof(int);
}
*pIsEntire = (varWidth == width);
}
}
return true;
}
else if (addrArg->OperGet() == GT_IND)
{
// A GT_ADDR of a GT_IND can both be optimized away, recurse using the child of the GT_IND
return addrArg->AsOp()->gtOp1->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->IsCnsIntOrI())
{
// If we're just adding a zero then we allow an IsEntire match against 'width';
// otherwise we change 'width' to zero to disallow an IsEntire match.
return AsOp()->gtOp2->DefinesLocalAddr(comp, AsOp()->gtOp1->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
// If we're just adding a zero then we allow an IsEntire match against 'width';
// otherwise we change 'width' to zero to disallow an IsEntire match.
return AsOp()->gtOp1->DefinesLocalAddr(comp, AsOp()->gtOp2->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
}
// Post rationalization we could have GT_IND(GT_LEA(..)) trees.
else if (OperGet() == GT_LEA)
{
// This method gets invoked during liveness computation and therefore it is critical
// that we don't miss a 'use' of any local. The logic below assumes that in the case
// of LEA(base, index, offset) only the base can be a GT_LCL_VAR_ADDR, never the index.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
GenTree* index = AsOp()->gtOp2;
if (index != nullptr)
{
assert(!index->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire));
}
#endif // DEBUG
// base
GenTree* base = AsOp()->gtOp1;
if (base != nullptr)
{
// Lea could have an Indir as its base.
if (base->OperGet() == GT_IND)
{
base = base->AsOp()->gtOp1->gtEffectiveVal(/*commas only*/ true);
}
return base->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsLocalExpr: Determine if this is a LclVarCommon node and return some
// additional info about it in the two out parameters.
//
// Arguments:
// comp - The Compiler instance
// pLclVarTree - An "out" argument that returns the local tree as a
// LclVarCommon, if it is indeed local.
// pFldSeq - An "out" argument that returns the value numbering field
// sequence for the node, if any.
//
// Return Value:
// Returns true, and sets the out arguments accordingly, if this is
// a LclVarCommon node.
bool GenTree::IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq)
{
if (IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = AsLclVarCommon();
if (OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
// If this tree evaluates some sum of a local address and some constants,
// return the node for the local being addressed
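// For example, for "ADD(CNS_INT 8, ADDR(LCL_VAR V02))" this returns the LCL_VAR node for V02.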
GenTreeLclVarCommon* GenTree::IsLocalAddrExpr()
{
if (OperGet() == GT_ADDR)
{
return AsOp()->gtOp1->IsLocal() ? AsOp()->gtOp1->AsLclVarCommon() : nullptr;
}
else if (OperIsLocalAddr())
{
return this->AsLclVarCommon();
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp2->IsLocalAddrExpr();
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp1->IsLocalAddrExpr();
}
}
// Otherwise...
return nullptr;
}
//------------------------------------------------------------------------
// IsLocalAddrExpr: finds if "this" is an address of a local var/fld.
//
// Arguments:
// comp - a compiler instance;
//    pLclVarTree - [out] set to the node indicating the local variable, if found;
//    pFldSeq     - [out] set to the field sequence representing the field, else null;
//    pOffset     - [out] (optional) set to the sum offset of the lcl/fld, if found,
// note it does not include pLclVarTree->GetLclOffs().
//
// Returns:
// Returns true if "this" represents the address of a local, or a field of a local.
//
// Notes:
// It is mostly used for optimizations but assertion propagation depends on it for correctness.
// So if this function does not recognize a def of a LCL_VAR we can have an incorrect optimization.
//
bool GenTree::IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset /* = nullptr */)
{
if (OperGet() == GT_ADDR)
{
assert(!comp->compRationalIRForm);
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = addrArg->AsLclVarCommon();
if (addrArg->OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(addrArg->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
else if (OperIsLocalAddr())
{
*pLclVarTree = this->AsLclVarCommon();
if (this->OperGet() == GT_LCL_FLD_ADDR)
{
*pFldSeq = comp->GetFieldSeqStore()->Append(this->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp1->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp2->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp2->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp1->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsImplicitByrefParameterValue: determine if this tree is the entire
// value of a local implicit byref parameter
//
// Arguments:
// compiler -- compiler instance
//
// Return Value:
// GenTreeLclVar node for the local, or nullptr.
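//
// Notes:
//    Recognizes the shapes "LCL_VAR", "OBJ(LCL_VAR)" and "OBJ(ADDR(LCL_VAR))", and only
//    returns the local if it is an implicit byref parameter on the current target.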
//
GenTreeLclVar* GenTree::IsImplicitByrefParameterValue(Compiler* compiler)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
GenTreeLclVar* lcl = nullptr;
if (OperIs(GT_LCL_VAR))
{
lcl = AsLclVar();
}
else if (OperIs(GT_OBJ))
{
GenTree* addr = AsIndir()->Addr();
if (addr->OperIs(GT_LCL_VAR))
{
lcl = addr->AsLclVar();
}
else if (addr->OperIs(GT_ADDR))
{
GenTree* base = addr->AsOp()->gtOp1;
if (base->OperIs(GT_LCL_VAR))
{
lcl = base->AsLclVar();
}
}
}
if ((lcl != nullptr) && compiler->lvaIsImplicitByRefLocal(lcl->GetLclNum()))
{
return lcl;
}
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
return nullptr;
}
//------------------------------------------------------------------------
// IsLclVarUpdateTree: Determine whether this is an assignment tree of the
// form Vn = Vn 'oper' 'otherTree' where Vn is a lclVar
//
// Arguments:
// pOtherTree - An "out" argument in which 'otherTree' will be returned.
// pOper - An "out" argument in which 'oper' will be returned.
//
// Return Value:
// If the tree is of the above form, the lclNum of the variable being
// updated is returned, and 'pOtherTree' and 'pOper' are set.
// Otherwise, returns BAD_VAR_NUM.
//
// Notes:
// 'otherTree' can have any shape.
// We avoid worrying about whether the op is commutative by only considering the
// first operand of the rhs. It is expected that most trees of this form will
// already have the lclVar on the lhs.
// TODO-CQ: Evaluate whether there are missed opportunities due to this, or
// whether gtSetEvalOrder will already have put the lclVar on the lhs in
// the cases of interest.
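//    For example, for "ASG(LCL_VAR V01, ADD(LCL_VAR V01, X))" this returns the lclNum
//    of V01, sets '*pOtherTree' to X and '*pOper' to GT_ADD.
//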
unsigned GenTree::IsLclVarUpdateTree(GenTree** pOtherTree, genTreeOps* pOper)
{
unsigned lclNum = BAD_VAR_NUM;
if (OperIs(GT_ASG))
{
GenTree* lhs = AsOp()->gtOp1;
GenTree* rhs = AsOp()->gtOp2;
if ((lhs->OperGet() == GT_LCL_VAR) && rhs->OperIsBinary())
{
unsigned lhsLclNum = lhs->AsLclVarCommon()->GetLclNum();
GenTree* rhsOp1 = rhs->AsOp()->gtOp1;
GenTree* rhsOp2 = rhs->AsOp()->gtOp2;
// Some operators, such as HWINTRINSIC, are currently declared as binary but
// may not have two operands. We must check that both operands actually exist.
if ((rhsOp1 != nullptr) && (rhsOp2 != nullptr) && (rhsOp1->OperGet() == GT_LCL_VAR) &&
(rhsOp1->AsLclVarCommon()->GetLclNum() == lhsLclNum))
{
lclNum = lhsLclNum;
*pOtherTree = rhsOp2;
*pOper = rhs->OperGet();
}
}
}
return lclNum;
}
#ifdef DEBUG
//------------------------------------------------------------------------
// canBeContained: check whether this tree node may be a subcomponent of its parent for purposes
// of code generation.
//
// Return Value:
// True if it is possible to contain this node and false otherwise.
//
bool GenTree::canBeContained() const
{
assert(OperIsLIR());
if (gtHasReg())
{
return false;
}
// It is not possible for nodes that do not produce values or that are not containable values to be contained.
if (!IsValue() || ((DebugOperKind() & DBK_NOCONTAIN) != 0) || (OperIsHWIntrinsic() && !isContainableHWIntrinsic()))
{
return false;
}
return true;
}
#endif // DEBUG
//------------------------------------------------------------------------
// isContained: check whether this tree node is a subcomponent of its parent for codegen purposes
//
// Return Value:
// Returns true if there is no code generated explicitly for this node.
// Essentially, it will be rolled into the code generation for the parent.
//
// Assumptions:
// This method relies upon the value of the GTF_CONTAINED flag.
// Therefore this method is only valid after Lowering.
// Also note that register allocation or other subsequent phases may cause
// nodes to become contained (or not) and therefore this property may change.
//
bool GenTree::isContained() const
{
assert(OperIsLIR());
const bool isMarkedContained = ((gtFlags & GTF_CONTAINED) != 0);
#ifdef DEBUG
if (!canBeContained())
{
assert(!isMarkedContained);
}
// these actually produce a register (the flags reg, we just don't model it)
// and are a separate instruction from the branch that consumes the result.
// They can only produce a result if the child is a SIMD equality comparison.
else if (OperIsCompare())
{
assert(isMarkedContained == false);
}
// if it's contained it can't be unused.
if (isMarkedContained)
{
assert(!IsUnusedValue());
}
#endif // DEBUG
return isMarkedContained;
}
// return true if node is contained and an indir
bool GenTree::isContainedIndir() const
{
return OperIsIndir() && isContained();
}
bool GenTree::isIndirAddrMode()
{
return OperIsIndir() && AsIndir()->Addr()->OperIsAddrMode() && AsIndir()->Addr()->isContained();
}
bool GenTree::isIndir() const
{
return OperGet() == GT_IND || OperGet() == GT_STOREIND;
}
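// Accessors for the components of an indirection's address: when the address is a
// contained address mode (GT_LEA), these return its base, index, scale and offset;
// otherwise they fall back to the defaults implemented below.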
bool GenTreeIndir::HasBase()
{
return Base() != nullptr;
}
bool GenTreeIndir::HasIndex()
{
return Index() != nullptr;
}
GenTree* GenTreeIndir::Base()
{
GenTree* addr = Addr();
if (isIndirAddrMode())
{
GenTree* result = addr->AsAddrMode()->Base();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return addr; // TODO: why do we return 'addr' here, but we return 'nullptr' in the equivalent Index() case?
}
}
GenTree* GenTreeIndir::Index()
{
if (isIndirAddrMode())
{
GenTree* result = Addr()->AsAddrMode()->Index();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return nullptr;
}
}
unsigned GenTreeIndir::Scale()
{
if (HasIndex())
{
return Addr()->AsAddrMode()->gtScale;
}
else
{
return 1;
}
}
ssize_t GenTreeIndir::Offset()
{
if (isIndirAddrMode())
{
return Addr()->AsAddrMode()->Offset();
}
else if (Addr()->gtOper == GT_CLS_VAR_ADDR)
{
return static_cast<ssize_t>(reinterpret_cast<intptr_t>(Addr()->AsClsVar()->gtClsVarHnd));
}
else if (Addr()->IsCnsIntOrI() && Addr()->isContained())
{
return Addr()->AsIntConCommon()->IconValue();
}
else
{
return 0;
}
}
//------------------------------------------------------------------------
// GenTreeIntConCommon::ImmedValNeedsReloc: does this immediate value need a relocation recorded with the VM?
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if this immediate value requires us to record a relocation for it; false otherwise.
bool GenTreeIntConCommon::ImmedValNeedsReloc(Compiler* comp)
{
return comp->opts.compReloc && (gtOper == GT_CNS_INT) && IsIconHandle();
}
//------------------------------------------------------------------------
// ImmedValCanBeFolded: can this immediate value be folded for op?
//
// Arguments:
// comp - Compiler instance
// op - Tree operator
//
// Return Value:
// True if this immediate value can be folded for op; false otherwise.
bool GenTreeIntConCommon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op)
{
// In general, immediate values that need relocations can't be folded.
// There are cases where we do want to allow folding of handle comparisons
// (e.g., typeof(T) == typeof(int)).
return !ImmedValNeedsReloc(comp) || (op == GT_EQ) || (op == GT_NE);
}
#ifdef TARGET_AMD64
// Returns true if this absolute address fits within the base of an addr mode.
// On Amd64 this effectively means whether an absolute indirect address can
// be encoded as a 32-bit offset relative to IP or zero.
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
if (comp->opts.compReloc)
{
// During NGen the JIT is always asked to generate relocatable code.
// Hence the JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
// During Jitting, we are allowed to generate non-relocatable code.
// On Amd64 we can encode an absolute indirect addr as an offset relative to zero or RIP.
// An absolute indir addr that fits within 32 bits can be encoded as an offset relative
// to zero. All other absolute indir addrs are attempted to be encoded as RIP-relative,
// based on the reloc hint provided by the VM. RIP-relative encoding is preferred over
// relative-to-zero because the former is one byte smaller than the latter. For this
// reason we check for the reloc hint first and then whether the addr fits in 32 bits.
//
// VM starts off with an initial state to allow both data and code address to be encoded as
// pc-relative offsets. Hence JIT will attempt to encode all absolute addresses as pc-relative
// offsets. It is possible while jitting a method, an address could not be encoded as a
// pc-relative offset. In that case VM will note the overflow and will trigger re-jitting
// of the method with reloc hints turned off for all future methods. Second time around
// jitting will succeed since JIT will not attempt to encode data addresses as pc-relative
// offsets. Note that the JIT will always attempt to relocate code addresses (e.g. call addresses).
// After an overflow, VM will assume any relocation recorded is for a code address and will
// emit jump thunk if it cannot be encoded as pc-relative offset.
return (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue())) || FitsInI32();
}
}
// Returns true if this icon value, when encoded as an address, needs a relocation recorded with the VM
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
if (comp->opts.compReloc)
{
// During NGen the JIT is always asked to generate relocatable code.
// Hence the JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
return IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue());
}
}
#elif defined(TARGET_X86)
// Returns true if this absolute address fits within the base of an addr mode.
// On x86 all addresses are 4 bytes and can be directly encoded in an addr mode.
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
return IsCnsIntOrI();
}
// Returns true if this icon value, when encoded as an address, needs a relocation recorded with the VM
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
// If generating relocatable code, icons should be reported for recording relocations.
return comp->opts.compReloc && IsIconHandle();
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// IsFieldAddr: Is "this" a static or class field address?
//
// Recognizes the following patterns:
// this: ADD(baseAddr, CONST [FldSeq])
// this: ADD(CONST [FldSeq], baseAddr)
// this: CONST [FldSeq]
// this: Zero [FldSeq]
//
// Arguments:
// comp - the Compiler object
// pBaseAddr - [out] parameter for "the base address"
// pFldSeq - [out] parameter for the field sequence
//
// Return Value:
// If "this" matches patterns denoted above, and the FldSeq found is "full",
//    i.e. starts with a class field or a static field, and includes all the
//    struct fields that this tree represents the address of, this method will
//    return "true" and set "pBaseAddr" to some value, which must be used
// by the caller as the key into the "first field map" to obtain the actual
// value for the field. For instance fields, "base address" will be the object
// reference, for statics - the address to which the field offset with the
// field sequence is added, see "impImportStaticFieldAccess" and "fgMorphField".
//
bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq)
{
assert(TypeIs(TYP_I_IMPL, TYP_BYREF, TYP_REF));
*pBaseAddr = nullptr;
*pFldSeq = FieldSeqStore::NotAField();
GenTree* baseAddr = nullptr;
FieldSeqNode* fldSeq = FieldSeqStore::NotAField();
if (OperIs(GT_ADD))
{
// If one operand has a field sequence, the other operand must not have one
// as the order of fields in that case would not be well-defined.
if (AsOp()->gtOp1->IsCnsIntOrI() && AsOp()->gtOp1->IsIconHandle())
{
assert(!AsOp()->gtOp2->IsCnsIntOrI() || !AsOp()->gtOp2->IsIconHandle());
fldSeq = AsOp()->gtOp1->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp1->IsCnsIntOrI() || !AsOp()->gtOp1->IsIconHandle());
fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp1;
}
if (baseAddr != nullptr)
{
assert(!baseAddr->TypeIs(TYP_REF) || !comp->GetZeroOffsetFieldMap()->Lookup(baseAddr));
}
}
else if (IsCnsIntOrI() && IsIconHandle(GTF_ICON_STATIC_HDL))
{
assert(!comp->GetZeroOffsetFieldMap()->Lookup(this) && (AsIntCon()->gtFieldSeq != nullptr));
fldSeq = AsIntCon()->gtFieldSeq;
baseAddr = nullptr;
}
else if (comp->GetZeroOffsetFieldMap()->Lookup(this, &fldSeq))
{
baseAddr = this;
}
else
{
return false;
}
assert(fldSeq != nullptr);
if ((fldSeq == FieldSeqStore::NotAField()) || fldSeq->IsPseudoField())
{
return false;
}
// The above screens out obviously invalid cases, but we have more checks to perform. The
// sequence returned from this method *must* start with either a class (NOT struct) field
// or a static field. To avoid the expense of calling "getFieldClass" here, we will instead
// rely on the invariant that TYP_REF base addresses can never appear for struct fields - we
// will effectively treat such cases ("possible" in unsafe code) as undefined behavior.
if (comp->eeIsFieldStatic(fldSeq->GetFieldHandle()))
{
// TODO-VNTypes: this code is out of sync w.r.t. boxed statics that are numbered with
// VNF_PtrToStatic and treated as "simple" while here we treat them as "complex".
// TODO-VNTypes: we will always return the "baseAddr" here for now, but strictly speaking,
// we only need to do that if we have a shared field, to encode the logical "instantiation"
// argument. In all other cases, this serves no purpose and just leads to redundant maps.
*pBaseAddr = baseAddr;
*pFldSeq = fldSeq;
return true;
}
if (baseAddr->TypeIs(TYP_REF))
{
assert(!comp->eeIsValueClass(comp->info.compCompHnd->getFieldClass(fldSeq->GetFieldHandle())));
*pBaseAddr = baseAddr;
*pFldSeq = fldSeq;
return true;
}
// This case is reached, for example, if we have a chain of struct fields that are based on
// some pointer. We do not model such cases because we do not model maps for ByrefExposed
// memory, as it does not have the non-aliasing property of GcHeap and reference types.
return false;
}
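//------------------------------------------------------------------------
// gtIsStaticFieldPtrToBoxedStruct: Check whether a TYP_REF (static) field access refers
//    to the boxed storage of a value-type field rather than to an actual ref-type field.
//
// Arguments:
//    fieldNodeType - type of the field access node
//    fldHnd        - handle of the field being accessed
//
// Return Value:
//    True if the node has ref type but the field's declared type is not a ref type.
//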
bool Compiler::gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd)
{
if (fieldNodeType != TYP_REF)
{
return false;
}
noway_assert(fldHnd != nullptr);
CorInfoType cit = info.compCompHnd->getFieldType(fldHnd);
var_types fieldTyp = JITtype2varType(cit);
return fieldTyp != TYP_REF;
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// gtGetSIMDZero: Get a zero value of the appropriate SIMD type.
//
// Arguments:
// var_types - The simdType
// simdBaseJitType - The SIMD base JIT type we need
// simdHandle - The handle for the SIMD type
//
// Return Value:
// A node generating the appropriate Zero, if we are able to discern it,
// otherwise null (note that this shouldn't happen, but callers should
// be tolerant of this case).
GenTree* Compiler::gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle)
{
bool found = false;
bool isHWSIMD = true;
noway_assert(m_simdHandleCache != nullptr);
// First, determine whether this is Vector<T>.
if (simdType == getSIMDVectorType())
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
found = (simdHandle == m_simdHandleCache->SIMDFloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
found = (simdHandle == m_simdHandleCache->SIMDDoubleHandle);
break;
case CORINFO_TYPE_INT:
found = (simdHandle == m_simdHandleCache->SIMDIntHandle);
break;
case CORINFO_TYPE_USHORT:
found = (simdHandle == m_simdHandleCache->SIMDUShortHandle);
break;
case CORINFO_TYPE_UBYTE:
found = (simdHandle == m_simdHandleCache->SIMDUByteHandle);
break;
case CORINFO_TYPE_SHORT:
found = (simdHandle == m_simdHandleCache->SIMDShortHandle);
break;
case CORINFO_TYPE_BYTE:
found = (simdHandle == m_simdHandleCache->SIMDByteHandle);
break;
case CORINFO_TYPE_LONG:
found = (simdHandle == m_simdHandleCache->SIMDLongHandle);
break;
case CORINFO_TYPE_UINT:
found = (simdHandle == m_simdHandleCache->SIMDUIntHandle);
break;
case CORINFO_TYPE_ULONG:
found = (simdHandle == m_simdHandleCache->SIMDULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
found = (simdHandle == m_simdHandleCache->SIMDNIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
found = (simdHandle == m_simdHandleCache->SIMDNUIntHandle);
break;
default:
break;
}
if (found)
{
isHWSIMD = false;
}
}
if (!found)
{
// We must still have isHWSIMD set to true, and the only non-HW types left are the fixed types.
switch (simdType)
{
case TYP_SIMD8:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector2Handle)
{
isHWSIMD = false;
}
#if defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector64FloatHandle);
}
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector64IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector64UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector64UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector64ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector64ByteHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector64UIntHandle);
#endif // defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
break;
default:
break;
}
break;
case TYP_SIMD12:
assert((simdBaseJitType == CORINFO_TYPE_FLOAT) && (simdHandle == m_simdHandleCache->SIMDVector3Handle));
isHWSIMD = false;
break;
case TYP_SIMD16:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector4Handle)
{
isHWSIMD = false;
}
#if defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector128FloatHandle);
}
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector128DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector128IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector128UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector128UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector128ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector128ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector128LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector128UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector128ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector128NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector128NUIntHandle);
break;
#endif // defined(FEATURE_HW_INTRINSICS)
default:
break;
}
break;
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
case TYP_SIMD32:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
assert(simdHandle == m_simdHandleCache->Vector256FloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector256DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector256IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector256UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector256UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector256ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector256ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector256LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector256UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector256ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector256NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector256NUIntHandle);
break;
default:
break;
}
break;
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
default:
break;
}
}
unsigned size = genTypeSize(simdType);
if (isHWSIMD)
{
#if defined(FEATURE_HW_INTRINSICS)
return gtNewSimdZeroNode(simdType, simdBaseJitType, size, /* isSimdAsHWIntrinsic */ false);
#else
JITDUMP("Coudn't find the matching SIMD type for %s<%s> in gtGetSIMDZero\n", varTypeName(simdType),
varTypeName(JitType2PreciseVarType(simdBaseJitType)));
return nullptr;
#endif // FEATURE_HW_INTRINSICS
}
else
{
return gtNewSIMDVectorZero(simdType, simdBaseJitType, size);
}
}
#endif // FEATURE_SIMD
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE;
tree = tree->gtEffectiveVal();
if (varTypeIsStruct(tree->gtType))
{
switch (tree->gtOper)
{
default:
break;
case GT_MKREFANY:
structHnd = impGetRefAnyClass();
break;
case GT_OBJ:
structHnd = tree->AsObj()->GetLayout()->GetClassHandle();
break;
case GT_BLK:
structHnd = tree->AsBlk()->GetLayout()->GetClassHandle();
break;
case GT_CALL:
structHnd = tree->AsCall()->gtRetClsHnd;
break;
case GT_RET_EXPR:
structHnd = tree->AsRetExpr()->gtRetClsHnd;
break;
case GT_ARGPLACE:
structHnd = tree->AsArgPlace()->gtArgPlaceClsHnd;
break;
case GT_INDEX:
structHnd = tree->AsIndex()->gtStructElemClass;
break;
case GT_FIELD:
info.compCompHnd->getFieldType(tree->AsField()->gtFldHnd, &structHnd);
break;
case GT_ASG:
structHnd = gtGetStructHandleIfPresent(tree->gtGetOp1());
break;
case GT_LCL_FLD:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
#endif
break;
case GT_LCL_VAR:
{
unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
structHnd = lvaGetStruct(lclNum);
break;
}
case GT_RETURN:
structHnd = gtGetStructHandleIfPresent(tree->AsOp()->gtOp1);
break;
case GT_IND:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
else
#endif
{
// Attempt to find a handle for this expression.
// We can do this for an array element indirection, or for a field indirection.
ArrayInfo arrInfo;
if (TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
structHnd = arrInfo.m_elemStructType;
}
else
{
GenTree* addr = tree->AsIndir()->Addr();
FieldSeqNode* fieldSeq = nullptr;
if ((addr->OperGet() == GT_ADD) && addr->gtGetOp2()->OperIs(GT_CNS_INT))
{
fieldSeq = addr->gtGetOp2()->AsIntCon()->gtFieldSeq;
}
else
{
GetZeroOffsetFieldMap()->Lookup(addr, &fieldSeq);
}
if (fieldSeq != nullptr)
{
while (fieldSeq->m_next != nullptr)
{
fieldSeq = fieldSeq->m_next;
}
if (fieldSeq != FieldSeqStore::NotAField() && !fieldSeq->IsPseudoField())
{
CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->m_fieldHnd;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &structHnd);
// With unsafe code and type casts this can return a primitive type
// and leave 'structHnd' as nullptr; see runtime/issues/38541.
}
}
}
}
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsSIMD()->GetSimdBaseJitType());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
if ((tree->gtFlags & GTF_SIMDASHW_OP) != 0)
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
else
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
break;
#endif
break;
}
// TODO-1stClassStructs: add a check that `structHnd != NO_CLASS_HANDLE`,
// nowadays it won't work because the right part of an ASG could have struct type without a handle
// (check `fgMorphBlockOperand(isBlkReqd`) and a few other cases.
}
return structHnd;
}
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = gtGetStructHandleIfPresent(tree);
assert(structHnd != NO_CLASS_HANDLE);
return structHnd;
}
//------------------------------------------------------------------------
// gtGetClassHandle: find class handle for a ref type
//
// Arguments:
// tree -- tree to find handle for
// pIsExact [out] -- whether handle is exact type
// pIsNonNull [out] -- whether tree value is known not to be null
//
// Return Value:
// nullptr if class handle is unknown,
// otherwise the class handle.
// *pIsExact set true if tree type is known to be exactly the handle type,
// otherwise actual type may be a subtype.
// *pIsNonNull set true if tree value is known not to be null,
// otherwise a null value is possible.
CORINFO_CLASS_HANDLE Compiler::gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull)
{
// Set default values for our out params.
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
// Bail out if we're just importing and not generating code, since
// the jit uses TYP_REF for CORINFO_TYPE_VAR locals and args, but
// these may not be ref types.
if (compIsForImportOnly())
{
return objClass;
}
// Bail out if the tree is not a ref type.
var_types treeType = tree->TypeGet();
if (treeType != TYP_REF)
{
return objClass;
}
// Tunnel through commas.
GenTree* obj = tree->gtEffectiveVal(false);
const genTreeOps objOp = obj->OperGet();
switch (objOp)
{
case GT_COMMA:
{
// gtEffectiveVal above means we shouldn't see commas here.
assert(!"unexpected GT_COMMA");
break;
}
case GT_LCL_VAR:
{
// For locals, pick up type info from the local table.
const unsigned objLcl = obj->AsLclVar()->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
break;
}
case GT_FIELD:
{
// For fields, get the type from the field handle.
CORINFO_FIELD_HANDLE fieldHnd = obj->AsField()->gtFldHnd;
if (fieldHnd != nullptr)
{
objClass = gtGetFieldClassHandle(fieldHnd, pIsExact, pIsNonNull);
}
break;
}
case GT_RET_EXPR:
{
// If we see a RET_EXPR, recurse through to examine the
// return value expression.
GenTree* retExpr = tree->AsRetExpr()->gtInlineCandidate;
objClass = gtGetClassHandle(retExpr, pIsExact, pIsNonNull);
break;
}
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
if ((ni == NI_System_Array_Clone) || (ni == NI_System_Object_MemberwiseClone))
{
objClass = gtGetClassHandle(call->gtCallThisArg->GetNode(), pIsExact, pIsNonNull);
break;
}
CORINFO_CLASS_HANDLE specialObjClass = impGetSpecialIntrinsicExactReturnType(call->gtCallMethHnd);
if (specialObjClass != nullptr)
{
objClass = specialObjClass;
*pIsExact = true;
*pIsNonNull = true;
break;
}
}
if (call->IsInlineCandidate())
{
// For inline candidates, we've already cached the return
// type class handle in the inline info.
InlineCandidateInfo* inlInfo = call->gtInlineCandidateInfo;
assert(inlInfo != nullptr);
// Grab it as our first cut at a return type.
assert(inlInfo->methInfo.args.retType == CORINFO_TYPE_CLASS);
objClass = inlInfo->methInfo.args.retTypeClass;
// If the method is shared, the above may not capture
// the most precise return type information (that is,
// it may represent a shared return type and as such,
// have instances of __Canon). See if we can use the
// context to get at something more definite.
//
// For now, we do this here on demand rather than when
// processing the call, but we could/should apply
// similar sharpening to the argument and local types
// of the inlinee.
const unsigned retClassFlags = info.compCompHnd->getClassAttribs(objClass);
if (retClassFlags & CORINFO_FLG_SHAREDINST)
{
CORINFO_CONTEXT_HANDLE context = inlInfo->exactContextHnd;
if (context != nullptr)
{
CORINFO_CLASS_HANDLE exactClass = eeGetClassFromContext(context);
// Grab the signature in this context.
CORINFO_SIG_INFO sig;
eeGetMethodSig(call->gtCallMethHnd, &sig, exactClass);
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
}
else if (call->gtCallType == CT_USER_FUNC)
{
// For user calls, we can fetch the approximate return
// type info from the method handle. Unfortunately
// we've lost the exact context, so this is the best
// we can do for now.
CORINFO_METHOD_HANDLE method = call->gtCallMethHnd;
CORINFO_CLASS_HANDLE exactClass = nullptr;
CORINFO_SIG_INFO sig;
eeGetMethodSig(method, &sig, exactClass);
if (sig.retType == CORINFO_TYPE_VOID)
{
// This is a constructor call.
const unsigned methodFlags = info.compCompHnd->getMethodAttribs(method);
assert((methodFlags & CORINFO_FLG_CONSTRUCTOR) != 0);
objClass = info.compCompHnd->getMethodClass(method);
*pIsExact = true;
*pIsNonNull = true;
}
else
{
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
else if (call->gtCallType == CT_HELPER)
{
objClass = gtGetHelperCallClassHandle(call, pIsExact, pIsNonNull);
}
break;
}
case GT_INTRINSIC:
{
GenTreeIntrinsic* intrinsic = obj->AsIntrinsic();
if (intrinsic->gtIntrinsicName == NI_System_Object_GetType)
{
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsExact = false;
*pIsNonNull = true;
}
break;
}
case GT_CNS_STR:
{
// For literal strings, we know the class and that the
// value is not null.
objClass = impGetStringClass();
*pIsExact = true;
*pIsNonNull = true;
break;
}
case GT_IND:
{
GenTreeIndir* indir = obj->AsIndir();
if (indir->HasBase() && !indir->HasIndex())
{
// indir(addr(lcl)) --> lcl
//
// This comes up during constrained callvirt on ref types.
GenTree* base = indir->Base();
GenTreeLclVarCommon* lcl = base->IsLocalAddrExpr();
if ((lcl != nullptr) && (base->OperGet() != GT_ADD))
{
const unsigned objLcl = lcl->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
}
else if (base->OperGet() == GT_ARR_ELEM)
{
// indir(arr_elem(...)) -> array element type
GenTree* array = base->AsArrElem()->gtArrObj;
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
}
else if (base->OperGet() == GT_ADD)
{
// This could be a static field access.
//
// See if op1 is a static field base helper call
// and if so, op2 will have the field info.
GenTree* op1 = base->AsOp()->gtOp1;
GenTree* op2 = base->AsOp()->gtOp2;
const bool op1IsStaticFieldBase = gtIsStaticGCBaseHelperCall(op1);
if (op1IsStaticFieldBase && (op2->OperGet() == GT_CNS_INT))
{
FieldSeqNode* fieldSeq = op2->AsIntCon()->gtFieldSeq;
if (fieldSeq != nullptr)
{
while (fieldSeq->m_next != nullptr)
{
fieldSeq = fieldSeq->m_next;
}
assert(!fieldSeq->IsPseudoField());
// No benefit to calling gtGetFieldClassHandle here, as
// the exact field being accessed can vary.
CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->m_fieldHnd;
CORINFO_CLASS_HANDLE fieldClass = nullptr;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &fieldClass);
assert(fieldCorType == CORINFO_TYPE_CLASS);
objClass = fieldClass;
}
}
}
}
break;
}
case GT_BOX:
{
// Box should just wrap a local var reference which has
// the type we're looking for. Also box only represents a
// non-nullable value type so result cannot be null.
GenTreeBox* box = obj->AsBox();
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
objClass = lvaTable[boxTempLcl].lvClassHnd;
*pIsExact = lvaTable[boxTempLcl].lvClassIsExact;
*pIsNonNull = true;
break;
}
case GT_INDEX:
{
GenTree* array = obj->AsIndex()->Arr();
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
break;
}
default:
{
break;
}
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetHelperCallClassHandle: find class handle for return value of a
// helper call
//
// Arguments:
// call - helper call to examine
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if return value is not null
//
// Return Value:
// nullptr if helper call result is not a ref class, or the class handle
// is unknown, otherwise the class handle.
CORINFO_CLASS_HANDLE Compiler::gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull)
{
assert(call->gtCallType == CT_HELPER);
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE:
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL:
{
// Note for some runtimes these helpers return exact types.
//
// But in those cases the types are also sealed, so there's no
// need to claim exactness here.
const bool helperResultNonNull = (helper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE);
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsNonNull = helperResultNonNull;
break;
}
case CORINFO_HELP_CHKCASTCLASS:
case CORINFO_HELP_CHKCASTANY:
case CORINFO_HELP_CHKCASTARRAY:
case CORINFO_HELP_CHKCASTINTERFACE:
case CORINFO_HELP_CHKCASTCLASS_SPECIAL:
case CORINFO_HELP_ISINSTANCEOFINTERFACE:
case CORINFO_HELP_ISINSTANCEOFARRAY:
case CORINFO_HELP_ISINSTANCEOFCLASS:
case CORINFO_HELP_ISINSTANCEOFANY:
{
// Fetch the class handle from the helper call arglist
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* typeArg = args->GetNode();
CORINFO_CLASS_HANDLE castHnd = gtGetHelperArgClassHandle(typeArg);
// We generally assume the type being cast to is the best type
// for the result, unless it is an interface type.
//
// TODO-CQ: when we have default interface methods then
// this might not be the best assumption. We could also
// explore calling something like mergeClasses to identify
// the more specific class. A similar issue arises when
// typing the temp in impCastClassOrIsInstToTree, when we
// expand the cast inline.
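// For example (illustrative): for "(string)o" the result is typed as System.String, while
// for "(IEnumerable)o" (an interface cast) we instead fall back below to the class of "o",
// if that is known.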
if (castHnd != nullptr)
{
DWORD attrs = info.compCompHnd->getClassAttribs(castHnd);
if ((attrs & CORINFO_FLG_INTERFACE) != 0)
{
castHnd = nullptr;
}
}
// If we don't have a good estimate for the type we can use the
// type from the value being cast instead.
if (castHnd == nullptr)
{
GenTree* valueArg = args->GetNext()->GetNode();
castHnd = gtGetClassHandle(valueArg, pIsExact, pIsNonNull);
}
// We don't know at jit time if the cast will succeed or fail, but if it
// fails at runtime then an exception is thrown for cast helpers, or the
// result is set null for instance helpers.
//
// So it is safe to claim the result has the cast type.
// Note we don't know for sure that it is exactly this type.
if (castHnd != nullptr)
{
objClass = castHnd;
}
break;
}
case CORINFO_HELP_NEWARR_1_DIRECT:
case CORINFO_HELP_NEWARR_1_OBJ:
case CORINFO_HELP_NEWARR_1_VC:
case CORINFO_HELP_NEWARR_1_ALIGN8:
case CORINFO_HELP_READYTORUN_NEWARR_1:
{
CORINFO_CLASS_HANDLE arrayHnd = (CORINFO_CLASS_HANDLE)call->compileTimeHelperArgumentHandle;
if (arrayHnd != NO_CLASS_HANDLE)
{
objClass = arrayHnd;
*pIsExact = true;
*pIsNonNull = true;
}
break;
}
default:
break;
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetArrayElementClassHandle: find class handle for elements of an array
// of ref types
//
// Arguments:
// array -- array to find handle for
//
// Return Value:
// nullptr if element class handle is unknown, otherwise the class handle.
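//
// Notes:
//    For example, if "array" is known to be of type string[], this returns the class
//    handle for System.String.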
CORINFO_CLASS_HANDLE Compiler::gtGetArrayElementClassHandle(GenTree* array)
{
bool isArrayExact = false;
bool isArrayNonNull = false;
CORINFO_CLASS_HANDLE arrayClassHnd = gtGetClassHandle(array, &isArrayExact, &isArrayNonNull);
if (arrayClassHnd != nullptr)
{
// We know the class of the reference
DWORD attribs = info.compCompHnd->getClassAttribs(arrayClassHnd);
if ((attribs & CORINFO_FLG_ARRAY) != 0)
{
// We know for sure it is an array
CORINFO_CLASS_HANDLE elemClassHnd = nullptr;
CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayClassHnd, &elemClassHnd);
if (arrayElemType == CORINFO_TYPE_CLASS)
{
// We know it is an array of ref types
return elemClassHnd;
}
}
}
return nullptr;
}
//------------------------------------------------------------------------
// gtGetFieldClassHandle: find class handle for a field
//
// Arguments:
// fieldHnd - field handle for field in question
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if field value is not null
//
// Return Value:
// nullptr if helper call result is not a ref class, or the class handle
// is unknown, otherwise the class handle.
//
// May examine runtime state of static field instances.
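//
// Notes:
//    For example (illustrative): for "static readonly object s_obj = new Foo();" the runtime
//    can report Foo as the field's current class once the declaring class is initialized,
//    which lets us treat the field's type as exact and its value as non-null.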
CORINFO_CLASS_HANDLE Compiler::gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull)
{
CORINFO_CLASS_HANDLE fieldClass = nullptr;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &fieldClass);
if (fieldCorType == CORINFO_TYPE_CLASS)
{
// Optionally, look at the actual type of the field's value
bool queryForCurrentClass = true;
INDEBUG(queryForCurrentClass = (JitConfig.JitQueryCurrentStaticFieldClass() > 0););
if (queryForCurrentClass)
{
#if DEBUG
const char* fieldClassName = nullptr;
const char* fieldName = eeGetFieldName(fieldHnd, &fieldClassName);
JITDUMP("Querying runtime about current class of field %s.%s (declared as %s)\n", fieldClassName, fieldName,
eeGetClassName(fieldClass));
#endif // DEBUG
// Is this a fully initialized init-only static field?
//
// Note we're not asking for speculative results here, yet.
CORINFO_CLASS_HANDLE currentClass = info.compCompHnd->getStaticFieldCurrentClass(fieldHnd);
if (currentClass != NO_CLASS_HANDLE)
{
// Yes! We know the class exactly and can rely on this to always be true.
fieldClass = currentClass;
*pIsExact = true;
*pIsNonNull = true;
JITDUMP("Runtime reports field is init-only and initialized and has class %s\n",
eeGetClassName(fieldClass));
}
else
{
JITDUMP("Field's current class not available\n");
}
}
}
return fieldClass;
}
//------------------------------------------------------------------------
// gtIsStaticGCBaseHelperCall: true if tree is fetching the gc static base
// for a subsequent static field access
//
// Arguments:
// tree - tree to consider
//
// Return Value:
// true if the tree is a suitable helper call
//
// Notes:
// Excludes R2R helpers as they specify the target field in a way
// that is opaque to the jit.
bool Compiler::gtIsStaticGCBaseHelperCall(GenTree* tree)
{
if (tree->OperGet() != GT_CALL)
{
return false;
}
GenTreeCall* call = tree->AsCall();
if (call->gtCallType != CT_HELPER)
{
return false;
}
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
// We are looking for a REF type so only need to check for the GC base helpers
case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
return true;
default:
break;
}
return false;
}
void GenTree::ParseArrayAddress(
Compiler* comp, ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq)
{
*pArr = nullptr;
ValueNum inxVN = ValueNumStore::NoVN;
target_ssize_t offset = 0;
FieldSeqNode* fldSeq = nullptr;
ParseArrayAddressWork(comp, 1, pArr, &inxVN, &offset, &fldSeq);
// If we didn't find an array reference (perhaps it is the constant null?) we will give up.
if (*pArr == nullptr)
{
return;
}
// OK, now we have to figure out if any part of the "offset" is a constant contribution to the index.
// First, sum the offsets of any fields in fldSeq.
unsigned fieldOffsets = 0;
FieldSeqNode* fldSeqIter = fldSeq;
// Also, find the first non-pseudo field...
assert(*pFldSeq == nullptr);
while (fldSeqIter != nullptr)
{
if (fldSeqIter == FieldSeqStore::NotAField())
{
// TODO-Review: A NotAField here indicates a failure to properly maintain the field sequence
// See test case self_host_tests_x86\jit\regression\CLR-x86-JIT\v1-m12-beta2\ b70992\ b70992.exe
// Safest thing to do here is to drop back to MinOpts
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (comp->opts.optRepeat)
{
// We don't guarantee preserving these annotations through the entire optimizer, so
// just conservatively return null if under optRepeat.
*pArr = nullptr;
return;
}
#endif // DEBUG
noway_assert(!"fldSeqIter is NotAField() in ParseArrayAddress");
}
if (!FieldSeqStore::IsPseudoField(fldSeqIter->m_fieldHnd))
{
if (*pFldSeq == nullptr)
{
*pFldSeq = fldSeqIter;
}
CORINFO_CLASS_HANDLE fldCls = nullptr;
noway_assert(fldSeqIter->m_fieldHnd != nullptr);
CorInfoType cit = comp->info.compCompHnd->getFieldType(fldSeqIter->m_fieldHnd, &fldCls);
fieldOffsets += comp->compGetTypeSize(cit, fldCls);
}
fldSeqIter = fldSeqIter->m_next;
}
// Is there some portion of the "offset" beyond the first-elem offset and the struct field suffix we just computed?
if (!FitsIn<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset) ||
!FitsIn<target_ssize_t>(arrayInfo->m_elemSize))
{
// This seems unlikely, but no harm in being safe...
*pInxVN = comp->GetValueNumStore()->VNForExpr(nullptr, TYP_INT);
return;
}
// Otherwise...
target_ssize_t offsetAccountedFor = static_cast<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset);
target_ssize_t elemSize = static_cast<target_ssize_t>(arrayInfo->m_elemSize);
target_ssize_t constIndOffset = offset - offsetAccountedFor;
// This should be divisible by the element size...
assert((constIndOffset % elemSize) == 0);
target_ssize_t constInd = constIndOffset / elemSize;
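// For example (illustrative, 64-bit): an address parsed as "arr + 8*i + 40" for an array of
// 8-byte elements with a first-element offset of 16 gives offset = 40, offsetAccountedFor = 16,
// constIndOffset = 24 and constInd = 3, i.e. the access is to element [i + 3].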
ValueNumStore* vnStore = comp->GetValueNumStore();
if (inxVN == ValueNumStore::NoVN)
{
// Must be a constant index.
*pInxVN = vnStore->VNForPtrSizeIntCon(constInd);
}
else
{
//
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
//
// The value associated with the index value number (inxVN) is the offset into the array,
// which has been scaled by element size. We need to recover the array index from that offset
if (vnStore->IsVNConstant(inxVN))
{
target_ssize_t index = vnStore->CoercedConstantValue<target_ssize_t>(inxVN);
noway_assert(elemSize > 0 && ((index % elemSize) == 0));
*pInxVN = vnStore->VNForPtrSizeIntCon((index / elemSize) + constInd);
}
else
{
bool canFoldDiv = false;
// If the index VN is a MUL by elemSize, see if we can eliminate it instead of adding
// the division by elemSize.
VNFuncApp funcApp;
if (vnStore->GetVNFunc(inxVN, &funcApp) && funcApp.m_func == (VNFunc)GT_MUL)
{
ValueNum vnForElemSize = vnStore->VNForLongCon(elemSize);
// One of the multiply operands is elemSize, so the resulting
// index VN should simply be the other operand.
if (funcApp.m_args[1] == vnForElemSize)
{
*pInxVN = funcApp.m_args[0];
canFoldDiv = true;
}
else if (funcApp.m_args[0] == vnForElemSize)
{
*pInxVN = funcApp.m_args[1];
canFoldDiv = true;
}
}
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
if (!canFoldDiv)
{
ValueNum vnForElemSize = vnStore->VNForPtrSizeIntCon(elemSize);
ValueNum vnForScaledInx = vnStore->VNForFunc(TYP_I_IMPL, VNFunc(GT_DIV), inxVN, vnForElemSize);
*pInxVN = vnForScaledInx;
}
if (constInd != 0)
{
ValueNum vnForConstInd = comp->GetValueNumStore()->VNForPtrSizeIntCon(constInd);
VNFunc vnFunc = VNFunc(GT_ADD);
*pInxVN = comp->GetValueNumStore()->VNForFunc(TYP_I_IMPL, vnFunc, *pInxVN, vnForConstInd);
}
}
}
}
void GenTree::ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq)
{
if (TypeGet() == TYP_REF)
{
// This must be the array pointer.
*pArr = this;
assert(inputMul == 1); // Can't multiply the array pointer by anything.
}
else
{
switch (OperGet())
{
case GT_CNS_INT:
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, AsIntCon()->gtFieldSeq);
assert(!AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
*pOffset += (inputMul * (target_ssize_t)(AsIntCon()->gtIconVal));
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
if (OperGet() == GT_SUB)
{
inputMul = -inputMul;
}
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
case GT_MUL:
{
// If one op is a constant, continue parsing down.
target_ssize_t subMul = 0;
GenTree* nonConst = nullptr;
if (AsOp()->gtOp1->IsCnsIntOrI())
{
// If the other arg is an int constant, and is a "not-a-field", choose
// that as the multiplier, thus preserving constant index offsets...
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
AsOp()->gtOp2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
else
{
assert(!AsOp()->gtOp1->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp1->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp2;
}
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
if (nonConst != nullptr)
{
nonConst->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
}
break;
case GT_LSH:
// If one op is a constant, continue parsing down.
if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
target_ssize_t shiftVal = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
target_ssize_t subMul = target_ssize_t{1} << shiftVal;
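// E.g. (illustrative) LSH(i, 3) contributes "i" scaled by 8 to the index,
// just as MUL(i, 8) would.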
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
break;
case GT_COMMA:
// We don't care about exceptions for this purpose.
if (AsOp()->gtOp1->OperIs(GT_BOUNDS_CHECK) || AsOp()->gtOp1->IsNothingNode())
{
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
break;
default:
break;
}
// If we didn't return above, must be a contribution to the non-constant part of the index VN.
ValueNum vn = comp->GetValueNumStore()->VNLiberalNormalValue(gtVNPair);
if (inputMul != 1)
{
ValueNum mulVN = comp->GetValueNumStore()->VNForLongCon(inputMul);
vn = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_MUL), mulVN, vn);
}
if (*pInxVN == ValueNumStore::NoVN)
{
*pInxVN = vn;
}
else
{
*pInxVN = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_ADD), *pInxVN, vn);
}
}
}
bool GenTree::ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
if (OperIsIndir())
{
if (gtFlags & GTF_IND_ARR_INDEX)
{
bool b = comp->GetArrayInfoMap()->Lookup(this, arrayInfo);
assert(b);
return true;
}
// Otherwise...
GenTree* addr = AsIndir()->Addr();
return addr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
else
{
return false;
}
}
bool GenTree::ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_ADD:
{
GenTree* arrAddr = nullptr;
GenTree* offset = nullptr;
if (AsOp()->gtOp1->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp1;
offset = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp2;
offset = AsOp()->gtOp1;
}
else
{
return false;
}
if (!offset->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return arrAddr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
case GT_ADDR:
{
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->OperGet() != GT_IND)
{
return false;
}
else
{
// The "Addr" node might be annotated with a zero-offset field sequence.
FieldSeqNode* zeroOffsetFldSeq = nullptr;
if (comp->GetZeroOffsetFieldMap()->Lookup(this, &zeroOffsetFldSeq))
{
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, zeroOffsetFldSeq);
}
return addrArg->ParseArrayElemForm(comp, arrayInfo, pFldSeq);
}
}
default:
return false;
}
}
bool GenTree::ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_CNS_INT:
{
GenTreeIntCon* icon = AsIntCon();
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, icon->gtFieldSeq);
return true;
}
case GT_ADD:
if (!AsOp()->gtOp1->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return AsOp()->gtOp2->ParseOffsetForm(comp, pFldSeq);
default:
return false;
}
}
void GenTree::LabelIndex(Compiler* comp, bool isConst)
{
switch (OperGet())
{
case GT_CNS_INT:
// If we got here, this is a contribution to the constant part of the index.
if (isConst)
{
AsIntCon()->gtFieldSeq =
comp->GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
}
return;
case GT_LCL_VAR:
gtFlags |= GTF_VAR_ARR_INDEX;
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->LabelIndex(comp, isConst);
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
case GT_CAST:
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
case GT_ARR_LENGTH:
gtFlags |= GTF_ARRLEN_ARR_IDX;
return;
default:
// For all other operators, peel off one constant; and then label the other if it's also a constant.
if (OperIsArithmetic() || OperIsCompare())
{
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
}
else if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
}
// Otherwise continue downward on both, labeling vars.
AsOp()->gtOp1->LabelIndex(comp, false);
AsOp()->gtOp2->LabelIndex(comp, false);
}
break;
}
}
// Note that the value of the below field doesn't matter; it exists only to provide a distinguished address.
//
// static
FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr);
// FieldSeqStore methods.
FieldSeqStore::FieldSeqStore(CompAllocator alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
{
}
FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd)
{
FieldSeqNode fsn(fieldHnd, nullptr);
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
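//------------------------------------------------------------------------
// Append: concatenate two field sequences, returning the canonical result.
//
// Notes:
//    For example (illustrative): Append([F1, F2], [F3]) yields the canonical sequence
//    [F1, F2, F3], while a NotAField() on either side yields NotAField().
//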
FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b)
{
if (a == nullptr)
{
return b;
}
else if (a == NotAField())
{
return NotAField();
}
else if (b == nullptr)
{
return a;
}
else if (b == NotAField())
{
return NotAField();
}
// Extremely special case for ConstantIndex pseudo-fields -- appending consecutive such
// together collapse to one.
else if (a->m_next == nullptr && a->m_fieldHnd == ConstantIndexPseudoField &&
b->m_fieldHnd == ConstantIndexPseudoField)
{
return b;
}
else
{
// We should never add a duplicate FieldSeqNode
assert(a != b);
FieldSeqNode* tmp = Append(a->m_next, b);
FieldSeqNode fsn(a->m_fieldHnd, tmp);
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
}
// Static vars.
int FieldSeqStore::FirstElemPseudoFieldStruct;
int FieldSeqStore::ConstantIndexPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::FirstElemPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::FirstElemPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::ConstantIndexPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::ConstantIndexPseudoFieldStruct;
bool FieldSeqNode::IsFirstElemFieldSeq()
{
return m_fieldHnd == FieldSeqStore::FirstElemPseudoField;
}
bool FieldSeqNode::IsConstantIndexFieldSeq()
{
return m_fieldHnd == FieldSeqStore::ConstantIndexPseudoField;
}
bool FieldSeqNode::IsPseudoField() const
{
return m_fieldHnd == FieldSeqStore::FirstElemPseudoField || m_fieldHnd == FieldSeqStore::ConstantIndexPseudoField;
}
#ifdef FEATURE_SIMD
GenTreeSIMD* Compiler::gtNewSIMDNode(
var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
GenTreeSIMD* Compiler::gtNewSIMDNode(var_types type,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, op2, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
//-------------------------------------------------------------------
// SetOpLclRelatedToSIMDIntrinsic: Determine if the tree has a local var that needs to be set
// as used by a SIMD intrinsic, and if so, set that local var appropriately.
//
// Arguments:
// op - The tree, to be an operand of a new GT_SIMD node, to check.
//
void Compiler::SetOpLclRelatedToSIMDIntrinsic(GenTree* op)
{
if (op == nullptr)
{
return;
}
if (op->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(op);
}
else if (op->OperIs(GT_OBJ))
{
GenTree* addr = op->AsIndir()->Addr();
if (addr->OperIs(GT_ADDR))
{
GenTree* addrOp1 = addr->AsOp()->gtGetOp1();
if (addrOp1->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(addrOp1);
}
}
}
}
bool GenTree::isCommutativeSIMDIntrinsic()
{
assert(gtOper == GT_SIMD);
switch (AsSIMD()->GetSIMDIntrinsicId())
{
case SIMDIntrinsicBitwiseAnd:
case SIMDIntrinsicBitwiseOr:
case SIMDIntrinsicEqual:
return true;
default:
return false;
}
}
void GenTreeMultiOp::ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount)
{
size_t oldOperandCount = GetOperandCount();
GenTree** oldOperands = GetOperandArray();
if (newOperandCount > oldOperandCount)
{
if (newOperandCount <= inlineOperandCount)
{
assert(oldOperandCount <= inlineOperandCount);
assert(oldOperands == inlineOperands);
}
else
{
// The most difficult case: we need to recreate the dynamic array.
assert(compiler != nullptr);
m_operands = compiler->getAllocator(CMK_ASTNode).allocate<GenTree*>(newOperandCount);
}
}
else
{
// We are shrinking the array and may in the process switch to an inline representation.
// We choose to do so for simplicity ("if a node has <= InlineOperandCount operands,
// then it stores them inline"), though it may actually be more profitable not to:
// that would save us a copy and a potential cache miss (though the latter seems unlikely).
if ((newOperandCount <= inlineOperandCount) && (oldOperands != inlineOperands))
{
m_operands = inlineOperands;
}
}
#ifdef DEBUG
for (size_t i = 0; i < newOperandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
SetOperandCount(newOperandCount);
}
/* static */ bool GenTreeMultiOp::OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2)
{
if (op1->GetOperandCount() != op2->GetOperandCount())
{
return false;
}
for (size_t i = 1; i <= op1->GetOperandCount(); i++)
{
if (!Compare(op1->Op(i), op2->Op(i)))
{
return false;
}
}
return true;
}
void GenTreeMultiOp::InitializeOperands(GenTree** operands, size_t operandCount)
{
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = operands[i];
gtFlags |= (operands[i]->gtFlags & GTF_ALL_EFFECT);
}
SetOperandCount(operandCount);
}
var_types GenTreeJitIntrinsic::GetAuxiliaryType() const
{
CorInfoType auxiliaryJitType = GetAuxiliaryJitType();
if (auxiliaryJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(auxiliaryJitType);
}
var_types GenTreeJitIntrinsic::GetSimdBaseType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(simdBaseJitType);
}
// Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeSIMD::OperIsMemoryLoad() const
{
if (GetSIMDIntrinsicId() == SIMDIntrinsicInitArray)
{
return true;
}
return false;
}
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeSIMD::Equals(GenTreeSIMD* op1, GenTreeSIMD* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetSIMDIntrinsicId() == op2->GetSIMDIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool GenTree::isCommutativeHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
return HWIntrinsicInfo::IsCommutative(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif // TARGET_XARCH
}
bool GenTree::isContainableHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_SSE_LoadAlignedVector128:
case NI_SSE_LoadScalarVector128:
case NI_SSE_LoadVector128:
case NI_SSE2_LoadAlignedVector128:
case NI_SSE2_LoadScalarVector128:
case NI_SSE2_LoadVector128:
case NI_AVX_LoadAlignedVector256:
case NI_AVX_LoadVector256:
case NI_AVX_ExtractVector128:
case NI_AVX2_ExtractVector128:
{
return true;
}
default:
{
return false;
}
}
#elif TARGET_ARM64
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_Vector64_get_Zero:
case NI_Vector128_get_Zero:
{
return true;
}
default:
{
return false;
}
}
#else
return false;
#endif // TARGET_XARCH
}
bool GenTree::isRMWHWIntrinsic(Compiler* comp)
{
assert(gtOper == GT_HWINTRINSIC);
assert(comp != nullptr);
#if defined(TARGET_XARCH)
if (!comp->canUseVexEncoding())
{
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
}
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
// TODO-XArch-Cleanup: Move this switch block to be table driven.
case NI_SSE42_Crc32:
case NI_SSE42_X64_Crc32:
case NI_FMA_MultiplyAdd:
case NI_FMA_MultiplyAddNegated:
case NI_FMA_MultiplyAddNegatedScalar:
case NI_FMA_MultiplyAddScalar:
case NI_FMA_MultiplyAddSubtract:
case NI_FMA_MultiplySubtract:
case NI_FMA_MultiplySubtractAdd:
case NI_FMA_MultiplySubtractNegated:
case NI_FMA_MultiplySubtractNegatedScalar:
case NI_FMA_MultiplySubtractScalar:
{
return true;
}
default:
{
return false;
}
}
#elif defined(TARGET_ARM64)
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2, op3);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
SetOpLclRelatedToSIMDIntrinsic(op4);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic, op1, op2, op3, op4);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree** operands,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), operandCount);
for (size_t i = 0; i < operandCount; i++)
{
nodeBuilder.AddOperand(i, operands[i]);
SetOpLclRelatedToSIMDIntrinsic(operands[i]);
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
for (size_t i = 0; i < nodeBuilder.GetOperandCount(); i++)
{
SetOpLclRelatedToSIMDIntrinsic(nodeBuilder.GetOperand(i));
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdAbsNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeGet() == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
if (varTypeIsUnsigned(simdBaseType))
{
return op1;
}
#if defined(TARGET_XARCH)
if (varTypeIsFloating(simdBaseType))
{
// Abs(v) = v & ~new vector<T>(-0.0);
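// (Illustrative) -0.0 has only the sign bit set (0x80000000 for float, 0x8000000000000000 for
// double), so AND-NOT with a broadcast of -0.0 clears the sign bit of every element, which is
// abs() for IEEE floating-point values.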
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
GenTree* bitMask = gtNewDconNode(-0.0, simdBaseType);
bitMask = gtNewSimdCreateBroadcastNode(type, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND_NOT, type, op1, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
if ((simdBaseType != TYP_LONG) && ((simdSize == 32) || compOpportunisticallyDependsOn(InstructionSet_SSSE3)))
{
NamedIntrinsic intrinsic = (simdSize == 32) ? NI_AVX2_Abs : NI_SSSE3_Abs;
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
GenTree* tmp;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
// op1 = op1 < Zero
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, tmp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// tmp = Zero - op1Dup1
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdBinOpNode(GT_SUB, type, tmp, op1Dup1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, tmp, op1Dup2)
return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
NamedIntrinsic intrinsic = NI_AdvSimd_Abs;
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
else if (varTypeIsLong(simdBaseType))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif
}
GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op1 != nullptr);
assert(op1->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
assert(op2 != nullptr);
if ((op == GT_LSH) || (op == GT_RSH) || (op == GT_RSZ))
{
assert(op2->TypeIs(TYP_INT));
}
else
{
assert(op2->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
}
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_ADD:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Add;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Add;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Add;
}
else
{
intrinsic = NI_SSE2_Add;
}
break;
}
case GT_AND:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_And;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_And;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_And;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_And;
}
else
{
intrinsic = NI_SSE2_And;
}
break;
}
case GT_AND_NOT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_AndNot;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_AndNot;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_AndNot;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_AndNot;
}
else
{
intrinsic = NI_SSE2_AndNot;
}
// GT_AND_NOT expects `op1 & ~op2`, but xarch does `~op1 & op2`
std::swap(op1, op2);
break;
}
case GT_DIV:
{
// TODO-XARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Divide;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Divide;
}
else
{
intrinsic = NI_SSE2_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsByte(simdBaseType));
assert(!varTypeIsFloating(simdBaseType));
assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
// "over shifting" is platform specific behavior. We will match the C# behavior
// this requires we mask with (sizeof(T) * 8) - 1 which ensures the shift cannot
// exceed the number of bits available in `T`. This is roughly equivalent to
// x % (sizeof(T) * 8), but that is "more expensive" and only the same for unsigned
// inputs, where-as we have a signed-input and so negative values would differ.
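// For example (illustrative): for int elements the mask is 31, so a shift count of 33
// behaves like a shift by 1, matching the C# scalar shift behavior.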
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_SSE2_ConvertScalarToVector128Int32, CORINFO_TYPE_INT,
16, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (op == GT_LSH)
{
intrinsic = NI_AVX2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AVX2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AVX2_ShiftRightLogical;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_SSE2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_SSE2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_SSE2_ShiftRightLogical;
}
break;
}
case GT_MUL:
{
GenTree** broadcastOp = nullptr;
if (varTypeIsArithmetic(op1))
{
broadcastOp = &op1;
}
else if (varTypeIsArithmetic(op2))
{
broadcastOp = &op2;
}
if (broadcastOp != nullptr)
{
*broadcastOp =
gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
switch (simdBaseType)
{
case TYP_SHORT:
case TYP_USHORT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else
{
intrinsic = NI_SSE2_MultiplyLow;
}
break;
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_MultiplyLow;
}
else
{
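// A sketch of the SSE2-only fallback below (illustrative): Sse2.Multiply (pmuludq) only does a
// widening 32x32->64 multiply on the even lanes, so we form the products of the even lanes and
// of the odd lanes (shifted down by 4 bytes) separately, gather the low 32 bits of each product
// with Shuffle, and interleave the two results with UnpackLow.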
// op1Dup = op1
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector multiply"));
// op2Dup = op2
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector multiply"));
// op1 = Sse2.ShiftRightLogical128BitLane(op1, 4)
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.ShiftRightLogical128BitLane(op2, 4)
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Multiply(op2.AsUInt32(), op1.AsUInt32()).AsInt32()
op2 = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_SSE2_Multiply, CORINFO_TYPE_ULONG, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Shuffle(op2, (0, 0, 2, 0))
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Multiply(op1Dup.AsUInt32(), op2Dup.AsUInt32()).AsInt32()
op1 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_Multiply, CORINFO_TYPE_ULONG,
simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Shuffle(op1, (0, 0, 2, 0))
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = Sse2.UnpackLow(op1, op2)
intrinsic = NI_SSE2_UnpackLow;
}
break;
}
case TYP_FLOAT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE2_Multiply;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Or;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Or;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Or;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Or;
}
else
{
intrinsic = NI_SSE2_Or;
}
break;
}
case GT_SUB:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Subtract;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Subtract;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Subtract;
}
else
{
intrinsic = NI_SSE2_Subtract;
}
break;
}
case GT_XOR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Xor;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Xor;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Xor;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Xor;
}
else
{
intrinsic = NI_SSE2_Xor;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_ADD:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AddScalar : NI_AdvSimd_Arm64_Add;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_AddScalar;
}
else
{
intrinsic = NI_AdvSimd_Add;
}
break;
}
case GT_AND:
{
intrinsic = NI_AdvSimd_And;
break;
}
case GT_AND_NOT:
{
intrinsic = NI_AdvSimd_BitwiseClear;
break;
}
case GT_DIV:
{
// TODO-AARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_DivideScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsFloating(simdBaseType));
assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
// "over shifting" is platform specific behavior. We will match the C# behavior
// this requires we mask with (sizeof(T) * 8) - 1 which ensures the shift cannot
// exceed the number of bits available in `T`. This is roughly equivalent to
// x % (sizeof(T) * 8), but that is "more expensive" and only the same for unsigned
// inputs, where-as we have a signed-input and so negative values would differ.
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmeticScalar;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogicalScalar;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogical;
}
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
if (op != GT_LSH)
{
op2 = gtNewOperNode(GT_NEG, TYP_INT, op2);
}
op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmeticScalar;
}
else
{
intrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftLogical;
}
}
break;
}
case GT_MUL:
{
assert(!varTypeIsLong(simdBaseType));
GenTree** scalarOp = nullptr;
if (varTypeIsArithmetic(op1))
{
// MultiplyByScalar requires the scalar op to be op2
std::swap(op1, op2);
scalarOp = &op2;
}
else if (varTypeIsArithmetic(op2))
{
scalarOp = &op2;
}
switch (JitType2PreciseVarType(simdBaseJitType))
{
case TYP_BYTE:
case TYP_UBYTE:
{
if (scalarOp != nullptr)
{
*scalarOp = gtNewSimdCreateBroadcastNode(type, *scalarOp, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
intrinsic = NI_AdvSimd_Multiply;
break;
}
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
case TYP_FLOAT:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_CreateScalarUnsafe,
simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_Arm64_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_Create, simdBaseJitType,
8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Arm64_Multiply;
}
if (simdSize == 8)
{
intrinsic = NI_AdvSimd_MultiplyScalar;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
intrinsic = NI_AdvSimd_Or;
break;
}
case GT_SUB:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_SubtractScalar : NI_AdvSimd_Arm64_Subtract;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_SubtractScalar;
}
else
{
intrinsic = NI_AdvSimd_Subtract;
}
break;
}
case GT_XOR:
{
intrinsic = NI_AdvSimd_Xor;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCeilNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Ceiling;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Ceiling;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_CeilingScalar : NI_AdvSimd_Arm64_Ceiling;
}
else
{
intrinsic = NI_AdvSimd_Ceiling;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareEqual;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareEqual;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_CompareEqual;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// tmp = (op1 == op2) i.e. compare for equality as if op1 and op2 are vector of int
// op1 = tmp
// op2 = Shuffle(tmp, (2, 3, 0, 1))
// result = BitwiseAnd(op1, op2)
//
// Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of
// respective long elements.
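// (I.e. a 64-bit lane compares equal iff both of its 32-bit halves compare equal, hence
// the AND of the lane-swapped comparison results.)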
GenTree* tmp =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
tmp = impCloneExpr(tmp, &op1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp for vector Equals"));
op2 = gtNewSimdHWIntrinsicNode(type, tmp, gtNewIconNode(SHUFFLE_ZWXY), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareEqual;
}
break;
}
case GT_GE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareGreaterThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = GreaterThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_GT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports > for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
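// For example (illustrative, byte): comparing 200 > 100 unsigned becomes (200 - 128) > (100 - 128),
// i.e. 72 > -28 as signed bytes, which preserves the unsigned ordering.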
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector GreaterThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// This should have been mutated by the above path
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareGreaterThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareGreaterThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
// Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit
// integers that comprise the longs op1 and op2.
//
// GreaterThan(op1, op2) can be expressed in terms of > relationship between 32-bit integers that
// comprise op1 and op2 as
// = (x1, y1) > (x2, y2)
// = (x1 > x2) || [(x1 == x2) && (y1 > y2)] - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
// t = (op1 > op2) - 32-bit signed comparison
// u = (op1Dup1 == op2Dup1) - 32-bit equality comparison
// v = (op1Dup2 > op2Dup2) - 32-bit unsigned comparison
//
// op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 > x2) in eq(1) above
// v = Shuffle(v, (2, 2, 0, 0)) - This corresponds to (y1 > y2) in eq(1) above
// u = Shuffle(u, (3, 3, 1, 1)) - This corresponds to (x1 == x2) in eq(1) above
// op2 = BitwiseAnd(v, u) - This corresponds to [(x1 == x2) && (y1 > y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector GreaterThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareLessThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = LessThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_LT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports < for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector LessThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// This should have been mutated by the above path
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareLessThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareLessThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
// Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit integers that comprise
// the longs op1 and op2.
//
// LessThan(op1, op2) can be expressed in terms of the < relationship between the 32-bit integers that
// comprise op1 and op2 as
// = (x1, y1) < (x2, y2)
// = (x1 < x2) || [(x1 == x2) && (y1 < y2)] - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
// t = (op1 < op2) - 32-bit signed comparison
// u = (op1Dup1 == op2Dup1) - 32-bit equality comparison
// v = (op1Dup2 < op2Dup2) - 32-bit unsigned comparison
//
// op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 < x2) in eq(1) above
// v = Shuffle(v, (2, 2, 0, 0)) - This corresponds to (y1 < y2) in eq(1) above
// u = Shuffle(u, (3, 3, 1, 1)) - This corresponds to (x1 == x2) in eq(1) above
// op2 = BitwiseAnd(v, u) - This corresponds to [(x1 == x2) && (y1 < y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector LessThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareLessThan;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareEqualScalar : NI_AdvSimd_Arm64_CompareEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareEqual;
}
break;
}
case GT_GE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareGreaterThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThanOrEqual;
}
break;
}
case GT_GT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic =
(simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanScalar : NI_AdvSimd_Arm64_CompareGreaterThan;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareLessThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThanOrEqual;
}
break;
}
case GT_LT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanScalar : NI_AdvSimd_Arm64_CompareLessThan;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThan;
}
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCmpOpAllNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
}
else
{
intrinsic = NI_Vector128_op_Equality;
}
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
getAllBitsSet = NI_Vector256_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Equality : NI_Vector128_op_Equality;
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 8)
{
intrinsic = NI_Vector64_op_Equality;
getAllBitsSet = NI_Vector64_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCmpOpAnyNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCndSelNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
assert(op3 != nullptr);
assert(op3->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
// TODO-XARCH-CQ: It's likely beneficial to have a dedicated CndSel node so we
// can special case when the condition is the result of various compare operations.
//
// When it is, the condition is AllBitsSet or Zero on a per-element basis and we
// could change this to be a Blend operation in lowering as an optimization.
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
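// The emulation below computes (op1 & op2) | (~op1 & op3): bits of op2 are selected where
// the corresponding bits of the mask op1 are set, and bits of op3 where they are clear.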
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector conditional select"));
// op2 = op2 & op1
op2 = gtNewSimdBinOpNode(GT_AND, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op3 = op3 & ~op1Dup
op3 = gtNewSimdBinOpNode(GT_AND_NOT, type, op3, op1Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op2 | op3
return gtNewSimdBinOpNode(GT_OR, type, op2, op3, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
GenTree* Compiler::gtNewSimdCreateBroadcastNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_Create;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
#if defined(TARGET_XARCH)
#if defined(TARGET_X86)
if (varTypeIsLong(simdBaseType) && !op1->IsIntegralConst())
{
// TODO-XARCH-CQ: It may be beneficial to emit the movq
// instruction, which takes a 64-bit memory address and
// works on 32-bit x86 systems.
unreached();
}
#endif // TARGET_X86
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_Create;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
hwIntrinsicID = NI_Vector64_Create;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdDotProdNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsArithmetic(type));
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(JITtype2varType(simdBaseJitType) == type);
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
if (simdSize == 32)
{
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_Dot;
}
else
{
assert(((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT)) ||
compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_Vector128_Dot;
}
#elif defined(TARGET_ARM64)
assert(!varTypeIsLong(simdBaseType));
intrinsic = (simdSize == 8) ? NI_Vector64_Dot : NI_Vector128_Dot;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdFloorNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
intrinsic = NI_AVX_Floor;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Floor;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_FloorScalar : NI_AdvSimd_Arm64_Floor;
}
else
{
intrinsic = NI_AdvSimd_Floor;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdGetElementNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic intrinsicId = NI_Vector128_GetElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
intrinsicId = NI_Vector256_GetElement;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
intrinsicId = NI_Vector64_GetElement;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
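// A non-constant index, or a constant index outside [0, count - 1], gets a runtime range check below.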
int immUpperBound = getSIMDVectorLength(simdSize, simdBaseType) - 1;
bool rangeCheckNeeded = !op2->OperIsConst();
if (!rangeCheckNeeded)
{
ssize_t imm8 = op2->AsIntCon()->IconValue();
rangeCheckNeeded = (imm8 < 0) || (imm8 > immUpperBound);
}
if (rangeCheckNeeded)
{
op2 = addRangeCheckForHWIntrinsic(op2, 0, immUpperBound);
}
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdMaxNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Max;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Max;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
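// SSE2 only supports Max directly for unsigned byte (pmaxub) and signed short (pmaxsw).
// For signed byte and unsigned short we bias the operands into the supported domain,
// compute the Max there, and then undo the bias on the result.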
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Max(op1, op2)
op1 = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Max;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Max;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MaxScalar : NI_AdvSimd_Arm64_Max;
}
else
{
intrinsic = NI_AdvSimd_Max;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Max"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Max"));
// op1 = op1 > op2
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdMinNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Min;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Min;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
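// SSE2 only supports Min directly for unsigned byte (pminub) and signed short (pminsw).
// For signed byte and unsigned short we bias the operands into the supported domain,
// compute the Min there, and then undo the bias on the result.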
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Min(op1, op2)
op1 = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Min;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Min;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MinScalar : NI_AdvSimd_Arm64_Min;
}
else
{
intrinsic = NI_AdvSimd_Min;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Min"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Min"));
// op1 = op1 < op2
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdNarrowNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
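// op1 and op2 hold elements twice as wide as simdBaseType. The result packs the narrowed
// elements of op1 into the lower half of the vector and those of op2 into the upper half.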
GenTree* tmp1;
GenTree* tmp2;
#if defined(TARGET_XARCH)
GenTree* tmp3;
GenTree* tmp4;
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// This is the same in principle as the other comments below; however, due to
// code formatting, it is too long to reasonably display here.
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U | 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8L, 8U, 9L, 9U, AL, AU, BL, BU | CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, -- | 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, -- | CL, --, DL, --, EL, --, FL, --
// tmp4 = Elements 0L, 1L, 2L, 3L, 8L, 9L, AL, BL | 4L, 5L, 6L, 7L, CL, DL, EL, FL
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L | 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector256.Create(0x0000FFFF).AsInt16();
// var tmp2 = Avx2.And(op1.AsInt16(), tmp1);
// var tmp3 = Avx2.And(op2.AsInt16(), tmp1);
// var tmp4 = Avx2.PackUnsignedSaturate(tmp2, tmp3);
// return Avx2.Permute4x64(tmp4.AsUInt64(), SHUFFLE_WYZX).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate, CORINFO_TYPE_USHORT,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0, 1 | 2, 3; 0L, 0U, 1L, 1U | 2L, 2U, 3L, 3U
// op2 = Elements 4, 5 | 6, 7; 4L, 4U, 5L, 5U | 6L, 6U, 7L, 7U
//
// tmp1 = Elements 0L, 4L, 0U, 4U | 2L, 6L, 2U, 6U
// tmp2 = Elements 1L, 5L, 1U, 5U | 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 1L, 4L, 5L | 2L, 3L, 6L, 7L
// return Elements 0L, 1L, 2L, 3L | 4L, 5L, 6L, 7L
//
// var tmp1 = Avx2.UnpackLow(op1, op2);
// var tmp2 = Avx2.UnpackHigh(op1, op2);
// var tmp3 = Avx2.UnpackLow(tmp1, tmp2);
// return Avx2.Permute4x64(tmp3.AsUInt64(), SHUFFLE_WYZX).AsUInt32();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
opBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1 | 2, 3
// op2 = Elements 4, 5 | 6, 7
//
// tmp1 = Elements 0, 1, 2, 3 | -, -, -, -
// tmp2 = Elements 4, 5, 6, 7
// return Elements 0, 1, 2, 3 | 4, 5, 6, 7
//
// var tmp1 = Avx.ConvertToVector128Single(op1).ToVector256Unsafe();
// var tmp2 = Avx.ConvertToVector128Single(op2);
// return Avx.InsertVector128(tmp1, tmp2, 1);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, gtNewIconNode(1), NI_AVX_InsertVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
// op1 = Elements 0, 1, 2, 3, 4, 5, 6, 7; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U, 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8, 9, A, B, C, D, E, F; 8L, 8U, 9L, 9U, AL, AU, BL, BU, CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --, 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, --, CL, --, DL, --, EL, --, FL, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector128.Create((ushort)(0x00FF)).AsSByte();
// var tmp2 = Sse2.And(op1.AsSByte(), tmp1);
// var tmp3 = Sse2.And(op2.AsSByte(), tmp1);
// return Sse2.PackUnsignedSaturate(tmp2, tmp3).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
// op1 = Elements 0, 1, 2, 3; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U
// op2 = Elements 4, 5, 6, 7; 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
//
// ...
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
// ...
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --
// tmp3 = Elements 4L, --, 5L, --, 6L, --, 7L, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Vector128.Create(0x0000FFFF).AsInt16();
// var tmp2 = Sse2.And(op1.AsInt16(), tmp1);
// var tmp3 = Sse2.And(op2.AsInt16(), tmp1);
// return Sse2.PackUnsignedSaturate(tmp2, tmp3).As<T>();
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate,
CORINFO_TYPE_USHORT, simdSize, isSimdAsHWIntrinsic);
}
else
{
// ...
//
// tmp1 = Elements 0L, 4L, 0U, 4U, 1L, 5L, 1U, 5U
// tmp2 = Elements 2L, 6L, 2U, 6U, 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 2L, 4L, 6L, 0U, 2U, 4U, 6U
// tmp4 = Elements 1L, 3L, 5L, 7L, 1U, 3U, 5U, 7U
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt16(), op2.AsUInt16());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt16(), op2.AsUInt16());
// var tmp3 = Sse2.UnpackLow(tmp1, tmp2);
// var tmp4 = Sse2.UnpackHigh(tmp1, tmp2);
// return Sse2.UnpackLow(tmp3, tmp4).As<T>();
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
GenTree* tmp2Dup;
tmp2 = impCloneExpr(tmp2, &tmp2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp2 for vector narrow"));
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp1Dup, tmp2Dup, NI_SSE2_UnpackHigh, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, tmp4, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
}
case TYP_INT:
case TYP_UINT:
{
// op1 = Elements 0, 1; 0L, 0U, 1L, 1U
// op2 = Elements 2, 3; 2L, 2U, 3L, 3U
//
// tmp1 = Elements 0L, 2L, 0U, 2U
// tmp2 = Elements 1L, 3L, 1U, 3U
// return Elements 0L, 1L, 2L, 3L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt32(), op2.AsUInt32());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt32(), op2.AsUInt32());
// return Sse2.UnpackLow(tmp1, tmp2).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1
// op2 = Elements 2, 3
//
// tmp1 = Elements 0, 1, -, -
// tmp2 = Elements 2, 3, -, -
// return Elements 0, 1, 2, 3
//
// var tmp1 = Sse2.ConvertToVector128Single(op1);
// var tmp2 = Sse2.ConvertToVector128Single(op2);
// return Sse.MoveLowToHigh(tmp1, tmp2);
CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op2, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
// var tmp1 = AdvSimd.Arm64.ConvertToSingleLower(op1);
// return AdvSimd.Arm64.ConvertToSingleUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_Arm64_ConvertToSingleUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = AdvSimd.ExtractNarrowingLower(op1);
// return AdvSimd.ExtractNarrowingUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_ExtractNarrowingUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
else if (varTypeIsFloating(simdBaseType))
{
// var tmp1 = op1.ToVector128Unsafe();
// var tmp2 = AdvSimd.InsertScalar(tmp1, 1, op2);
// return AdvSimd.Arm64.ConvertToSingleLower(tmp2);
CorInfoType tmp2BaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = op1.ToVector128Unsafe();
// var tmp2 = AdvSimd.InsertScalar(tmp1.AsUInt64(), 1, op2.AsUInt64());
// return AdvSimd.ExtractNarrowingLower(tmp2).As<T>();
CorInfoType tmp2BaseJitType = varTypeIsSigned(simdBaseType) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
GenTree* Compiler::gtNewSimdSqrtNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Sqrt;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Sqrt;
}
else
{
intrinsic = NI_SSE2_Sqrt;
}
#elif defined(TARGET_ARM64)
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_SqrtScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Sqrt;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdSumNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp = nullptr;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType);
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
// HorizontalAdd combines pairs so we need log2(vectorLength) passes to sum all elements together.
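// For example, a Vector128<float> has 4 elements, so two HorizontalAdd(op1, op1) passes
// leave the total in element 0, which ToScalar then extracts.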
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
if (simdSize == 32)
{
// Minus 1 because for the last pass we split the vector to low / high and add them together.
haddCount -= 1;
if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_HorizontalAdd;
}
}
else if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE3));
intrinsic = NI_SSE3_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSSE3));
intrinsic = NI_SSSE3_HorizontalAdd;
}
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
intrinsic = (simdBaseType == TYP_FLOAT) ? NI_SSE_Add : NI_SSE2_Add;
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(0x01, TYP_INT), NI_AVX_ExtractVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdHWIntrinsicNode(simdType, tmp, NI_Vector256_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, tmp, intrinsic, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
{
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 8)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
if (simdSize == 8)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_DOUBLE:
case TYP_LONG:
case TYP_ULONG:
{
if (simdSize == 16)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op,
var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* op2 = nullptr;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_NEG:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
}
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// Zero - op1
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case GT_NOT:
{
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = (simdSize == 32) ? NI_Vector256_get_AllBitsSet : NI_Vector128_get_AllBitsSet;
op2 = gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 ^ AllBitsSet
return gtNewSimdBinOpNode(GT_XOR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
case GT_NEG:
{
if (varTypeIsSigned(simdBaseType))
{
if (simdBaseType == TYP_LONG)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else
{
intrinsic = NI_AdvSimd_Negate;
}
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
// Zero - op1
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
case GT_NOT:
{
return gtNewSimdHWIntrinsicNode(type, op1, NI_AdvSimd_Not, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
}
GenTree* Compiler::gtNewSimdWidenLowerNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 =
gtNewSimdHWIntrinsicNode(type, op1, NI_Vector256_GetLower, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if ((simdBaseType == TYP_FLOAT) || compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE2_ConvertToVector128Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
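// Pre-SSE4.1 fallback: widen by interleaving. For unsigned types the low elements of op1
// are unpacked with zero; for signed types they are unpacked with the sign mask produced
// by CompareLessThan(op1, zero), which is all-ones for each negative element.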
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen lower"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
tmp1 = op1;
}
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, 8, isSimdAsHWIntrinsic);
if (simdSize == 8)
{
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return tmp1;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
GenTree* Compiler::gtNewSimdWidenUpperNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(1), NI_AVX_ExtractVector128, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_SSE2_ConvertToVector128Double, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(8), NI_SSE2_ShiftRightLogical128BitLane,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
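// Pre-SSE4.1 fallback: same interleaving approach as WidenLower, but UnpackHigh widens the
// upper elements of op1 with zero (unsigned) or with the sign mask from CompareLessThan(op1, zero) (signed).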
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
GenTree* zero;
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDoubleUpper;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningUpper;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningUpper;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
ssize_t index = 8 / genTypeSize(simdBaseType);
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
zero = gtNewSimdZeroNode(TYP_SIMD16, simdBaseJitType, 16, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, zero, gtNewIconNode(index), NI_AdvSimd_ExtractVector128,
simdBaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
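//----------------------------------------------------------------------------------------------
// Compiler::gtNewSimdWithElementNode: Creates a new simd WithElement node.
//
// Arguments:
//    type                - The return type of the node
//    op1                 - The SIMD vector to update
//    op2                 - The constant index of the element to set
//    op3                 - The new value for the element
//    simdBaseJitType     - The base JIT type of the SIMD vector
//    simdSize            - The size of the SIMD vector, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created node that sets the element at the given index.
//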
GenTree* Compiler::gtNewSimdWithElementNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_WithElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op2->IsCnsIntOrI());
ssize_t imm8 = op2->AsIntCon()->IconValue();
ssize_t count = simdSize / genTypeSize(simdBaseType);
assert((0 <= imm8) && (imm8 < count));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41_X64));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_WithElement;
}
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_LONG:
case TYP_ULONG:
case TYP_DOUBLE:
if (simdSize == 8)
{
return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
break;
case TYP_FLOAT:
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
break;
default:
unreached();
}
hwIntrinsicID = NI_AdvSimd_Insert;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
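//----------------------------------------------------------------------------------------------
// Compiler::gtNewSimdZeroNode: Creates a new simd Zero node.
//
// Arguments:
//    type                - The return type of the node
//    simdBaseJitType     - The base JIT type of the SIMD vector
//    simdSize            - The size of the SIMD vector, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created get_Zero node.
//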
GenTree* Compiler::gtNewSimdZeroNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
intrinsic = (simdSize == 32) ? NI_Vector256_get_Zero : NI_Vector128_get_Zero;
#elif defined(TARGET_ARM64)
intrinsic = (simdSize > 8) ? NI_Vector128_get_Zero : NI_Vector64_get_Zero;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
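//----------------------------------------------------------------------------------------------
// Compiler::gtNewScalarHWIntrinsicNode: Creates a scalar hardware intrinsic node.
//
// These overloads take zero to three operands; the resulting node has no SIMD base type
// (CORINFO_TYPE_UNDEF), a simd size of 0, and is not a SimdAsHWIntrinsic node.
//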
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2, op3);
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoad() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
NamedIntrinsic intrinsicId = GetHWIntrinsicId();
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
if (category == HW_Category_MemoryLoad)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryLoad(GetHWIntrinsicId()))
{
// Some intrinsics (without HW_Category_MemoryLoad) also have MemoryLoad semantics
// This is generally because they have both vector and pointer overloads, e.g.,
// * Vector128<byte> BroadcastScalarToVector128(Vector128<byte> value)
// * Vector128<byte> BroadcastScalarToVector128(byte* source)
// So, we need to check whether the argument's type is a memory reference or Vector128
if ((category == HW_Category_SimpleSIMD) || (category == HW_Category_SIMDScalar))
{
assert(GetOperandCount() == 1);
switch (intrinsicId)
{
case NI_SSE41_ConvertToVector128Int16:
case NI_SSE41_ConvertToVector128Int32:
case NI_SSE41_ConvertToVector128Int64:
case NI_AVX2_BroadcastScalarToVector128:
case NI_AVX2_BroadcastScalarToVector256:
case NI_AVX2_ConvertToVector256Int16:
case NI_AVX2_ConvertToVector256Int32:
case NI_AVX2_ConvertToVector256Int64:
{
CorInfoType auxiliaryType = GetAuxiliaryJitType();
if (auxiliaryType == CORINFO_TYPE_PTR)
{
return true;
}
assert(auxiliaryType == CORINFO_TYPE_UNDEF);
return false;
}
default:
{
unreached();
}
}
}
else if (category == HW_Category_IMM)
{
// Do we have less than 3 operands?
if (GetOperandCount() < 3)
{
return false;
}
else if (HWIntrinsicInfo::isAVX2GatherIntrinsic(GetHWIntrinsicId()))
{
return true;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(GetHWIntrinsicId());
if (category == HW_Category_MemoryStore)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryStore(GetHWIntrinsicId()) &&
(category == HW_Category_IMM || category == HW_Category_Scalar))
{
// Some intrinsics (without HW_Category_MemoryStore) also have MemoryStore semantics
// Bmi2/Bmi2.X64.MultiplyNoFlags may return the lower half of the result via an out argument
// unsafe ulong MultiplyNoFlags(ulong left, ulong right, ulong* low)
//
// So, the 3-argument form is MemoryStore
if (GetOperandCount() == 3)
{
switch (GetHWIntrinsicId())
{
case NI_BMI2_MultiplyNoFlags:
case NI_BMI2_X64_MultiplyNoFlags:
return true;
default:
return false;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad or MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoadOrStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
return OperIsMemoryLoad() || OperIsMemoryStore();
#else
return false;
#endif
}
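//------------------------------------------------------------------------
// GetHWIntrinsicId: Returns the intrinsic id of this node, validating that the
// node's operand count matches the count expected for the intrinsic (when known).
//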
NamedIntrinsic GenTreeHWIntrinsic::GetHWIntrinsicId() const
{
NamedIntrinsic id = gtHWIntrinsicId;
int numArgs = HWIntrinsicInfo::lookupNumArgs(id);
bool numArgsUnknown = numArgs < 0;
assert((static_cast<size_t>(numArgs) == GetOperandCount()) || numArgsUnknown);
return id;
}
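//------------------------------------------------------------------------
// SetHWIntrinsicId: Sets the intrinsic id of this node. In DEBUG, validates that the
// new intrinsic's expected operand count (when known) matches the node's current operand count.
//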
void GenTreeHWIntrinsic::SetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
#ifdef DEBUG
size_t oldOperandCount = GetOperandCount();
int newOperandCount = HWIntrinsicInfo::lookupNumArgs(intrinsicId);
bool newCountUnknown = newOperandCount < 0;
// We'll choose to trust the programmer here.
assert((oldOperandCount == static_cast<size_t>(newOperandCount)) || newCountUnknown);
#endif // DEBUG
gtHWIntrinsicId = intrinsicId;
}
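//------------------------------------------------------------------------
// Equals: Checks whether two GenTreeHWIntrinsic nodes are equal: same type, intrinsic id,
// SIMD base type, SIMD size, auxiliary type, other register, and operands.
//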
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeHWIntrinsic::Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetHWIntrinsicId() == op2->GetHWIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
(op1->GetAuxiliaryType() == op2->GetAuxiliaryType()) && (op1->GetOtherReg() == op2->GetOtherReg()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_HW_INTRINSICS
//---------------------------------------------------------------------------------------
// gtNewMustThrowException:
// create a throw node (calling into JIT helper) that must be thrown.
// The result would be a comma node: COMMA(jithelperthrow(void), x) where x's type should be specified.
//
// Arguments
// helper - JIT helper ID
// type - return type of the node
// clsHnd - class handle of the struct, used when 'type' is TYP_STRUCT
//
// Return Value
// pointer to the throw node
//
GenTree* Compiler::gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd)
{
GenTreeCall* node = gtNewHelperCallNode(helper, TYP_VOID);
node->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
if (type != TYP_VOID)
{
unsigned dummyTemp = lvaGrabTemp(true DEBUGARG("dummy temp of must thrown exception"));
if (type == TYP_STRUCT)
{
lvaSetStruct(dummyTemp, clsHnd, false);
type = lvaTable[dummyTemp].lvType; // struct type is normalized
}
else
{
lvaTable[dummyTemp].lvType = type;
}
GenTree* dummyNode = gtNewLclvNode(dummyTemp, type);
return gtNewOperNode(GT_COMMA, type, node, dummyNode);
}
return node;
}
//---------------------------------------------------------------------------------------
// InitializeStructReturnType:
// Initialize the Return Type Descriptor for a method that returns a struct type
//
// Arguments
// comp - Compiler Instance
// retClsHnd - VM handle to the struct type returned by the method
// callConv - calling convention of the method
//
// Return Value
// None
//
void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension callConv)
{
assert(!m_inited);
#if FEATURE_MULTIREG_RET
assert(retClsHnd != NO_CLASS_HANDLE);
unsigned structSize = comp->info.compCompHnd->getClassSize(retClsHnd);
Compiler::structPassingKind howToReturnStruct;
var_types returnType = comp->getReturnTypeForStruct(retClsHnd, callConv, &howToReturnStruct, structSize);
switch (howToReturnStruct)
{
case Compiler::SPK_EnclosingType:
m_isEnclosingType = true;
FALLTHROUGH;
case Compiler::SPK_PrimitiveType:
{
assert(returnType != TYP_UNKNOWN);
assert(returnType != TYP_STRUCT);
m_regType[0] = returnType;
break;
}
case Compiler::SPK_ByValueAsHfa:
{
assert(varTypeIsStruct(returnType));
var_types hfaType = comp->GetHfaType(retClsHnd);
// We should have an hfa struct type
assert(varTypeIsValidHfaType(hfaType));
// Note that the retail build issues a warning about a potential division by zero without this Max function
unsigned elemSize = Max((unsigned)1, EA_SIZE_IN_BYTES(emitActualTypeSize(hfaType)));
// The size of this struct should be evenly divisible by elemSize
assert((structSize % elemSize) == 0);
unsigned hfaCount = (structSize / elemSize);
for (unsigned i = 0; i < hfaCount; ++i)
{
m_regType[i] = hfaType;
}
if (comp->compFloatingPointUsed == false)
{
comp->compFloatingPointUsed = true;
}
break;
}
case Compiler::SPK_ByValue:
{
assert(varTypeIsStruct(returnType));
#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
comp->eeGetSystemVAmd64PassStructInRegisterDescriptor(retClsHnd, &structDesc);
assert(structDesc.passedInRegisters);
for (int i = 0; i < structDesc.eightByteCount; i++)
{
assert(i < MAX_RET_REG_COUNT);
m_regType[i] = comp->GetEightByteType(structDesc, i);
}
#elif defined(TARGET_ARM64)
// a non-HFA struct returned using two registers
//
assert((structSize > TARGET_POINTER_SIZE) && (structSize <= (2 * TARGET_POINTER_SIZE)));
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#elif defined(TARGET_X86)
// an 8-byte struct returned using two registers
assert(structSize == 8);
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#else // TARGET_XXX
// This target needs support here!
//
NYI("Unsupported TARGET returning a TYP_STRUCT in InitializeStructReturnType");
#endif // UNIX_AMD64_ABI
break; // for case SPK_ByValue
}
case Compiler::SPK_ByReference:
// We are returning using the return buffer argument
// There are no return registers
break;
default:
unreached(); // By the contract of getReturnTypeForStruct we should never get here.
} // end of switch (howToReturnStruct)
#endif // FEATURE_MULTIREG_RET
#ifdef DEBUG
m_inited = true;
#endif
}
//---------------------------------------------------------------------------------------
// InitializeLongReturnType:
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
//
void ReturnTypeDesc::InitializeLongReturnType()
{
assert(!m_inited);
#if defined(TARGET_X86) || defined(TARGET_ARM)
// Sets up a ReturnTypeDesc for returning a long using two registers
//
assert(MAX_RET_REG_COUNT >= 2);
m_regType[0] = TYP_INT;
m_regType[1] = TYP_INT;
#else // not (TARGET_X86 or TARGET_ARM)
m_regType[0] = TYP_LONG;
#endif // TARGET_X86 or TARGET_ARM
#ifdef DEBUG
m_inited = true;
#endif
}
//-------------------------------------------------------------------
// GetABIReturnReg: Return ith return register as per target ABI
//
// Arguments:
// idx - Index of the return register.
// The first return register has an index of 0 and so on.
//
// Return Value:
// Returns ith return register as per target ABI.
//
// Notes:
// x86 and ARM return long in multiple registers.
// ARM and ARM64 return HFA struct in multiple registers.
//
regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx) const
{
unsigned count = GetReturnRegCount();
assert(idx < count);
regNumber resultReg = REG_NA;
#ifdef UNIX_AMD64_ABI
var_types regType0 = GetReturnRegType(0);
if (idx == 0)
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET;
}
else
{
noway_assert(varTypeUsesFloatReg(regType0));
resultReg = REG_FLOATRET;
}
}
else if (idx == 1)
{
var_types regType1 = GetReturnRegType(1);
if (varTypeIsIntegralOrI(regType1))
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET_1;
}
else
{
resultReg = REG_INTRET;
}
}
else
{
noway_assert(varTypeUsesFloatReg(regType1));
if (varTypeUsesFloatReg(regType0))
{
resultReg = REG_FLOATRET_1;
}
else
{
resultReg = REG_FLOATRET;
}
}
}
#elif defined(TARGET_X86)
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
#elif defined(TARGET_ARM)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
// Ints are returned in one return register.
// Longs are returned in two return registers.
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
}
else
{
// Floats are returned in one return register (f0).
// Doubles are returned in one return register (d0).
// HFA structs are returned in up to four registers.
assert(idx < MAX_RET_REG_COUNT); // Up to 4 return registers for HFA's
if (regType == TYP_DOUBLE)
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx * 2); // d0, d1, d2 or d3
}
else
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // f0, f1, f2 or f3
}
}
#elif defined(TARGET_ARM64)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
noway_assert(idx < 2); // Up to 2 return registers for 16-byte structs
resultReg = (idx == 0) ? REG_INTRET : REG_INTRET_1; // X0 or X1
}
else
{
noway_assert(idx < 4); // Up to 4 return registers for HFA's
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // V0, V1, V2 or V3
}
#endif // TARGET_XXX
assert(resultReg != REG_NA);
return resultReg;
}
//--------------------------------------------------------------------------------
// GetABIReturnRegs: get the mask of return registers as per target arch ABI.
//
// Arguments:
// None
//
// Return Value:
// reg mask of return registers in which the return type is returned.
//
// Note:
// This routine can be used when the caller is not particular about the order
// of return registers and wants to know the set of return registers.
//
// static
regMaskTP ReturnTypeDesc::GetABIReturnRegs() const
{
regMaskTP resultMask = RBM_NONE;
unsigned count = GetReturnRegCount();
for (unsigned i = 0; i < count; ++i)
{
resultMask |= genRegMask(GetABIReturnReg(i));
}
return resultMask;
}
//------------------------------------------------------------------------
// The following functions manage the gtRsvdRegs set of temporary registers
// created by LSRA during code generation.
//------------------------------------------------------------------------
// AvailableTempRegCount: return the number of available temporary registers in the (optional) given set
// (typically, RBM_ALLINT or RBM_ALLFLOAT).
//
// Arguments:
// mask - (optional) Check for available temporary registers only in this set.
//
// Return Value:
// Count of available temporary registers in given set.
//
unsigned GenTree::AvailableTempRegCount(regMaskTP mask /* = (regMaskTP)-1 */) const
{
return genCountBits(gtRsvdRegs & mask);
}
//------------------------------------------------------------------------
// GetSingleTempReg: There is expected to be exactly one available temporary register
// in the given mask in the gtRsvdRegs set. Get that register. No future calls to get
// a temporary register are expected. Removes the register from the set, but only in
// DEBUG to avoid doing unnecessary work in non-DEBUG builds.
//
// Arguments:
// mask - (optional) Get an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::GetSingleTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) == 1);
regNumber tempReg = genRegNumFromMask(availableSet);
INDEBUG(gtRsvdRegs &= ~availableSet;) // Remove the register from the set, so it can't be used again.
return tempReg;
}
//------------------------------------------------------------------------
// ExtractTempReg: Find the lowest number temporary register from the gtRsvdRegs set
// that is also in the optional given mask (typically, RBM_ALLINT or RBM_ALLFLOAT),
// and return it. Remove this register from the temporary register set, so it won't
// be returned again.
//
// Arguments:
// mask - (optional) Extract an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::ExtractTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) >= 1);
regMaskTP tempRegMask = genFindLowestBit(availableSet);
gtRsvdRegs &= ~tempRegMask;
return genRegNumFromMask(tempRegMask);
}
//------------------------------------------------------------------------
// GetLclOffs: if `this` is a field or a field address, it returns the offset
// of the field inside the struct; otherwise it returns 0.
//
// Return Value:
// The offset value.
//
uint16_t GenTreeLclVarCommon::GetLclOffs() const
{
if (OperIsLocalField())
{
return AsLclFld()->GetLclOffs();
}
else
{
return 0;
}
}
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GetResultOpNumForFMA: check if the result is written into one of the operands.
// In the case that none of the operands is overwritten, check if any of them is a last use.
//
// Return Value:
//    The number of the operand that is overwritten or is a last use. 0 is the default value, where the result is
//    written into a destination that is not one of the source operands and there is no last use op.
//
unsigned GenTreeHWIntrinsic::GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3)
{
// only FMA intrinsic node should call into this function
assert(HWIntrinsicInfo::lookupIsa(gtHWIntrinsicId) == InstructionSet_FMA);
if (use != nullptr && use->OperIs(GT_STORE_LCL_VAR))
{
// For store_lcl_var, check if any op is overwritten
GenTreeLclVarCommon* overwritten = use->AsLclVarCommon();
unsigned overwrittenLclNum = overwritten->GetLclNum();
if (op1->IsLocal() && op1->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 1;
}
else if (op2->IsLocal() && op2->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 2;
}
else if (op3->IsLocal() && op3->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 3;
}
}
// If no overwritten op, check if there is any last use op
// https://github.com/dotnet/runtime/issues/62215
if (op1->OperIs(GT_LCL_VAR) && op1->IsLastUse(0))
return 1;
else if (op2->OperIs(GT_LCL_VAR) && op2->IsLastUse(0))
return 2;
else if (op3->OperIs(GT_LCL_VAR) && op3->IsLastUse(0))
return 3;
return 0;
}
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
#ifdef TARGET_ARM
//------------------------------------------------------------------------
// IsOffsetMisaligned: check if the field needs a special handling on arm.
//
// Return Value:
// true if it is a float field with a misaligned offset, false otherwise.
//
bool GenTreeLclFld::IsOffsetMisaligned() const
{
if (varTypeIsFloating(gtType))
{
return ((m_lclOffs % emitTypeSize(TYP_FLOAT)) != 0);
}
return false;
}
#endif // TARGET_ARM
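//------------------------------------------------------------------------
// IsInvariant: check if the tree is invariant, i.e. a constant or the address of a local.
//
// Return Value:
//    true if the tree is invariant, false otherwise.
//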
bool GenTree::IsInvariant() const
{
return OperIsConst() || Compiler::impIsAddressInLocal(this);
}
//------------------------------------------------------------------------
// IsNeverNegative: returns true if the given tree is known to be never
// negative, i.e. the sign bit will always be zero.
// Only valid for integral types.
//
// Arguments:
// comp - Compiler object, needed for IntegralRange::ForNode
//
// Return Value:
// true if the given tree is known to be never negative
//
bool GenTree::IsNeverNegative(Compiler* comp) const
{
assert(varTypeIsIntegral(this));
if (IsIntegralConst())
{
return AsIntConCommon()->IntegralValue() >= 0;
}
// TODO-Casts: extend IntegralRange to handle constants
return IntegralRange::ForNode((GenTree*)this, comp).IsPositive();
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#include "hwintrinsic.h"
#include "simd.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
/*****************************************************************************/
const unsigned char GenTree::gtOperKindTable[] = {
#define GTNODE(en, st, cm, ok) ((ok)&GTK_MASK) + GTK_COMMUTE * cm,
#include "gtlist.h"
};
#ifdef DEBUG
const GenTreeDebugOperKind GenTree::gtDebugOperKindTable[] = {
#define GTNODE(en, st, cm, ok) static_cast<GenTreeDebugOperKind>((ok)&DBK_MASK),
#include "gtlist.h"
};
#endif // DEBUG
/*****************************************************************************
*
* The types of different GenTree nodes
*/
#ifdef DEBUG
#define INDENT_SIZE 3
//--------------------------------------------
//
// IndentStack: This struct is used, along with its related enums and strings,
// to control both the indentation and the printing of arcs.
//
// Notes:
// The mode of printing is set in the Constructor, using its 'compiler' argument.
// Currently it only prints arcs when fgOrder == fgOrderLinear.
// The type of arc to print is specified by the IndentInfo enum, and is controlled
// by the caller of the Push() method.
enum IndentChars
{
ICVertical,
ICBottom,
ICTop,
ICMiddle,
ICDash,
ICTerminal,
ICError,
IndentCharCount
};
// clang-format off
// Sets of strings for different dumping options vert bot top mid dash embedded terminal error
static const char* emptyIndents[IndentCharCount] = { " ", " ", " ", " ", " ", "", "?" };
static const char* asciiIndents[IndentCharCount] = { "|", "\\", "/", "+", "-", "*", "?" };
static const char* unicodeIndents[IndentCharCount] = { "\xe2\x94\x82", "\xe2\x94\x94", "\xe2\x94\x8c", "\xe2\x94\x9c", "\xe2\x94\x80", "\xe2\x96\x8c", "?" };
// clang-format on
typedef ArrayStack<Compiler::IndentInfo> IndentInfoStack;
struct IndentStack
{
IndentInfoStack stack;
const char** indents;
// Constructor for IndentStack. Uses 'compiler' to determine the mode of printing.
IndentStack(Compiler* compiler) : stack(compiler->getAllocator(CMK_DebugOnly))
{
if (compiler->asciiTrees)
{
indents = asciiIndents;
}
else
{
indents = unicodeIndents;
}
}
// Return the depth of the current indentation.
unsigned Depth()
{
return stack.Height();
}
// Push a new indentation onto the stack, of the given type.
void Push(Compiler::IndentInfo info)
{
stack.Push(info);
}
// Pop the most recent indentation type off the stack.
Compiler::IndentInfo Pop()
{
return stack.Pop();
}
// Print the current indentation and arcs.
void print()
{
unsigned indentCount = Depth();
for (unsigned i = 0; i < indentCount; i++)
{
unsigned index = indentCount - 1 - i;
switch (stack.Top(index))
{
case Compiler::IndentInfo::IINone:
printf(" ");
break;
case Compiler::IndentInfo::IIArc:
if (index == 0)
{
printf("%s%s%s", indents[ICMiddle], indents[ICDash], indents[ICDash]);
}
else
{
printf("%s ", indents[ICVertical]);
}
break;
case Compiler::IndentInfo::IIArcBottom:
printf("%s%s%s", indents[ICBottom], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIArcTop:
printf("%s%s%s", indents[ICTop], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIError:
printf("%s%s%s", indents[ICError], indents[ICDash], indents[ICDash]);
break;
default:
unreached();
}
}
printf("%s", indents[ICTerminal]);
}
};
//------------------------------------------------------------------------
// printIndent: This is a static method which simply invokes the 'print'
// method on its 'indentStack' argument.
//
// Arguments:
// indentStack - specifies the information for the indentation & arcs to be printed
//
// Notes:
// This method exists to localize the checking for the case where indentStack is null.
static void printIndent(IndentStack* indentStack)
{
if (indentStack == nullptr)
{
return;
}
indentStack->print();
}
#endif
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* opNames[] = {
#define GTNODE(en, st, cm, ok) #en,
#include "gtlist.h"
};
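//------------------------------------------------------------------------
// OpName: Returns the name of the given oper, as declared in gtlist.h.
//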
const char* GenTree::OpName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opNames));
return opNames[op];
}
#endif
#if MEASURE_NODE_SIZE
static const char* opStructNames[] = {
#define GTNODE(en, st, cm, ok) #st,
#include "gtlist.h"
};
const char* GenTree::OpStructName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opStructNames));
return opStructNames[op];
}
#endif
//
// We allocate tree nodes in 2 different sizes:
// - TREE_NODE_SZ_SMALL for most nodes
// - TREE_NODE_SZ_LARGE for the few nodes (such as calls) that have
// more fields and take up a lot more space.
//
/* GT_COUNT'th oper is overloaded as 'undefined oper', so allocate storage for GT_COUNT'th oper also */
/* static */
unsigned char GenTree::s_gtNodeSizes[GT_COUNT + 1];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
unsigned char GenTree::s_gtTrueSizes[GT_COUNT + 1]{
#define GTNODE(en, st, cm, ok) sizeof(st),
#include "gtlist.h"
};
#endif // NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
#if COUNT_AST_OPERS
unsigned GenTree::s_gtNodeCounts[GT_COUNT + 1] = {0};
#endif // COUNT_AST_OPERS
/* static */
void GenTree::InitNodeSize()
{
/* Set all sizes to 'small' first */
for (unsigned op = 0; op <= GT_COUNT; op++)
{
GenTree::s_gtNodeSizes[op] = TREE_NODE_SZ_SMALL;
}
// Now set all of the appropriate entries to 'large'
CLANG_FORMAT_COMMENT_ANCHOR;
// clang-format off
if (GlobalJitOptions::compFeatureHfa
#if defined(UNIX_AMD64_ABI)
|| true
#endif // defined(UNIX_AMD64_ABI)
)
{
// On ARM32, ARM64 and System V, for struct returns
// there is code that creates a GT_ASG tree whose source is a CopyObj.
// CopyObj is a large node and the GT_ASG is small, which triggers an exception.
GenTree::s_gtNodeSizes[GT_ASG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RETURN] = TREE_NODE_SZ_LARGE;
}
GenTree::s_gtNodeSizes[GT_CALL] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CAST] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FTN_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOUNDS_CHECK] = TREE_NODE_SZ_SMALL;
GenTree::s_gtNodeSizes[GT_ARR_ELEM] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_OFFSET] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RET_EXPR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FIELD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CMPXCHG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_QMARK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_STORE_DYN_BLK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INTRINSIC] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ALLOCOBJ] = TREE_NODE_SZ_LARGE;
#if USE_HELPERS_FOR_INT_DIV
GenTree::s_gtNodeSizes[GT_DIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UDIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_MOD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UMOD] = TREE_NODE_SZ_LARGE;
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
GenTree::s_gtNodeSizes[GT_PUTARG_STK] = TREE_NODE_SZ_LARGE;
#if FEATURE_ARG_SPLIT
GenTree::s_gtNodeSizes[GT_PUTARG_SPLIT] = TREE_NODE_SZ_LARGE;
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
assert(GenTree::s_gtNodeSizes[GT_RETURN] == GenTree::s_gtNodeSizes[GT_ASG]);
// This list of assertions should come to contain all GenTree subtypes that are declared
// "small".
assert(sizeof(GenTreeLclFld) <= GenTree::s_gtNodeSizes[GT_LCL_FLD]);
assert(sizeof(GenTreeLclVar) <= GenTree::s_gtNodeSizes[GT_LCL_VAR]);
static_assert_no_msg(sizeof(GenTree) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeUnOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeVal) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntConCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhysReg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLngCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeDblCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStrCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVarCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclFld) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCC) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCast) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBox) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeField) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFieldList) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeColon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCall) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeCmpXchg) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFptrVal) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeQmark) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIntrinsic) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndexAddr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrLen) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBoundsChk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArrElem) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrOffs) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndir) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreInd) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAddrMode) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeBlk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreDynBlk) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeILOffset) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeClsVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArgPlace) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhiArg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAllocObj) <= TREE_NODE_SZ_LARGE); // *** large node
#ifndef FEATURE_PUT_STRUCT_ARG_STK
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_SMALL);
#else // FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_LARGE);
#if FEATURE_ARG_SPLIT
static_assert_no_msg(sizeof(GenTreePutArgSplit) <= TREE_NODE_SZ_LARGE);
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
#ifdef FEATURE_SIMD
static_assert_no_msg(sizeof(GenTreeSIMD) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
static_assert_no_msg(sizeof(GenTreeHWIntrinsic) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_HW_INTRINSICS
// clang-format on
}
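//------------------------------------------------------------------------
// GetNodeSize: Returns the allocation size (small or large, in bytes) recorded for this node's oper.
//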
size_t GenTree::GetNodeSize() const
{
return GenTree::s_gtNodeSizes[gtOper];
}
#ifdef DEBUG
bool GenTree::IsNodeProperlySized() const
{
size_t size;
if (gtDebugFlags & GTF_DEBUG_NODE_SMALL)
{
size = TREE_NODE_SZ_SMALL;
}
else
{
assert(gtDebugFlags & GTF_DEBUG_NODE_LARGE);
size = TREE_NODE_SZ_LARGE;
}
return GenTree::s_gtNodeSizes[gtOper] <= size;
}
#endif
//------------------------------------------------------------------------
// ReplaceWith: replace this with the src node. The source must be an isolated node
// and cannot be used after the replacement.
//
// Arguments:
// src - source tree, that replaces this.
// comp - the compiler instance to transfer annotations for arrays.
//
void GenTree::ReplaceWith(GenTree* src, Compiler* comp)
{
// The source may be big only if the target is also a big node
assert((gtDebugFlags & GTF_DEBUG_NODE_LARGE) || GenTree::s_gtNodeSizes[src->gtOper] == TREE_NODE_SZ_SMALL);
// The check is effective only if nodes have been already threaded.
assert((src->gtPrev == nullptr) && (src->gtNext == nullptr));
RecordOperBashing(OperGet(), src->OperGet()); // nop unless NODEBASH_STATS is enabled
GenTree* prev = gtPrev;
GenTree* next = gtNext;
// The VTable pointer is copied intentionally here
memcpy((void*)this, (void*)src, src->GetNodeSize());
this->gtPrev = prev;
this->gtNext = next;
#ifdef DEBUG
gtSeqNum = 0;
#endif
// Transfer any annotations.
if (src->OperGet() == GT_IND && src->gtFlags & GTF_IND_ARR_INDEX)
{
ArrayInfo arrInfo;
bool b = comp->GetArrayInfoMap()->Lookup(src, &arrInfo);
assert(b);
comp->GetArrayInfoMap()->Set(this, arrInfo);
}
DEBUG_DESTROY_NODE(src);
}
/*****************************************************************************
*
* When 'NODEBASH_STATS' is enabled in "jit.h" we record all instances of
* an existing GenTree node having its operator changed. This can be useful
* for two (related) things - to see what is being bashed (and what isn't),
* and to verify that the existing choices for what nodes are marked 'large'
* are reasonable (to minimize "wasted" space).
*
* And yes, the hash function / logic is simplistic, but it is conflict-free
* and transparent for what we need.
*/
#if NODEBASH_STATS
#define BASH_HASH_SIZE 211
inline unsigned hashme(genTreeOps op1, genTreeOps op2)
{
return ((op1 * 104729) ^ (op2 * 56569)) % BASH_HASH_SIZE;
}
struct BashHashDsc
{
unsigned __int32 bhFullHash; // the hash value (unique for all old->new pairs)
unsigned __int32 bhCount; // the same old->new bashings seen so far
unsigned __int8 bhOperOld; // original gtOper
unsigned __int8 bhOperNew; // new gtOper
};
static BashHashDsc BashHash[BASH_HASH_SIZE];
void GenTree::RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{
unsigned hash = hashme(operOld, operNew);
BashHashDsc* desc = BashHash + hash;
if (desc->bhFullHash != hash)
{
noway_assert(desc->bhCount == 0); // if this ever fires, need fix the hash fn
desc->bhFullHash = hash;
}
desc->bhCount += 1;
desc->bhOperOld = operOld;
desc->bhOperNew = operNew;
}
void GenTree::ReportOperBashing(FILE* f)
{
unsigned total = 0;
fflush(f);
fprintf(f, "\n");
fprintf(f, "Bashed gtOper stats:\n");
fprintf(f, "\n");
fprintf(f, " Old operator New operator #bytes old->new Count\n");
fprintf(f, " ---------------------------------------------------------------\n");
for (unsigned h = 0; h < BASH_HASH_SIZE; h++)
{
unsigned count = BashHash[h].bhCount;
if (count == 0)
continue;
unsigned opOld = BashHash[h].bhOperOld;
unsigned opNew = BashHash[h].bhOperNew;
fprintf(f, " GT_%-13s -> GT_%-13s [size: %3u->%3u] %c %7u\n", OpName((genTreeOps)opOld),
OpName((genTreeOps)opNew), s_gtTrueSizes[opOld], s_gtTrueSizes[opNew],
(s_gtTrueSizes[opOld] < s_gtTrueSizes[opNew]) ? 'X' : ' ', count);
total += count;
}
fprintf(f, "\n");
fprintf(f, "Total bashings: %u\n", total);
fprintf(f, "\n");
fflush(f);
}
#endif // NODEBASH_STATS
/*****************************************************************************/
#if MEASURE_NODE_SIZE
void GenTree::DumpNodeSizes(FILE* fp)
{
// Dump the sizes of the various GenTree flavors
fprintf(fp, "Small tree node size = %zu bytes\n", TREE_NODE_SZ_SMALL);
fprintf(fp, "Large tree node size = %zu bytes\n", TREE_NODE_SZ_LARGE);
fprintf(fp, "\n");
// Verify that node sizes are set kosherly and dump sizes
for (unsigned op = GT_NONE + 1; op < GT_COUNT; op++)
{
unsigned needSize = s_gtTrueSizes[op];
unsigned nodeSize = s_gtNodeSizes[op];
const char* structNm = OpStructName((genTreeOps)op);
const char* operName = OpName((genTreeOps)op);
bool repeated = false;
// Have we seen this struct flavor before?
for (unsigned mop = GT_NONE + 1; mop < op; mop++)
{
if (strcmp(structNm, OpStructName((genTreeOps)mop)) == 0)
{
repeated = true;
break;
}
}
// Don't repeat the same GenTree flavor unless we have an error
if (!repeated || needSize > nodeSize)
{
unsigned sizeChar = '?';
if (nodeSize == TREE_NODE_SZ_SMALL)
sizeChar = 'S';
else if (nodeSize == TREE_NODE_SZ_LARGE)
sizeChar = 'L';
fprintf(fp, "GT_%-16s ... %-19s = %3u bytes (%c)", operName, structNm, needSize, sizeChar);
if (needSize > nodeSize)
{
fprintf(fp, " -- ERROR -- allocation is only %u bytes!", nodeSize);
}
else if (needSize <= TREE_NODE_SZ_SMALL && nodeSize == TREE_NODE_SZ_LARGE)
{
fprintf(fp, " ... could be small");
}
fprintf(fp, "\n");
}
}
}
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
*
* Walk all basic blocks and call the given function pointer for all tree
* nodes contained therein.
*/
void Compiler::fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData)
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), visitor, pCallBackData);
}
}
}
//-----------------------------------------------------------
// CopyReg: Copy the _gtRegNum/gtRegTag fields.
//
// Arguments:
// from - GenTree node from which to copy
//
// Return Value:
// None
void GenTree::CopyReg(GenTree* from)
{
_gtRegNum = from->_gtRegNum;
INDEBUG(gtRegTag = from->gtRegTag;)
// Also copy multi-reg state if this is a call node
if (IsCall())
{
assert(from->IsCall());
this->AsCall()->CopyOtherRegs(from->AsCall());
}
else if (IsCopyOrReload())
{
this->AsCopyOrReload()->CopyOtherRegs(from->AsCopyOrReload());
}
}
//------------------------------------------------------------------
// gtHasReg: Whether the node has been assigned a register by LSRA
//
// Arguments:
// comp - Compiler instance. Required for multi-reg lcl var; ignored otherwise.
//
// Return Value:
// Returns true if the node was assigned a register.
//
// In case of multi-reg call nodes, it is considered to have a reg if regs are allocated for ALL its
// return values.
// REVIEW: why is this ALL and the other cases are ANY? Explain.
//
// In case of GT_COPY or GT_RELOAD of a multi-reg call, GT_COPY/GT_RELOAD is considered to have a reg if it
// has a reg assigned to ANY of its positions.
//
// In case of multi-reg local vars, it is considered to have a reg if it has a reg assigned for ANY
// of its positions.
//
bool GenTree::gtHasReg(Compiler* comp) const
{
bool hasReg = false;
if (IsMultiRegCall())
{
const GenTreeCall* call = AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
// A Multi-reg call node is said to have regs, if it has
// reg assigned to each of its result registers.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (call->GetRegNumByIdx(i) != REG_NA);
if (!hasReg)
{
break;
}
}
}
else if (IsCopyOrReloadOfMultiRegCall())
{
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
// A Multi-reg copy or reload node is said to have regs,
// if it has valid regs in any of the positions.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (copyOrReload->GetRegNumByIdx(i) != REG_NA);
if (hasReg)
{
break;
}
}
}
else if (IsMultiRegLclVar())
{
assert(comp != nullptr);
const GenTreeLclVar* lclNode = AsLclVar();
const unsigned regCount = GetMultiRegCount(comp);
// A multi-reg local var is said to have regs,
// if it has valid regs in any of the positions.
for (unsigned i = 0; i < regCount; i++)
{
hasReg = (lclNode->GetRegNumByIdx(i) != REG_NA);
if (hasReg)
{
break;
}
}
}
else
{
hasReg = (GetRegNum() != REG_NA);
}
return hasReg;
}
//-----------------------------------------------------------------------------
// GetRegisterDstCount: Get the number of registers defined by the node.
//
// Arguments:
// None
//
// Return Value:
// The number of registers that this node defines.
//
// Notes:
// This should not be called on a contained node.
// This does not look at the actual register assignments, if any, and so
// is valid after Lowering.
//
int GenTree::GetRegisterDstCount(Compiler* compiler) const
{
assert(!isContained());
if (!IsMultiRegNode())
{
return (IsValue()) ? 1 : 0;
}
else if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
else if (IsCopyOrReload())
{
return gtGetOp1()->GetRegisterDstCount(compiler);
}
#if FEATURE_ARG_SPLIT
else if (OperIsPutArgSplit())
{
return (const_cast<GenTree*>(this))->AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
else if (OperIsMultiRegOp())
{
// A MultiRegOp is a GT_MUL_LONG, GT_PUTARG_REG, or GT_BITCAST.
// For the latter two (ARM-only), they only have multiple registers if they produce a long value
// (GT_MUL_LONG always produces a long value).
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
return (TypeGet() == TYP_LONG) ? 2 : 1;
#else
assert(OperIs(GT_MUL_LONG));
return 2;
#endif
}
#endif
#ifdef FEATURE_HW_INTRINSICS
else if (OperIsHWIntrinsic())
{
assert(TypeIs(TYP_STRUCT));
const GenTreeHWIntrinsic* intrinsic = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = intrinsic->GetHWIntrinsicId();
assert(HWIntrinsicInfo::IsMultiReg(intrinsicId));
return HWIntrinsicInfo::GetMultiRegCount(intrinsicId);
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetFieldCount(compiler);
}
assert(!"Unexpected multi-reg node");
return 0;
}
//-----------------------------------------------------------------------------------
// IsMultiRegNode: whether this node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi-reg node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
bool GenTree::IsMultiRegNode() const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return true;
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return true;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return true;
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return true;
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::IsMultiReg(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (IsMultiRegLclVar())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetMultiRegCount: Return the register count for a multi-reg node.
//
// Arguments:
// comp - Compiler instance. Required for MultiRegLclVar, unused otherwise.
//
// Return Value:
// Returns the number of registers defined by this node.
//
unsigned GenTree::GetMultiRegCount(Compiler* comp) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegCount();
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegCount();
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::GetMultiRegCount(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (IsMultiRegLclVar())
{
assert(comp != nullptr);
return AsLclVar()->GetFieldCount(comp);
}
assert(!"GetMultiRegCount called with non-multireg node");
return 1;
}
//---------------------------------------------------------------
// gtGetRegMask: Get the reg mask of the node.
//
// Arguments:
// None
//
// Return Value:
// Reg Mask of GenTree node.
//
regMaskTP GenTree::gtGetRegMask() const
{
regMaskTP resultMask;
if (IsMultiRegCall())
{
resultMask = genRegMask(GetRegNum());
resultMask |= AsCall()->GetOtherRegMask();
}
else if (IsCopyOrReloadOfMultiRegCall())
{
// A multi-reg copy or reload, will have valid regs for only those
// positions that need to be copied or reloaded. Hence we need
// to consider only those registers for computing reg mask.
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = copyOrReload->GetRegNumByIdx(i);
if (reg != REG_NA)
{
resultMask |= genRegMask(reg);
}
}
}
#if FEATURE_ARG_SPLIT
else if (compFeatureArgSplit() && OperIsPutArgSplit())
{
const GenTreePutArgSplit* splitArg = AsPutArgSplit();
const unsigned regCount = splitArg->gtNumRegs;
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = splitArg->GetRegNumByIdx(i);
assert(reg != REG_NA);
resultMask |= genRegMask(reg);
}
}
#endif // FEATURE_ARG_SPLIT
else
{
resultMask = genRegMask(GetRegNum());
}
return resultMask;
}
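//------------------------------------------------------------------------
// The following methods append or insert a field use in a GT_FIELD_LIST.
// The non-LIR variants also propagate the new operand's side effect flags
// into this node's gtFlags; the LIR variants do not.
//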
void GenTreeFieldList::AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
}
void GenTreeFieldList::InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::InsertFieldLIR(
Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
}
//---------------------------------------------------------------
// GetOtherRegMask: Get the reg mask of gtOtherRegs of call node
//
// Arguments:
// None
//
// Return Value:
// Reg mask of gtOtherRegs of call node.
//
regMaskTP GenTreeCall::GetOtherRegMask() const
{
regMaskTP resultMask = RBM_NONE;
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
if (gtOtherRegs[i] != REG_NA)
{
resultMask |= genRegMask((regNumber)gtOtherRegs[i]);
continue;
}
break;
}
#endif
return resultMask;
}
//-------------------------------------------------------------------------
// IsPure:
// Returns true if this call is pure. For now, this uses the same
// definition of "pure" that is used by HelperCallProperties: a
// pure call does not read or write any aliased (e.g. heap) memory or
// have other global side effects (e.g. class constructors, finalizers),
// but is allowed to throw an exception.
//
// NOTE: this call currently only returns true if the call target is a
// helper method that is known to be pure. No other analysis is
// performed.
//
// Arguments:
// compiler - the compiler context.
//
// Returns:
// True if the call is pure; false otherwise.
//
bool GenTreeCall::IsPure(Compiler* compiler) const
{
return (gtCallType == CT_HELPER) &&
compiler->s_helperCallProperties.IsPure(compiler->eeGetHelperNum(gtCallMethHnd));
}
//-------------------------------------------------------------------------
// HasSideEffects:
// Returns true if this call has any side effects. All non-helpers are considered to have side-effects. Only helpers
// that do not mutate the heap, do not run constructors, may not throw, and are either a) pure or b) non-finalizing
// allocation functions are considered side-effect-free.
//
// Arguments:
// compiler - the compiler instance
// ignoreExceptions - when `true`, ignores exception side effects
// ignoreCctors - when `true`, ignores class constructor side effects
//
// Return Value:
// true if this call has any side-effects; false otherwise.
bool GenTreeCall::HasSideEffects(Compiler* compiler, bool ignoreExceptions, bool ignoreCctors) const
{
// Generally all GT_CALL nodes are considered to have side-effects, but we may have extra information about helper
// calls that can prove them side-effect-free.
if (gtCallType != CT_HELPER)
{
return true;
}
CorInfoHelpFunc helper = compiler->eeGetHelperNum(gtCallMethHnd);
HelperCallProperties& helperProperties = compiler->s_helperCallProperties;
// We definitely care about the side effects if MutatesHeap is true
if (helperProperties.MutatesHeap(helper))
{
return true;
}
// Unless we have been instructed to ignore cctors (CSE, for example, ignores cctors), consider them side effects.
if (!ignoreCctors && helperProperties.MayRunCctor(helper))
{
return true;
}
// If we also care about exceptions then check if the helper can throw
if (!ignoreExceptions && !helperProperties.NoThrow(helper))
{
return true;
}
// If this is not a Pure helper call or an allocator (that will not need to run a finalizer)
// then this call has side effects.
return !helperProperties.IsPure(helper) &&
(!helperProperties.IsAllocator(helper) || ((gtCallMoreFlags & GTF_CALL_M_ALLOC_SIDE_EFFECTS) != 0));
}
//-------------------------------------------------------------------------
// HasNonStandardAddedArgs: Return true if the method has non-standard args added to the call
// argument list during argument morphing (fgMorphArgs), e.g., passed in R10 or R11 on AMD64.
// See also GetNonStandardAddedArgCount().
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// true if there are any such args, false otherwise.
//
bool GenTreeCall::HasNonStandardAddedArgs(Compiler* compiler) const
{
return GetNonStandardAddedArgCount(compiler) != 0;
}
//-------------------------------------------------------------------------
// GetNonStandardAddedArgCount: Get the count of non-standard arguments that have been added
// during call argument morphing (fgMorphArgs). Do not count non-standard args that are already
// counted in the argument list prior to morphing.
//
// This function is used to help map the caller and callee arguments during tail call setup.
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// The count of args, as described.
//
// Notes:
// It would be more general to have fgMorphArgs set a bit on the call node when such
// args are added to a call, and a bit on each such arg, and then have this code loop
// over the call args when the special call bit is set, counting the args with the special
// arg bit. This seems pretty heavyweight, though. Instead, this logic needs to be kept
// in sync with fgMorphArgs.
//
int GenTreeCall::GetNonStandardAddedArgCount(Compiler* compiler) const
{
if (IsUnmanaged() && !compiler->opts.ShouldUsePInvokeHelpers())
{
// R11 = PInvoke cookie param
return 1;
}
else if (IsVirtualStub())
{
// R11 = Virtual stub param
return 1;
}
else if ((gtCallType == CT_INDIRECT) && (gtCallCookie != nullptr))
{
// R10 = PInvoke target param
// R11 = PInvoke cookie param
return 2;
}
return 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
// compiler, the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument.
// This method may actually have a retBuf argument,
// or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
// These two Jit Helpers that we handle here by returning true
// aren't actually defined to return a struct, so they don't expect
// their RetBuf to be passed in x8, instead they expect it in x0.
//
bool GenTreeCall::TreatAsHasRetBufArg(Compiler* compiler) const
{
if (HasRetBufArg())
{
return true;
}
else
{
// If we see a Jit helper call that returns a TYP_STRUCT we will
// transform it as if it has a Return Buffer Argument
//
if (IsHelperCall() && (gtReturnType == TYP_STRUCT))
{
// There are two possible helper calls that use this path:
// CORINFO_HELP_GETFIELDSTRUCT and CORINFO_HELP_UNBOX_NULLABLE
//
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(gtCallMethHnd);
if (helpFunc == CORINFO_HELP_GETFIELDSTRUCT)
{
return true;
}
else if (helpFunc == CORINFO_HELP_UNBOX_NULLABLE)
{
return true;
}
else
{
assert(!"Unexpected JIT helper in TreatAsHasRetBufArg");
}
}
}
return false;
}
//-------------------------------------------------------------------------
// IsHelperCall: Determine if this GT_CALL node is a specific helper call.
//
// Arguments:
// compiler - the compiler instance so that we can call eeFindHelper
//
// Return Value:
// Returns true if this GT_CALL node is a call to the specified helper.
//
bool GenTreeCall::IsHelperCall(Compiler* compiler, unsigned helper) const
{
return IsHelperCall(compiler->eeFindHelper(helper));
}
//------------------------------------------------------------------------
// GenTreeCall::ReplaceCallOperand:
// Replaces a given operand to a call node and updates the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTreeCall::ReplaceCallOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
GenTree* originalOperand = *useEdge;
*useEdge = replacement;
const bool isArgument =
(replacement != gtControlExpr) &&
((gtCallType != CT_INDIRECT) || ((replacement != gtCallCookie) && (replacement != gtCallAddr)));
if (isArgument)
{
if ((originalOperand->gtFlags & GTF_LATE_ARG) != 0)
{
replacement->gtFlags |= GTF_LATE_ARG;
}
else
{
assert((replacement->gtFlags & GTF_LATE_ARG) == 0);
fgArgTabEntry* fp = Compiler::gtArgEntryByNode(this, replacement);
assert(fp->GetNode() == replacement);
}
}
}
//-------------------------------------------------------------------------
// AreArgsComplete: Determine if this GT_CALL node's arguments have been processed.
//
// Return Value:
// Returns true if fgMorphArgs has processed the arguments.
//
bool GenTreeCall::AreArgsComplete() const
{
if (fgArgInfo == nullptr)
{
return false;
}
if (fgArgInfo->AreArgsComplete())
{
assert((gtCallLateArgs != nullptr) || !fgArgInfo->HasRegArgs());
return true;
}
#if defined(FEATURE_FASTTAILCALL)
// If we have FEATURE_FASTTAILCALL, 'fgCanFastTailCall()' can call 'fgInitArgInfo()'. In that
// scenario it is valid for 'fgArgInfo' to be non-null when 'fgMorphArgs()' first queries it,
// even though the arguments have not yet been morphed.
#else
assert(gtCallArgs == nullptr);
#endif
return false;
}
//--------------------------------------------------------------------------
// Equals: Check if 2 CALL nodes are equal.
//
// Arguments:
// c1 - The first call node
// c2 - The second call node
//
// Return Value:
// true if the 2 CALL nodes have the same type and operands
//
bool GenTreeCall::Equals(GenTreeCall* c1, GenTreeCall* c2)
{
assert(c1->OperGet() == c2->OperGet());
if (c1->TypeGet() != c2->TypeGet())
{
return false;
}
if (c1->gtCallType != c2->gtCallType)
{
return false;
}
if (c1->gtCallType != CT_INDIRECT)
{
if (c1->gtCallMethHnd != c2->gtCallMethHnd)
{
return false;
}
#ifdef FEATURE_READYTORUN
if (c1->gtEntryPoint.addr != c2->gtEntryPoint.addr)
{
return false;
}
#endif
}
else
{
if (!Compare(c1->gtCallAddr, c2->gtCallAddr))
{
return false;
}
}
if ((c1->gtCallThisArg != nullptr) != (c2->gtCallThisArg != nullptr))
{
return false;
}
if ((c1->gtCallThisArg != nullptr) && !Compare(c1->gtCallThisArg->GetNode(), c2->gtCallThisArg->GetNode()))
{
return false;
}
GenTreeCall::UseIterator i1 = c1->Args().begin();
GenTreeCall::UseIterator end1 = c1->Args().end();
GenTreeCall::UseIterator i2 = c2->Args().begin();
GenTreeCall::UseIterator end2 = c2->Args().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
i1 = c1->LateArgs().begin();
end1 = c1->LateArgs().end();
i2 = c2->LateArgs().begin();
end2 = c2->LateArgs().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
if (!Compare(c1->gtControlExpr, c2->gtControlExpr))
{
return false;
}
return true;
}
//--------------------------------------------------------------------------
// ResetArgInfo: The argument info needs to be reset so it can be recomputed based on some change
// in conditions, such as changing the return type of a call due to giving up on doing a tailcall.
// If there is no fgArgInfo computed yet for this call, then there is nothing to reset.
//
void GenTreeCall::ResetArgInfo()
{
if (fgArgInfo == nullptr)
{
return;
}
// We would like to just set `fgArgInfo = nullptr`. But `fgInitArgInfo()` not
// only sets up fgArgInfo, it also adds non-standard args to the IR, and we need
// to remove that extra IR so it doesn't get added again.
//
unsigned argNum = 0;
if (gtCallThisArg != nullptr)
{
argNum++;
}
Use** link = >CallArgs;
while ((*link) != nullptr)
{
const fgArgTabEntry* entry = fgArgInfo->GetArgEntry(argNum);
if (entry->isNonStandard() && entry->isNonStandardArgAddedLate())
{
JITDUMP("Removing non-standarg arg %s [%06u] to prepare for re-morphing call [%06u]\n",
getNonStandardArgKindName(entry->nonStandardArgKind), Compiler::dspTreeID((*link)->GetNode()),
gtTreeID);
*link = (*link)->GetNext();
}
else
{
link = &(*link)->NextRef();
}
argNum++;
}
fgArgInfo = nullptr;
}
#if !defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned GenTreePutArgStk::GetStackByteSize() const
{
return genTypeSize(genActualType(gtOp1->gtType));
}
#endif // !defined(FEATURE_PUT_STRUCT_ARG_STK)
/*****************************************************************************
*
* Returns non-zero if the two trees are identical.
*/
bool GenTree::Compare(GenTree* op1, GenTree* op2, bool swapOK)
{
genTreeOps oper;
unsigned kind;
// printf("tree1:\n"); gtDispTree(op1);
// printf("tree2:\n"); gtDispTree(op2);
AGAIN:
if (op1 == nullptr)
{
return (op2 == nullptr);
}
if (op2 == nullptr)
{
return false;
}
if (op1 == op2)
{
return true;
}
oper = op1->OperGet();
/* The operators must be equal */
if (oper != op2->gtOper)
{
return false;
}
/* The types must be equal */
if (op1->gtType != op2->gtType)
{
return false;
}
/* Overflow must be equal */
if (op1->gtOverflowEx() != op2->gtOverflowEx())
{
return false;
}
/* Sensible flags must be equal */
if ((op1->gtFlags & (GTF_UNSIGNED)) != (op2->gtFlags & (GTF_UNSIGNED)))
{
return false;
}
/* Figure out what kind of nodes we're comparing */
kind = op1->OperKind();
/* Is this a constant node? */
if (op1->OperIsConst())
{
switch (oper)
{
case GT_CNS_INT:
if (op1->AsIntCon()->gtIconVal == op2->AsIntCon()->gtIconVal)
{
return true;
}
break;
case GT_CNS_STR:
if ((op1->AsStrCon()->gtSconCPX == op2->AsStrCon()->gtSconCPX) &&
(op1->AsStrCon()->gtScpHnd == op2->AsStrCon()->gtScpHnd))
{
return true;
}
break;
#if 0
// TODO-CQ: Enable this in the future
case GT_CNS_LNG:
if (op1->AsLngCon()->gtLconVal == op2->AsLngCon()->gtLconVal)
return true;
break;
case GT_CNS_DBL:
if (op1->AsDblCon()->gtDconVal == op2->AsDblCon()->gtDconVal)
return true;
break;
#endif
default:
break;
}
return false;
}
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_LCL_VAR:
if (op1->AsLclVarCommon()->GetLclNum() != op2->AsLclVarCommon()->GetLclNum())
{
break;
}
return true;
case GT_LCL_FLD:
if ((op1->AsLclFld()->GetLclNum() != op2->AsLclFld()->GetLclNum()) ||
(op1->AsLclFld()->GetLclOffs() != op2->AsLclFld()->GetLclOffs()))
{
break;
}
return true;
case GT_CLS_VAR:
if (op1->AsClsVar()->gtClsVarHnd != op2->AsClsVar()->gtClsVarHnd)
{
break;
}
return true;
case GT_LABEL:
return true;
case GT_ARGPLACE:
if ((op1->gtType == TYP_STRUCT) &&
(op1->AsArgPlace()->gtArgPlaceClsHnd != op2->AsArgPlace()->gtArgPlaceClsHnd))
{
break;
}
return true;
default:
break;
}
return false;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_UNOP)
{
if (IsExOp(kind))
{
// ExOp operators extend unary operators with extra, non-GenTree* members. In many cases,
// these should be included in the comparison.
switch (oper)
{
case GT_ARR_LENGTH:
if (op1->AsArrLen()->ArrLenOffset() != op2->AsArrLen()->ArrLenOffset())
{
return false;
}
break;
case GT_CAST:
if (op1->AsCast()->gtCastType != op2->AsCast()->gtCastType)
{
return false;
}
break;
case GT_BLK:
case GT_OBJ:
if (op1->AsBlk()->GetLayout() != op2->AsBlk()->GetLayout())
{
return false;
}
break;
case GT_FIELD:
if (op1->AsField()->gtFldHnd != op2->AsField()->gtFldHnd)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_BOX:
case GT_RUNTIMELOOKUP:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
return Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1);
}
if (kind & GTK_BINOP)
{
if (IsExOp(kind))
{
// ExOp operators extend binary operators with extra, non-GenTree* members. In many cases,
// these should be included in the comparison.
switch (oper)
{
case GT_INTRINSIC:
if (op1->AsIntrinsic()->gtIntrinsicName != op2->AsIntrinsic()->gtIntrinsicName)
{
return false;
}
break;
case GT_LEA:
if (op1->AsAddrMode()->gtScale != op2->AsAddrMode()->gtScale)
{
return false;
}
if (op1->AsAddrMode()->Offset() != op2->AsAddrMode()->Offset())
{
return false;
}
break;
case GT_BOUNDS_CHECK:
if (op1->AsBoundsChk()->gtThrowKind != op2->AsBoundsChk()->gtThrowKind)
{
return false;
}
break;
case GT_INDEX:
if (op1->AsIndex()->gtIndElemSize != op2->AsIndex()->gtIndElemSize)
{
return false;
}
break;
case GT_INDEX_ADDR:
if (op1->AsIndexAddr()->gtElemSize != op2->AsIndexAddr()->gtElemSize)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_QMARK:
break;
default:
assert(!"unexpected binary ExOp operator");
}
}
if (op1->AsOp()->gtOp2)
{
if (!Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1, swapOK))
{
if (swapOK && OperIsCommutative(oper) &&
((op1->AsOp()->gtOp1->gtFlags | op1->AsOp()->gtOp2->gtFlags | op2->AsOp()->gtOp1->gtFlags |
op2->AsOp()->gtOp2->gtFlags) &
GTF_ALL_EFFECT) == 0)
{
if (Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp2, swapOK))
{
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp1;
goto AGAIN;
}
}
return false;
}
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp2;
goto AGAIN;
}
else
{
op1 = op1->AsOp()->gtOp1;
op2 = op2->AsOp()->gtOp1;
if (!op1)
{
return (op2 == nullptr);
}
if (!op2)
{
return false;
}
goto AGAIN;
}
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
return GenTreeCall::Equals(op1->AsCall(), op2->AsCall());
#ifdef FEATURE_SIMD
case GT_SIMD:
return GenTreeSIMD::Equals(op1->AsSIMD(), op2->AsSIMD());
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
return GenTreeHWIntrinsic::Equals(op1->AsHWIntrinsic(), op2->AsHWIntrinsic());
#endif
case GT_ARR_ELEM:
if (op1->AsArrElem()->gtArrRank != op2->AsArrElem()->gtArrRank)
{
return false;
}
// NOTE: gtArrElemSize may need to be handled
unsigned dim;
for (dim = 0; dim < op1->AsArrElem()->gtArrRank; dim++)
{
if (!Compare(op1->AsArrElem()->gtArrInds[dim], op2->AsArrElem()->gtArrInds[dim]))
{
return false;
}
}
op1 = op1->AsArrElem()->gtArrObj;
op2 = op2->AsArrElem()->gtArrObj;
goto AGAIN;
case GT_ARR_OFFSET:
if (op1->AsArrOffs()->gtCurrDim != op2->AsArrOffs()->gtCurrDim ||
op1->AsArrOffs()->gtArrRank != op2->AsArrOffs()->gtArrRank)
{
return false;
}
return (Compare(op1->AsArrOffs()->gtOffset, op2->AsArrOffs()->gtOffset) &&
Compare(op1->AsArrOffs()->gtIndex, op2->AsArrOffs()->gtIndex) &&
Compare(op1->AsArrOffs()->gtArrObj, op2->AsArrOffs()->gtArrObj));
case GT_PHI:
return GenTreePhi::Equals(op1->AsPhi(), op2->AsPhi());
case GT_FIELD_LIST:
return GenTreeFieldList::Equals(op1->AsFieldList(), op2->AsFieldList());
case GT_CMPXCHG:
return Compare(op1->AsCmpXchg()->gtOpLocation, op2->AsCmpXchg()->gtOpLocation) &&
Compare(op1->AsCmpXchg()->gtOpValue, op2->AsCmpXchg()->gtOpValue) &&
Compare(op1->AsCmpXchg()->gtOpComparand, op2->AsCmpXchg()->gtOpComparand);
case GT_STORE_DYN_BLK:
return Compare(op1->AsStoreDynBlk()->Addr(), op2->AsStoreDynBlk()->Addr()) &&
Compare(op1->AsStoreDynBlk()->Data(), op2->AsStoreDynBlk()->Data()) &&
Compare(op1->AsStoreDynBlk()->gtDynamicSize, op2->AsStoreDynBlk()->gtDynamicSize);
default:
assert(!"unexpected operator");
}
return false;
}
//------------------------------------------------------------------------
// gtHasRef: Find out whether the given tree contains a local/field.
//
// Arguments:
// tree - tree to find the local in
// lclNum - the local's number, *or* the handle for the field
//
// Return Value:
// Whether "tree" has any LCL_VAR/LCL_FLD nodes that refer to the
// local, LHS or RHS, or FIELD nodes with the specified handle.
//
// Notes:
// Does not pay attention to local address nodes.
//
/* static */ bool Compiler::gtHasRef(GenTree* tree, ssize_t lclNum)
{
if (tree == nullptr)
{
return false;
}
if (tree->OperIsLeaf())
{
if (tree->OperIs(GT_LCL_VAR, GT_LCL_FLD) && (tree->AsLclVarCommon()->GetLclNum() == (unsigned)lclNum))
{
return true;
}
if (tree->OperIs(GT_RET_EXPR))
{
return gtHasRef(tree->AsRetExpr()->gtInlineCandidate, lclNum);
}
return false;
}
if (tree->OperIsUnary())
{
// Code in importation (see CEE_STFLD in impImportBlockCode), when
// spilling, can pass us a "lclNum" that is actually a field handle...
if (tree->OperIs(GT_FIELD) && (lclNum == (ssize_t)tree->AsField()->gtFldHnd))
{
return true;
}
return gtHasRef(tree->AsUnOp()->gtGetOp1(), lclNum);
}
if (tree->OperIsBinary())
{
return gtHasRef(tree->AsOp()->gtGetOp1(), lclNum) || gtHasRef(tree->AsOp()->gtGetOp2(), lclNum);
}
bool result = false;
tree->VisitOperands([lclNum, &result](GenTree* operand) -> GenTree::VisitResult {
if (gtHasRef(operand, lclNum))
{
result = true;
return GenTree::VisitResult::Abort;
}
return GenTree::VisitResult::Continue;
});
return result;
}
struct AddrTakenDsc
{
Compiler* comp;
bool hasAddrTakenLcl;
};
/* static */
Compiler::fgWalkResult Compiler::gtHasLocalsWithAddrOpCB(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
Compiler* comp = data->compiler;
if (tree->gtOper == GT_LCL_VAR)
{
const LclVarDsc* varDsc = comp->lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed())
{
((AddrTakenDsc*)data->pCallbackData)->hasAddrTakenLcl = true;
return WALK_ABORT;
}
}
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Return true if this tree contains locals with lvHasLdAddrOp or IsAddressExposed()
* flag(s) set.
*/
bool Compiler::gtHasLocalsWithAddrOp(GenTree* tree)
{
AddrTakenDsc desc;
desc.comp = this;
desc.hasAddrTakenLcl = false;
fgWalkTreePre(&tree, gtHasLocalsWithAddrOpCB, &desc);
return desc.hasAddrTakenLcl;
}
#ifdef DEBUG
/*****************************************************************************
*
* Helper used to compute hash values for trees.
*/
inline unsigned genTreeHashAdd(unsigned old, unsigned add)
{
return (old + old / 2) ^ add;
}
inline unsigned genTreeHashAdd(unsigned old, void* add)
{
return genTreeHashAdd(old, (unsigned)(size_t)add);
}
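// For illustration, a hash for a small tree is accumulated roughly like this:
//
// unsigned hash = 0;
// hash = genTreeHashAdd(hash, GT_ADD); // fold in the operator
// hash = genTreeHashAdd(hash, lclNum); // fold in a leaf's payload
//
// The asymmetric "(old + old / 2)" term makes the combine order-sensitive, so
// ADD(a, b) and ADD(b, a) will generally hash differently.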
/*****************************************************************************
*
* Given an arbitrary expression tree, compute a hash value for it.
*/
unsigned Compiler::gtHashValue(GenTree* tree)
{
genTreeOps oper;
unsigned kind;
unsigned hash = 0;
GenTree* temp;
AGAIN:
assert(tree);
/* Figure out what kind of a node we have */
oper = tree->OperGet();
kind = tree->OperKind();
/* Include the operator value in the hash */
hash = genTreeHashAdd(hash, oper);
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
size_t add;
switch (oper)
{
UINT64 bits;
case GT_LCL_VAR:
add = tree->AsLclVar()->GetLclNum();
break;
case GT_LCL_FLD:
hash = genTreeHashAdd(hash, tree->AsLclFld()->GetLclNum());
add = tree->AsLclFld()->GetLclOffs();
break;
case GT_CNS_INT:
add = tree->AsIntCon()->gtIconVal;
break;
case GT_CNS_LNG:
bits = (UINT64)tree->AsLngCon()->gtLconVal;
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_DBL:
bits = *(UINT64*)(&tree->AsDblCon()->gtDconVal);
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_STR:
add = tree->AsStrCon()->gtSconCPX;
break;
case GT_JMP:
add = tree->AsVal()->gtVal1;
break;
default:
add = 0;
break;
}
// clang-format off
// narrow 'add' into a 32-bit 'val'
unsigned val;
#ifdef HOST_64BIT
val = genTreeHashAdd(uhi32(add), ulo32(add));
#else // 32-bit host
val = add;
#endif
// clang-format on
hash = genTreeHashAdd(hash, val);
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
GenTree* op1;
if (kind & GTK_UNOP)
{
op1 = tree->AsOp()->gtOp1;
/* Special case: no sub-operand at all */
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_ARR_LENGTH:
hash += tree->AsArrLen()->ArrLenOffset();
break;
case GT_CAST:
hash ^= tree->AsCast()->gtCastType;
break;
case GT_INDEX:
hash += tree->AsIndex()->gtIndElemSize;
break;
case GT_INDEX_ADDR:
hash += tree->AsIndexAddr()->gtElemSize;
break;
case GT_ALLOCOBJ:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsAllocObj()->gtAllocObjClsHnd)));
hash = genTreeHashAdd(hash, tree->AsAllocObj()->gtNewHelper);
break;
case GT_RUNTIMELOOKUP:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsRuntimeLookup()->gtHnd)));
break;
case GT_BLK:
case GT_OBJ:
hash =
genTreeHashAdd(hash,
static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->AsBlk()->GetLayout())));
break;
case GT_FIELD:
hash = genTreeHashAdd(hash, tree->AsField()->gtFldHnd);
break;
// For the ones below no extra argument matters for the hash code.
case GT_BOX:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
if (!op1)
{
goto DONE;
}
tree = op1;
goto AGAIN;
}
if (kind & GTK_BINOP)
{
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_INTRINSIC:
hash += tree->AsIntrinsic()->gtIntrinsicName;
break;
case GT_LEA:
hash += static_cast<unsigned>(tree->AsAddrMode()->Offset() << 3) + tree->AsAddrMode()->gtScale;
break;
case GT_BOUNDS_CHECK:
hash = genTreeHashAdd(hash, tree->AsBoundsChk()->gtThrowKind);
break;
case GT_STORE_BLK:
case GT_STORE_OBJ:
hash ^= PtrToUlong(tree->AsBlk()->GetLayout());
break;
// For the ones below no extra argument matters for the hash code.
case GT_ARR_INDEX:
case GT_QMARK:
case GT_INDEX:
case GT_INDEX_ADDR:
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
hash += tree->AsSIMD()->GetSIMDIntrinsicId();
hash += tree->AsSIMD()->GetSimdBaseType();
hash += tree->AsSIMD()->GetSimdSize();
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
hash += tree->AsHWIntrinsic()->GetHWIntrinsicId();
hash += tree->AsHWIntrinsic()->GetSimdBaseType();
hash += tree->AsHWIntrinsic()->GetSimdSize();
hash += tree->AsHWIntrinsic()->GetAuxiliaryType();
hash += tree->AsHWIntrinsic()->GetOtherReg();
break;
#endif // FEATURE_HW_INTRINSICS
default:
assert(!"unexpected binary ExOp operator");
}
}
op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
/* Is there a second sub-operand? */
if (!op2)
{
/* Special case: no sub-operands at all */
if (!op1)
{
goto DONE;
}
/* This is a unary operator */
tree = op1;
goto AGAIN;
}
/* This is a binary operator */
unsigned hsh1 = gtHashValue(op1);
/* Add op1's hash to the running value and continue with op2 */
hash = genTreeHashAdd(hash, hsh1);
tree = op2;
goto AGAIN;
}
/* See what kind of a special operator we have here */
switch (tree->gtOper)
{
case GT_ARR_ELEM:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrObj));
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrInds[dim]));
}
break;
case GT_ARR_OFFSET:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtOffset));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtIndex));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtArrObj));
break;
case GT_CALL:
if ((tree->AsCall()->gtCallThisArg != nullptr) && !tree->AsCall()->gtCallThisArg->GetNode()->OperIs(GT_NOP))
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCall()->gtCallThisArg->GetNode()));
}
for (GenTreeCall::Use& use : tree->AsCall()->Args())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
temp = tree->AsCall()->gtCallAddr;
assert(temp);
hash = genTreeHashAdd(hash, gtHashValue(temp));
}
else
{
hash = genTreeHashAdd(hash, tree->AsCall()->gtCallMethHnd);
}
for (GenTreeCall::Use& use : tree->AsCall()->LateArgs())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
// TODO-List: rewrite with a general visitor / iterator?
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
hash = genTreeHashAdd(hash, gtHashValue(operand));
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_CMPXCHG:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpLocation));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpValue));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpComparand));
break;
case GT_STORE_DYN_BLK:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Data()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Addr()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->gtDynamicSize));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
assert(!"unexpected operator");
break;
}
DONE:
return hash;
}
#endif // DEBUG
/*****************************************************************************
*
* Return a relational operator that is the reverse of the given one.
*/
/* static */
genTreeOps GenTree::ReverseRelop(genTreeOps relop)
{
static const genTreeOps reverseOps[] = {
GT_NE, // GT_EQ
GT_EQ, // GT_NE
GT_GE, // GT_LT
GT_GT, // GT_LE
GT_LT, // GT_GE
GT_LE, // GT_GT
GT_TEST_NE, // GT_TEST_EQ
GT_TEST_EQ, // GT_TEST_NE
};
assert(reverseOps[GT_EQ - GT_EQ] == GT_NE);
assert(reverseOps[GT_NE - GT_EQ] == GT_EQ);
assert(reverseOps[GT_LT - GT_EQ] == GT_GE);
assert(reverseOps[GT_LE - GT_EQ] == GT_GT);
assert(reverseOps[GT_GE - GT_EQ] == GT_LT);
assert(reverseOps[GT_GT - GT_EQ] == GT_LE);
assert(reverseOps[GT_TEST_EQ - GT_EQ] == GT_TEST_NE);
assert(reverseOps[GT_TEST_NE - GT_EQ] == GT_TEST_EQ);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(reverseOps));
return reverseOps[relop - GT_EQ];
}
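// For illustration: ReverseRelop yields the logical negation of a comparison,
// e.g. !(a < b) is (a >= b), so GT_LT maps to GT_GE. Contrast with SwapRelop
// below, which yields the operator for swapped operands: (a < b) is the same
// relation as (b > a), so GT_LT maps to GT_GT.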
/*****************************************************************************
*
* Return a relational operator that will work for swapped operands.
*/
/* static */
genTreeOps GenTree::SwapRelop(genTreeOps relop)
{
static const genTreeOps swapOps[] = {
GT_EQ, // GT_EQ
GT_NE, // GT_NE
GT_GT, // GT_LT
GT_GE, // GT_LE
GT_LE, // GT_GE
GT_LT, // GT_GT
GT_TEST_EQ, // GT_TEST_EQ
GT_TEST_NE, // GT_TEST_NE
};
assert(swapOps[GT_EQ - GT_EQ] == GT_EQ);
assert(swapOps[GT_NE - GT_EQ] == GT_NE);
assert(swapOps[GT_LT - GT_EQ] == GT_GT);
assert(swapOps[GT_LE - GT_EQ] == GT_GE);
assert(swapOps[GT_GE - GT_EQ] == GT_LE);
assert(swapOps[GT_GT - GT_EQ] == GT_LT);
assert(swapOps[GT_TEST_EQ - GT_EQ] == GT_TEST_EQ);
assert(swapOps[GT_TEST_NE - GT_EQ] == GT_TEST_NE);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(swapOps));
return swapOps[relop - GT_EQ];
}
/*****************************************************************************
*
* Reverse the meaning of the given test condition.
*/
GenTree* Compiler::gtReverseCond(GenTree* tree)
{
if (tree->OperIsCompare())
{
tree->SetOper(GenTree::ReverseRelop(tree->OperGet()));
// Flip the GTF_RELOP_NAN_UN bit
// a ord b === (a != NaN && b != NaN)
// a unord b === (a == NaN || b == NaN)
// => !(a ord b) === (a unord b)
if (varTypeIsFloating(tree->AsOp()->gtOp1->TypeGet()))
{
tree->gtFlags ^= GTF_RELOP_NAN_UN;
}
}
else if (tree->OperIs(GT_JCC, GT_SETCC))
{
GenTreeCC* cc = tree->AsCC();
cc->gtCondition = GenCondition::Reverse(cc->gtCondition);
}
else if (tree->OperIs(GT_JCMP))
{
// Flip the GTF_JCMP_EQ
//
// This causes switching
// cbz <=> cbnz
// tbz <=> tbnz
tree->gtFlags ^= GTF_JCMP_EQ;
}
else
{
tree = gtNewOperNode(GT_NOT, TYP_INT, tree);
}
return tree;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
//------------------------------------------------------------------------------
// IsValidLongMul : Check for long multiplication with 32 bit operands.
//
// Recognizes the following tree: MUL(CAST(long <- int), CAST(long <- int) or CONST),
// where CONST must be an integer constant that fits in 32 bits. Will try to detect
// cases when the multiplication cannot overflow and return "true" for them.
//
// This function does not change the state of the tree and is usable in LIR.
//
// Return Value:
// Whether this GT_MUL tree is a valid long multiplication candidate.
//
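// For illustration, a source expression such as
//
// long r = (long)x * (long)y; // where x and y are "int" locals
//
// imports as a TYP_LONG MUL(CAST(long <- int), CAST(long <- int)) and is a candidate,
// while a multiplication of two operands that are already TYP_LONG is not.
//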
bool GenTreeOp::IsValidLongMul()
{
assert(OperIs(GT_MUL));
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
if (!TypeIs(TYP_LONG))
{
return false;
}
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
if (!(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp())))
{
return false;
}
if (!(op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) &&
!(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())))
{
return false;
}
if (op1->gtOverflow() || op2->gtOverflowEx())
{
return false;
}
if (gtOverflow())
{
auto getMaxValue = [this](GenTree* op) -> int64_t {
if (op->OperIs(GT_CAST))
{
if (op->IsUnsigned())
{
switch (op->AsCast()->CastOp()->TypeGet())
{
case TYP_UBYTE:
return UINT8_MAX;
case TYP_USHORT:
return UINT16_MAX;
default:
return UINT32_MAX;
}
}
return IsUnsigned() ? static_cast<int64_t>(UINT64_MAX) : INT32_MIN;
}
return op->AsIntConCommon()->IntegralValue();
};
int64_t maxOp1 = getMaxValue(op1);
int64_t maxOp2 = getMaxValue(op2);
if (CheckedOps::MulOverflows(maxOp1, maxOp2, IsUnsigned()))
{
return false;
}
}
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
if ((op1ZeroExtends != op2ZeroExtends) && !op2AnyExtensionIsSuitable)
{
return false;
}
return true;
}
#if !defined(TARGET_64BIT) && defined(DEBUG)
//------------------------------------------------------------------------------
// DebugCheckLongMul : Checks that a GTF_MUL_64RSLT tree is a valid MUL_LONG.
//
// Notes:
// This function is defined for 32 bit targets only because we *must* maintain
// the MUL_LONG-compatible tree shape throughout the compilation from morph to
// decomposition, since we do not have (great) ability to create new calls in LIR.
//
// It is for this reason that we recognize MUL_LONGs early in morph, mark them with
// a flag and then pessimize various places (e. g. assertion propagation) to not look
// at them. In contrast, on ARM64 we recognize MUL_LONGs late, in lowering, and thus
// do not need this function.
//
void GenTreeOp::DebugCheckLongMul()
{
assert(OperIs(GT_MUL));
assert(Is64RsltMul());
assert(TypeIs(TYP_LONG));
assert(!gtOverflow());
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
// op1 has to be CAST(long <- int)
assert(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp()));
assert(!op1->gtOverflow());
// op2 has to be CAST(long <- int) or a suitably small constant.
assert((op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) ||
(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())));
assert(!op2->gtOverflowEx());
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
assert((op1ZeroExtends == op2ZeroExtends) || op2AnyExtensionIsSuitable);
// Do unsigned mul iff both operands are zero-extending.
assert(op1->IsUnsigned() == IsUnsigned());
}
#endif // !defined(TARGET_64BIT) && defined(DEBUG)
#endif // !defined(TARGET_64BIT) || defined(TARGET_ARM64)
unsigned Compiler::gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz)
{
unsigned level = 0;
unsigned costEx = 0;
unsigned costSz = 0;
for (GenTreeCall::Use& use : args)
{
GenTree* argNode = use.GetNode();
unsigned argLevel = gtSetEvalOrder(argNode);
if (argLevel > level)
{
level = argLevel;
}
if (argNode->GetCostEx() != 0)
{
costEx += argNode->GetCostEx();
costEx += lateArgs ? 0 : IND_COST_EX;
}
if (argNode->GetCostSz() != 0)
{
costSz += argNode->GetCostSz();
#ifdef TARGET_XARCH
if (lateArgs) // push is smaller than mov to reg
#endif
{
costSz += 1;
}
}
}
*callCostEx += costEx;
*callCostSz += costSz;
return level;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// gtSetMultiOpOrder: Calculate the costs for a MultiOp.
//
// Currently this function just preserves the previous behavior.
// TODO-List-Cleanup: implement proper costing for these trees.
//
// Arguments:
// multiOp - The MultiOp tree in question
//
// Return Value:
// The Sethi "complexity" for this tree (the idealized number of
// registers needed to evaluate it).
//
unsigned Compiler::gtSetMultiOpOrder(GenTreeMultiOp* multiOp)
{
// These default costs preserve previous behavior.
// TODO-CQ: investigate opportunities for tuning them.
int costEx = 1;
int costSz = 1;
unsigned level = 0;
unsigned lvl2 = 0;
#if defined(FEATURE_HW_INTRINSICS)
if (multiOp->OperIs(GT_HWINTRINSIC))
{
GenTreeHWIntrinsic* hwTree = multiOp->AsHWIntrinsic();
#if defined(TARGET_XARCH)
if ((hwTree->GetOperandCount() == 1) && hwTree->OperIsMemoryLoadOrStore())
{
costEx = IND_COST_EX;
costSz = 2;
GenTree* const addrNode = hwTree->Op(1);
level = gtSetEvalOrder(addrNode);
GenTree* const addr = addrNode->gtEffectiveVal();
// See if we can form a complex addressing mode.
if (addr->OperIs(GT_ADD) && gtMarkAddrMode(addr, &costEx, &costSz, hwTree->TypeGet()))
{
// Nothing to do, costs have been set.
}
else
{
costEx += addr->GetCostEx();
costSz += addr->GetCostSz();
}
hwTree->SetCosts(costEx, costSz);
return level;
}
#endif
switch (hwTree->GetHWIntrinsicId())
{
#if defined(TARGET_XARCH)
case NI_Vector128_Create:
case NI_Vector256_Create:
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
case NI_Vector128_Create:
#endif
{
if ((hwTree->GetOperandCount() == 1) && hwTree->Op(1)->OperIsConst())
{
// Vector.Create(cns) is cheap, but not cheap enough to be (1,1)
costEx = IND_COST_EX;
costSz = 2;
level = gtSetEvalOrder(hwTree->Op(1));
hwTree->SetCosts(costEx, costSz);
return level;
}
break;
}
default:
break;
}
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// This code is here to preserve previous behavior.
switch (multiOp->GetOperandCount())
{
case 0:
// This is a constant HWIntrinsic, we already have correct costs.
break;
case 1:
// A "unary" case.
level = gtSetEvalOrder(multiOp->Op(1));
costEx += multiOp->Op(1)->GetCostEx();
costSz += multiOp->Op(1)->GetCostSz();
break;
case 2:
// A "binary" case.
// This way we have "level" be the complexity of the
// first tree to be evaluated, and "lvl2" - the second.
if (multiOp->IsReverseOp())
{
level = gtSetEvalOrder(multiOp->Op(2));
lvl2 = gtSetEvalOrder(multiOp->Op(1));
}
else
{
level = gtSetEvalOrder(multiOp->Op(1));
lvl2 = gtSetEvalOrder(multiOp->Op(2));
}
// We want the more complex tree to be evaluated first.
if (level < lvl2)
{
bool canSwap = multiOp->IsReverseOp() ? gtCanSwapOrder(multiOp->Op(2), multiOp->Op(1))
: gtCanSwapOrder(multiOp->Op(1), multiOp->Op(2));
if (canSwap)
{
if (multiOp->IsReverseOp())
{
multiOp->ClearReverseOp();
}
else
{
multiOp->SetReverseOp();
}
std::swap(level, lvl2);
}
}
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx += (multiOp->Op(1)->GetCostEx() + multiOp->Op(2)->GetCostEx());
costSz += (multiOp->Op(1)->GetCostSz() + multiOp->Op(2)->GetCostSz());
break;
default:
// The former "ArgList" case... we'll be emulating it here.
// The old implementation pushed the nodes on the list, in pre-order.
// Then it popped and costed them in "reverse order", so that's what
// we'll be doing here as well.
unsigned nxtlvl = 0;
for (size_t i = multiOp->GetOperandCount(); i >= 1; i--)
{
GenTree* op = multiOp->Op(i);
unsigned lvl = gtSetEvalOrder(op);
if (lvl < 1)
{
level = nxtlvl;
}
else if (lvl == nxtlvl)
{
level = lvl + 1;
}
else
{
level = lvl;
}
costEx += op->GetCostEx();
costSz += op->GetCostSz();
// Preserving previous behavior...
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_XARCH
if (op->GetCostSz() != 0)
{
costSz += 1;
}
#endif
nxtlvl = level;
}
break;
}
multiOp->SetCosts(costEx, costSz);
return level;
}
#endif
//-----------------------------------------------------------------------------
// gtWalkOp: Traverse and mark an address expression
//
// Arguments:
// op1WB - An in/out parameter which is either the address expression, or one
// of its operands.
// op2WB - An in/out parameter which starts as either null or one of the operands
// of the address expression.
// base - The base address of the addressing mode, or null if 'constOnly' is true
// constOnly - True if we will only traverse into ADDs with constant op2.
//
// This routine is a helper routine for gtSetEvalOrder() and is used to identify the
// base and index nodes, which will be validated against those identified by
// genCreateAddrMode().
// It also marks the ADD nodes involved in the address expression with the
// GTF_ADDRMODE_NO_CSE flag which prevents them from being considered for CSE's.
//
// Its two output parameters are modified under the following conditions:
//
// It is called once with the original address expression as 'op1WB', and
// with 'constOnly' set to false. On this first invocation, *op1WB is always
// an ADD node, and it will consider the operands of the ADD even if its op2 is
// not a constant. However, when it encounters a non-constant or the base in the
// op2 position, it stops iterating. That operand is returned in the 'op2WB' out
// parameter, and will be considered on the third invocation of this method if
// it is an ADD.
//
// It is called the second time with the two operands of the original expression, in
// the original order, and the third time in reverse order. For these invocations
// 'constOnly' is true, so it will only traverse cascaded ADD nodes if they have a
// constant op2.
//
// The result, after three invocations, is that the values of the two out parameters
// correspond to the base and index in some fashion. This method doesn't attempt
// to determine or validate the scale or offset, if any.
//
// Assumptions (presumed to be ensured by genCreateAddrMode()):
// If an ADD has a constant operand, it is in the op2 position.
//
// Notes:
// This method, and its invocation sequence, are quite confusing, and since they
// were not originally well-documented, this specification is a possibly-imperfect
// reconstruction.
// The motivation for the handling of the NOP case is unclear.
// Note that 'op2WB' is only modified in the initial (!constOnly) case,
// or if a NOP is encountered in the op1 position.
//
void Compiler::gtWalkOp(GenTree** op1WB, GenTree** op2WB, GenTree* base, bool constOnly)
{
GenTree* op1 = *op1WB;
GenTree* op2 = *op2WB;
op1 = op1->gtEffectiveVal();
// Now we look for op1's with non-overflow GT_ADDs [of constants]
while ((op1->gtOper == GT_ADD) && (!op1->gtOverflow()) && (!constOnly || (op1->AsOp()->gtOp2->IsCnsIntOrI())))
{
// mark it with GTF_ADDRMODE_NO_CSE
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (!constOnly)
{
op2 = op1->AsOp()->gtOp2;
}
op1 = op1->AsOp()->gtOp1;
// If op1 is a GT_NOP then swap op1 and op2.
// (Why? Also, presumably op2 is not a GT_NOP in this case?)
if (op1->gtOper == GT_NOP)
{
GenTree* tmp;
tmp = op1;
op1 = op2;
op2 = tmp;
}
if (!constOnly && ((op2 == base) || (!op2->IsCnsIntOrI())))
{
break;
}
op1 = op1->gtEffectiveVal();
}
*op1WB = op1;
*op2WB = op2;
}
#ifdef DEBUG
/*****************************************************************************
* This is a workaround. It is to help implement an assert in gtSetEvalOrder() that the values
* gtWalkOp() leaves in op1 and op2 correspond with the values of adr, idx, mul, and cns
* that are returned by genCreateAddrMode(). It's essentially impossible to determine
* what gtWalkOp() *should* return for all possible trees. This simply loosens one assert
* to handle the following case:
indir int
const(h) int 4 field
+ byref
lclVar byref V00 this <-- op2
comma byref <-- adr (base)
indir byte
lclVar byref V00 this
+ byref
const int 2 <-- mul == 4
<< int <-- op1
lclVar int V01 arg1 <-- idx
* Here, we are planning to generate the address mode [edx+4*eax], where eax = idx and edx = the GT_COMMA expression.
* To check adr equivalence with op2, we need to walk down the GT_ADD tree just like gtWalkOp() does.
*/
GenTree* Compiler::gtWalkOpEffectiveVal(GenTree* op)
{
for (;;)
{
op = op->gtEffectiveVal();
if ((op->gtOper != GT_ADD) || op->gtOverflow() || !op->AsOp()->gtOp2->IsCnsIntOrI())
{
break;
}
op = op->AsOp()->gtOp1;
}
return op;
}
#endif // DEBUG
/*****************************************************************************
*
* Given a tree, set the GetCostEx and GetCostSz() fields which
* are used to measure the relative costs of the codegen of the tree
*
*/
void Compiler::gtPrepareCost(GenTree* tree)
{
gtSetEvalOrder(tree);
}
bool Compiler::gtIsLikelyRegVar(GenTree* tree)
{
if (tree->gtOper != GT_LCL_VAR)
{
return false;
}
const LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varDsc->lvDoNotEnregister)
{
return false;
}
// If this is an EH-live var, return false if it is a def,
// as it will have to go to memory.
if (varDsc->lvLiveInOutOfHndlr && ((tree->gtFlags & GTF_VAR_DEF) != 0))
{
return false;
}
// Be pessimistic if ref counts are not yet set up.
//
// Perhaps we should be optimistic though.
// See notes in GitHub issue 18969.
if (!lvaLocalVarRefCounted())
{
return false;
}
if (varDsc->lvRefCntWtd() < (BB_UNITY_WEIGHT * 3))
{
return false;
}
#ifdef TARGET_X86
if (varTypeUsesFloatReg(tree->TypeGet()))
return false;
if (varTypeIsLong(tree->TypeGet()))
return false;
#endif
return true;
}
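// For illustration: with the weighted ref count check above, a local roughly needs at
// least three appearances in normally-weighted (non-cold) blocks, or fewer appearances
// inside a hot loop, before we cost it as if it will live in a register.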
//------------------------------------------------------------------------
// gtCanSwapOrder: Returns true iff the secondNode can be swapped with firstNode.
//
// Arguments:
// firstNode - An operand of a tree that can have GTF_REVERSE_OPS set.
// secondNode - The other operand of the tree.
//
// Return Value:
// Returns a boolean indicating whether it is safe to reverse the execution
// order of the two trees, considering any exception, global effects, or
// ordering constraints.
//
bool Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
{
// The relative order of global / side effects can't be swapped.
bool canSwap = true;
if (optValnumCSE_phase)
{
canSwap = optCSE_canSwap(firstNode, secondNode);
}
// We cannot swap in the presence of special side effects such as GT_CATCH_ARG.
if (canSwap && (firstNode->gtFlags & GTF_ORDER_SIDEEFF))
{
canSwap = false;
}
// When strict side effect order is disabled we allow GTF_REVERSE_OPS to be set
// when one or both sides contain a GTF_CALL or GTF_EXCEPT.
// Currently only the C and C++ languages allow non-strict side effect order.
unsigned strictEffects = GTF_GLOB_EFFECT;
if (canSwap && (firstNode->gtFlags & strictEffects))
{
// op1 has side effects that can't be reordered.
// Check for some special cases where we still may be able to swap.
if (secondNode->gtFlags & strictEffects)
{
// op2 also has non-reorderable side effects - can't swap.
canSwap = false;
}
else
{
// No side effects in op2 - we can swap iff op1 has no way of modifying op2
// (i.e. through byref assignments or calls), or op2 is invariant.
if (firstNode->gtFlags & strictEffects & GTF_PERSISTENT_SIDE_EFFECTS)
{
// We have to be conservative - can swap iff op2 is invariant.
if (!secondNode->IsInvariant())
{
canSwap = false;
}
}
}
}
return canSwap;
}
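// For illustration: given ADD(CALL Foo(), IND(addr)) we cannot set GTF_REVERSE_OPS,
// since evaluating the indirection first could observe memory before Foo() mutates it;
// given ADD(CALL Foo(), CNS_INT 1) we can, because the constant is invariant.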
//------------------------------------------------------------------------
// Given an address expression, compute its costs and addressing mode opportunities,
// and mark addressing mode candidates as GTF_DONT_CSE.
//
// Arguments:
// addr - The address expression
// costEx - The execution cost of this address expression (in/out arg to be updated)
// costSz - The size cost of this address expression (in/out arg to be updated)
// type - The type of the value being referenced by the parent of this address expression.
//
// Return Value:
// Returns true if it finds an addressing mode.
//
// Notes:
// TODO-Throughput - Consider actually instantiating these early, to avoid
// having to re-run the algorithm that looks for them (might also improve CQ).
//
bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_types type)
{
// These are "out" parameters on the call to genCreateAddrMode():
bool rev; // This will be true if the operands will need to be reversed. At this point we
// don't care about this because we're not yet instantiating this addressing mode.
unsigned mul; // This is the index (scale) value for the addressing mode
ssize_t cns; // This is the constant offset
GenTree* base; // This is the base of the address.
GenTree* idx; // This is the index.
if (codeGen->genCreateAddrMode(addr, false /*fold*/, &rev, &base, &idx, &mul, &cns))
{
#ifdef TARGET_ARMARCH
// The multiplier should be a "natural-scale" power of two that matches the width of the accessed type.
//
// *(ulong*)(data + index * 8); - can be optimized
// *(ulong*)(data + index * 7); - cannot be optimized
// *(int*)(data + index * 2); - cannot be optimized
//
if ((mul > 0) && (genTypeSize(type) != mul))
{
return false;
}
#endif
// We can form a complex addressing mode, so mark each of the interior
// nodes with GTF_ADDRMODE_NO_CSE and calculate a more accurate cost.
addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
#ifdef TARGET_XARCH
// addrmodeCount is the count of items that we used to form
// an addressing mode. The maximum value is 4 when we have
// all of these: { base, idx, cns, mul }
//
unsigned addrmodeCount = 0;
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
addrmodeCount++;
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
addrmodeCount++;
}
if (cns)
{
if (((signed char)cns) == ((int)cns))
{
*pCostSz += 1;
}
else
{
*pCostSz += 4;
}
addrmodeCount++;
}
if (mul)
{
addrmodeCount++;
}
// When we form a complex addressing mode we can reduce the costs
// associated with the interior GT_ADD and GT_LSH nodes:
//
// GT_ADD -- reduce this interior GT_ADD by (-3,-3)
// / \ --
// GT_ADD 'cns' -- reduce this interior GT_ADD by (-2,-2)
// / \ --
// 'base' GT_LSL -- reduce this interior GT_LSL by (-1,-1)
// / \ --
// 'idx' 'mul'
//
if (addrmodeCount > 1)
{
// The number of interior GT_ADD and GT_LSH nodes will always be one less than addrmodeCount
//
addrmodeCount--;
GenTree* tmp = addr;
while (addrmodeCount > 0)
{
// decrement the gtCosts for the interior GT_ADD or GT_LSH node by the remaining
// addrmodeCount
tmp->SetCosts(tmp->GetCostEx() - addrmodeCount, tmp->GetCostSz() - addrmodeCount);
addrmodeCount--;
if (addrmodeCount > 0)
{
GenTree* tmpOp1 = tmp->AsOp()->gtOp1;
GenTree* tmpOp2 = tmp->gtGetOp2();
assert(tmpOp2 != nullptr);
if ((tmpOp1 != base) && (tmpOp1->OperGet() == GT_ADD))
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_LSH)
{
tmp = tmpOp2;
}
else if (tmpOp1->OperGet() == GT_LSH)
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_ADD)
{
tmp = tmpOp2;
}
else
{
// We can very rarely encounter a tree that has a GT_COMMA node
// that is difficult to walk, so we just early out without decrementing.
addrmodeCount = 0;
}
}
}
}
#elif defined TARGET_ARM
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
if ((base->gtOper == GT_LCL_VAR) && ((idx == NULL) || (cns == 0)))
{
*pCostSz -= 1;
}
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
if (mul > 0)
{
*pCostSz += 2;
}
}
if (cns)
{
if (cns >= 128) // small offsets fit into a 16-bit instruction
{
if (cns < 4096) // medium offsets require a 32-bit instruction
{
if (!varTypeIsFloating(type))
{
*pCostSz += 2;
}
}
else
{
*pCostEx += 2; // Very large offsets require movw/movt instructions
*pCostSz += 8;
}
}
}
#elif defined TARGET_ARM64
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
}
if (cns != 0)
{
if (cns >= (4096 * genTypeSize(type)))
{
*pCostEx += 1;
*pCostSz += 4;
}
}
#else
#error "Unknown TARGET"
#endif
assert(addr->gtOper == GT_ADD);
assert(!addr->gtOverflow());
assert(mul != 1);
// If we have an addressing mode, we have one of:
// [base + cns]
// [ idx * mul ] // mul >= 2, else we would use base instead of idx
// [ idx * mul + cns] // mul >= 2, else we would use base instead of idx
// [base + idx * mul ] // mul can be 0, 2, 4, or 8
// [base + idx * mul + cns] // mul can be 0, 2, 4, or 8
// Note that mul == 0 is semantically equivalent to mul == 1.
// Note that cns can be zero.
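// For illustration, on xarch these shapes map onto a single memory operand of the form
// [base + index*scale + disp], e.g. "mov eax, [rcx+rdx*4+8]" for [base + idx*4 + 8],
// which is why the interior ADD/LSH nodes have their costs reduced above and are
// marked with GTF_ADDRMODE_NO_CSE below.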
CLANG_FORMAT_COMMENT_ANCHOR;
assert((base != nullptr) || (idx != nullptr && mul >= 2));
INDEBUG(GenTree* op1Save = addr);
// Walk 'addr' identifying non-overflow ADDs that will be part of the address mode.
// Note that we will be modifying 'op1' and 'op2' so that eventually they should
// map to the base and index.
GenTree* op1 = addr;
GenTree* op2 = nullptr;
gtWalkOp(&op1, &op2, base, false);
// op1 and op2 are now descendants of the root GT_ADD of the addressing mode.
assert(op1 != op1Save);
assert(op2 != nullptr);
#if defined(TARGET_XARCH)
// Walk the operands again (the third operand is unused in this case).
// This time we will only consider adds with constant op2's, since
// we have already found either a non-ADD op1 or a non-constant op2.
// NOTE: we don't support ADD(op1, cns) addressing for ARM/ARM64 yet so
// this walk makes no sense there.
gtWalkOp(&op1, &op2, nullptr, true);
// For XARCH we will fold GT_ADDs in the op2 position into the addressing mode, so we call
// gtWalkOp on both operands of the original GT_ADD.
// This is not done for ARMARCH. The stated reason is that we don't try to create a
// scaled index, though in fact we do create them (even base + index*scale + offset).
// At this point, 'op2' may itself be an ADD of a constant that should be folded
// into the addressing mode.
// Walk op2 looking for non-overflow GT_ADDs of constants.
gtWalkOp(&op2, &op1, nullptr, true);
#endif // defined(TARGET_XARCH)
// OK we are done walking the tree
// Now assert that op1 and op2 correspond with base and idx
// in one of the several acceptable ways.
// Note that sometimes op1/op2 is equal to idx/base
// and other times op1/op2 is a GT_COMMA node with
// an effective value that is idx/base
if (mul > 1)
{
if ((op1 != base) && (op1->gtOper == GT_LSH))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1->AsOp()->gtOp1->gtOper == GT_MUL)
{
op1->AsOp()->gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
assert((base == nullptr) || (op2 == base) || (op2->gtEffectiveVal() == base->gtEffectiveVal()) ||
(gtWalkOpEffectiveVal(op2) == gtWalkOpEffectiveVal(base)));
}
else
{
assert(op2 != nullptr);
assert(op2->OperIs(GT_LSH, GT_MUL));
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
// We may have eliminated multiple shifts and multiplies in the addressing mode,
// so navigate down through them to get to "idx".
GenTree* op2op1 = op2->AsOp()->gtOp1;
while ((op2op1->gtOper == GT_LSH || op2op1->gtOper == GT_MUL) && op2op1 != idx)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
op2op1 = op2op1->AsOp()->gtOp1;
}
assert(op1->gtEffectiveVal() == base);
assert(op2op1 == idx);
}
}
else
{
assert(mul == 0);
if ((op1 == idx) || (op1->gtEffectiveVal() == idx))
{
if (idx != nullptr)
{
if ((op1->gtOper == GT_MUL) || (op1->gtOper == GT_LSH))
{
GenTree* op1op1 = op1->AsOp()->gtOp1;
if ((op1op1->gtOper == GT_NOP) ||
(op1op1->gtOper == GT_MUL && op1op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1op1->gtOper == GT_MUL)
{
op1op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
}
assert((op2 == base) || (op2->gtEffectiveVal() == base));
}
else if ((op1 == base) || (op1->gtEffectiveVal() == base))
{
if (idx != nullptr)
{
assert(op2 != nullptr);
if (op2->OperIs(GT_MUL, GT_LSH))
{
GenTree* op2op1 = op2->AsOp()->gtOp1;
if ((op2op1->gtOper == GT_NOP) ||
(op2op1->gtOper == GT_MUL && op2op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op2op1->gtOper == GT_MUL)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
assert((op2 == idx) || (op2->gtEffectiveVal() == idx));
}
}
else
{
// op1 isn't base or idx. Is this possible? Or should there be an assert?
}
}
return true;
} // end if (genCreateAddrMode(...))
return false;
}
/*****************************************************************************
*
* Given a tree, figure out the order in which its sub-operands should be
* evaluated. If the second operand of a binary operator is more expensive
* than the first operand, then try to swap the operand trees. Updates the
* GTF_REVERSE_OPS bit if necessary in this case.
*
* Returns the Sethi 'complexity' estimate for this tree (the higher
* the number, the higher is the tree's resources requirement).
*
* This function sets:
* 1. GetCostEx() to the execution complexity estimate
* 2. GetCostSz() to the code size estimate
* 3. Sometimes sets GTF_ADDRMODE_NO_CSE on nodes in the tree.
* 4. DEBUG-only: clears GTF_DEBUG_NODE_MORPHED.
*/
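// For illustration: for a tree like ADD(LCL_VAR a, MUL(LCL_VAR b, LCL_VAR c)), the MUL
// subtree has the higher Sethi number, so (when legal) GTF_REVERSE_OPS is set on the ADD
// to evaluate the MUL first and keep fewer values live at once.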
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
unsigned Compiler::gtSetEvalOrder(GenTree* tree)
{
assert(tree);
#ifdef DEBUG
/* Clear the GTF_DEBUG_NODE_MORPHED flag as well */
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif
/* Is this a FP value? */
bool isflt = varTypeIsFloating(tree->TypeGet());
/* Figure out what kind of a node we have */
const genTreeOps oper = tree->OperGet();
const unsigned kind = tree->OperKind();
/* Assume no fixed registers will be trashed */
unsigned level;
int costEx;
int costSz;
#ifdef DEBUG
costEx = -1;
costSz = -1;
#endif
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
#ifdef TARGET_ARM
case GT_CNS_STR:
// Uses movw/movt
costSz = 8;
costEx = 2;
goto COMMON_CNS;
case GT_CNS_LNG:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
INT64 lngVal = con->LngValue();
INT32 loVal = (INT32)(lngVal & 0xffffffff);
INT32 hiVal = (INT32)(lngVal >> 32);
if (lngVal == 0)
{
costSz = 1;
costEx = 1;
}
else
{
// Minimum of one instruction to setup hiVal,
// and one instruction to setup loVal
costSz = 4 + 4;
costEx = 1 + 1;
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)hiVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)hiVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)loVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)loVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
}
goto COMMON_CNS;
}
case GT_CNS_INT:
{
// If the constant is a handle then it will need to have a relocation
// applied to it.
// Any constant that requires a reloc must use the movw/movt sequence
//
GenTreeIntConCommon* con = tree->AsIntConCommon();
target_ssize_t conVal = (target_ssize_t)con->IconValue();
if (con->ImmedValNeedsReloc(this))
{
// Requires movw/movt
costSz = 8;
costEx = 2;
}
else if (codeGen->validImmForInstr(INS_add, conVal))
{
// Typically included with parent oper
costSz = 2;
costEx = 1;
}
else if (codeGen->validImmForInstr(INS_mov, conVal) || codeGen->validImmForInstr(INS_mvn, conVal))
{
// Uses mov or mvn
costSz = 4;
costEx = 1;
}
else
{
// Needs movw/movt
costSz = 8;
costEx = 2;
}
goto COMMON_CNS;
}
#elif defined TARGET_XARCH
case GT_CNS_STR:
#ifdef TARGET_AMD64
costSz = 10;
costEx = 2;
#else // TARGET_X86
costSz = 4;
costEx = 1;
#endif
goto COMMON_CNS;
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t conVal = (oper == GT_CNS_LNG) ? (ssize_t)con->LngValue() : con->IconValue();
bool fitsInVal = true;
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
INT64 lngVal = con->LngValue();
conVal = (ssize_t)lngVal; // truncate to 32-bits
fitsInVal = ((INT64)conVal == lngVal);
}
#endif // TARGET_X86
// If the constant is a handle then it will need to have a relocation
// applied to it.
//
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
if (iconNeedsReloc)
{
costSz = 4;
costEx = 1;
}
else if (fitsInVal && GenTreeIntConCommon::FitsInI8(conVal))
{
costSz = 1;
costEx = 1;
}
#ifdef TARGET_AMD64
else if (!GenTreeIntConCommon::FitsInI32(conVal))
{
costSz = 10;
costEx = 2;
}
#endif // TARGET_AMD64
else
{
costSz = 4;
costEx = 1;
}
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
costSz += fitsInVal ? 1 : 4;
costEx += 1;
}
#endif // TARGET_X86
goto COMMON_CNS;
}
#elif defined(TARGET_ARM64)
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
INT64 imm = con->LngValue();
emitAttr size = EA_SIZE(emitActualTypeSize(tree));
if (iconNeedsReloc)
{
costSz = 8;
costEx = 2;
}
else if (emitter::emitIns_valid_imm_for_add(imm, size))
{
costSz = 2;
costEx = 1;
}
else if (emitter::emitIns_valid_imm_for_mov(imm, size))
{
costSz = 4;
costEx = 1;
}
else
{
// Arm64 allows any arbitrary 16-bit constant to be loaded into a register halfword.
// There are three forms:
// movk - loads into any halfword, preserving the remaining halfwords
// movz - loads into any halfword, zeroing the remaining halfwords
// movn - loads into any halfword, zeroing the remaining halfwords, then bitwise
// inverting the register
// In some cases it is preferable to use movn, because it has the side effect of
// filling the other halfwords with ones.
// Determine whether movn or movz will require the fewest instructions to populate
// the immediate.
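// For illustration: for the 64-bit immediate 0x0000_0000_0000_1234 every upper halfword
// is 0x0000, so movz wins and a single "movz x0, #0x1234" suffices (instructionCount
// becomes 1); for 0xFFFF_FFFF_FFFF_1234 the upper halfwords are all 0xffff, so movn wins
// with a single "movn x0, #0xedcb".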
bool preferMovz = false;
bool preferMovn = false;
int instructionCount = 4;
for (int i = (size == EA_8BYTE) ? 48 : 16; i >= 0; i -= 16)
{
if (!preferMovn && (uint16_t(imm >> i) == 0x0000))
{
preferMovz = true; // by using a movk to start we can save one instruction
instructionCount--;
}
else if (!preferMovz && (uint16_t(imm >> i) == 0xffff))
{
preferMovn = true; // by using a movn to start we can save one instruction
instructionCount--;
}
}
costEx = instructionCount;
costSz = 4 * instructionCount;
}
}
goto COMMON_CNS;
#else
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
#error "Unknown TARGET"
#endif
COMMON_CNS:
/*
Note that some code below depends on constants always getting
moved to be the second operand of a binary operator. This is
easily accomplished by giving constants a level of 0, which
we do on the next line. If you ever decide to change this, be
aware that unless you make other arrangements for integer
constants to be moved, stuff will break.
*/
level = 0;
break;
case GT_CNS_DBL:
{
level = 0;
#if defined(TARGET_XARCH)
/* We use fldz and fld1 to load 0.0 and 1.0, but all other */
/* floating point constants are loaded using an indirection */
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
(*((__int64*)&(tree->AsDblCon()->gtDconVal)) == I64(0x3ff0000000000000)))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#elif defined(TARGET_ARM)
var_types targetType = tree->TypeGet();
if (targetType == TYP_FLOAT)
{
costEx = 1 + 2;
costSz = 2 + 4;
}
else
{
assert(targetType == TYP_DOUBLE);
costEx = 1 + 4;
costSz = 2 + 8;
}
#elif defined(TARGET_ARM64)
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
emitter::emitIns_valid_imm_for_fmov(tree->AsDblCon()->gtDconVal))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#else
#error "Unknown TARGET"
#endif
}
break;
case GT_LCL_VAR:
level = 1;
if (gtIsLikelyRegVar(tree))
{
costEx = 1;
costSz = 1;
/* Sign-extend and zero-extend are more expensive to load */
if (lvaTable[tree->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
costEx += 1;
costSz += 1;
}
}
else
{
costEx = IND_COST_EX;
costSz = 2;
/* Sign-extend and zero-extend are more expensive to load */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
}
#if defined(TARGET_AMD64)
// increase costSz for floating point locals
if (isflt)
{
costSz += 1;
if (!gtIsLikelyRegVar(tree))
{
costSz += 1;
}
}
#endif
break;
case GT_CLS_VAR:
#ifdef TARGET_ARM
// We generate movw/movt/ldr
level = 1;
costEx = 3 + IND_COST_EX; // 6
costSz = 4 + 4 + 2; // 10
break;
#endif
case GT_LCL_FLD:
level = 1;
costEx = IND_COST_EX;
costSz = 4;
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
level = 1;
costEx = 3;
costSz = 3;
break;
case GT_PHI_ARG:
case GT_ARGPLACE:
level = 0;
costEx = 0;
costSz = 0;
break;
default:
level = 1;
costEx = 1;
costSz = 1;
break;
}
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
int lvlb; // preference for op2
unsigned lvl2; // scratch variable
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2IfPresent();
costEx = 0;
costSz = 0;
if (tree->OperIsAddrMode())
{
if (op1 == nullptr)
{
op1 = op2;
op2 = nullptr;
}
}
/* Check for a nilary operator */
if (op1 == nullptr)
{
assert(op2 == nullptr);
level = 0;
goto DONE;
}
/* Is this a unary operator? */
if (op2 == nullptr)
{
/* Process the operand of the operator */
/* Most Unary ops have costEx of 1 */
costEx = 1;
costSz = 1;
level = gtSetEvalOrder(op1);
GenTreeIntrinsic* intrinsic;
/* Special handling for some operators */
switch (oper)
{
case GT_JTRUE:
costEx = 2;
costSz = 2;
break;
case GT_SWITCH:
costEx = 10;
costSz = 5;
break;
case GT_CAST:
#if defined(TARGET_ARM)
costEx = 1;
costSz = 1;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 3;
costSz = 4;
}
#elif defined(TARGET_ARM64)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 2;
costSz = 4;
}
#elif defined(TARGET_XARCH)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
/* casts involving floats always go through memory */
costEx = IND_COST_EX * 2;
costSz = 6;
}
#else
#error "Unknown TARGET"
#endif
/* Overflow casts are a lot more expensive */
if (tree->gtOverflow())
{
costEx += 6;
costSz += 6;
}
break;
case GT_NOP:
costEx = 0;
costSz = 0;
break;
case GT_INTRINSIC:
intrinsic = tree->AsIntrinsic();
// named intrinsic
assert(intrinsic->gtIntrinsicName != NI_Illegal);
// GT_INTRINSIC intrinsics Sin, Cos, Sqrt, Abs ... have higher costs.
// TODO: tune these costs to be target-specific, as some of these are
// target intrinsics and would cost less to generate code for.
switch (intrinsic->gtIntrinsicName)
{
default:
assert(!"missing case for gtIntrinsicName");
costEx = 12;
costSz = 12;
break;
case NI_System_Math_Abs:
costEx = 5;
costSz = 15;
break;
case NI_System_Math_Acos:
case NI_System_Math_Acosh:
case NI_System_Math_Asin:
case NI_System_Math_Asinh:
case NI_System_Math_Atan:
case NI_System_Math_Atanh:
case NI_System_Math_Atan2:
case NI_System_Math_Cbrt:
case NI_System_Math_Ceiling:
case NI_System_Math_Cos:
case NI_System_Math_Cosh:
case NI_System_Math_Exp:
case NI_System_Math_Floor:
case NI_System_Math_FMod:
case NI_System_Math_FusedMultiplyAdd:
case NI_System_Math_ILogB:
case NI_System_Math_Log:
case NI_System_Math_Log2:
case NI_System_Math_Log10:
case NI_System_Math_Max:
case NI_System_Math_Min:
case NI_System_Math_Pow:
case NI_System_Math_Round:
case NI_System_Math_Sin:
case NI_System_Math_Sinh:
case NI_System_Math_Sqrt:
case NI_System_Math_Tan:
case NI_System_Math_Tanh:
case NI_System_Math_Truncate:
{
// We give intrinsics a large fixed execution cost because we'd like to CSE
// them, even if they are implemented by calls. This is different from modeling
// user calls, since we never CSE user calls. We don't do this for target
// intrinsics, however, as they typically represent single-instruction calls.
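// Illustrative example: a Math.Sqrt call that is implemented as a user call gets
// costEx = 36 below, making two identical Sqrt(x) trees attractive CSE candidates,
// whereas a target-intrinsic Sqrt gets costEx = 3 and is cheap to just re-evaluate.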
if (IsIntrinsicImplementedByUserCall(intrinsic->gtIntrinsicName))
{
costEx = 36;
costSz = 4;
}
else
{
costEx = 3;
costSz = 4;
}
break;
}
case NI_System_Object_GetType:
// We give intrinsics a large fixed execution cost because we'd like to CSE
// them, even if they are implemented by calls. This is different from modeling
// user calls, since we never CSE user calls.
costEx = 36;
costSz = 4;
break;
}
level++;
break;
case GT_NOT:
case GT_NEG:
// We need to ensure that -x is evaluated before x or else
// we get burned while adjusting genFPstkLevel in x*-x where
// the rhs x is the last use of the enregistered x.
//
// Even in the integer case we prefer to evaluate the side without the
// GT_NEG node, all other things being equal. Also, a GT_NOT requires a
// scratch register.
level++;
break;
case GT_ADDR:
costEx = 0;
costSz = 1;
// If we have a GT_ADDR of a GT_IND we can just copy the costs from indOp1
if (op1->OperGet() == GT_IND)
{
GenTree* indOp1 = op1->AsOp()->gtOp1;
costEx = indOp1->GetCostEx();
costSz = indOp1->GetCostSz();
}
break;
case GT_ARR_LENGTH:
level++;
/* Array length should cost the same as an indirection, which has a costEx of IND_COST_EX */
costEx = IND_COST_EX - 1;
costSz = 2;
break;
case GT_MKREFANY:
case GT_OBJ:
// We estimate the cost of a GT_OBJ or GT_MKREFANY to be two loads (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BOX:
// We estimate the cost of a GT_BOX to be two stores (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BLK:
case GT_IND:
/* An indirection should always have a non-zero level.
* Only constant leaf nodes have level 0.
*/
if (level == 0)
{
level = 1;
}
/* Indirections have a costEx of IND_COST_EX */
costEx = IND_COST_EX;
costSz = 2;
/* If we have to sign-extend or zero-extend, bump the cost */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
if (isflt)
{
if (tree->TypeGet() == TYP_DOUBLE)
{
costEx += 1;
}
#ifdef TARGET_ARM
costSz += 2;
#endif // TARGET_ARM
}
// Can we form an addressing mode with this indirection?
// TODO-CQ: Consider changing this to op1->gtEffectiveVal() to take into account
// addressing modes hidden under a comma node.
if (op1->gtOper == GT_ADD)
{
// See if we can form a complex addressing mode.
GenTree* addr = op1->gtEffectiveVal();
bool doAddrMode = true;
// See if we can form a complex addressing mode.
// Always use an addrMode for an array index indirection.
// TODO-1stClassStructs: Always do this, but first make sure it's
// done in Lowering as well.
if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
if (tree->TypeGet() == TYP_STRUCT)
{
doAddrMode = false;
}
else if (varTypeIsStruct(tree))
{
// This is a heuristic attempting to match prior behavior when indirections
// under a struct assignment would not be considered for addressing modes.
if (compCurStmt != nullptr)
{
GenTree* expr = compCurStmt->GetRootNode();
if ((expr->OperGet() == GT_ASG) &&
((expr->gtGetOp1() == tree) || (expr->gtGetOp2() == tree)))
{
doAddrMode = false;
}
}
}
}
#ifdef TARGET_ARM64
if (tree->gtFlags & GTF_IND_VOLATILE)
{
// For volatile store/loads when address is contained we always emit `dmb`
// if it's not - we emit one-way barriers i.e. ldar/stlr
doAddrMode = false;
}
#endif // TARGET_ARM64
if (doAddrMode && gtMarkAddrMode(addr, &costEx, &costSz, tree->TypeGet()))
{
goto DONE;
}
} // end if (op1->gtOper == GT_ADD)
else if (gtIsLikelyRegVar(op1))
{
/* Indirection of an enregistered LCL_VAR, don't increase costEx/costSz */
goto DONE;
}
#ifdef TARGET_XARCH
else if (op1->IsCnsIntOrI())
{
// Indirection of a CNS_INT: subtracting 1 from costEx
// makes costEx 3 for x86 and 4 for amd64
//
costEx += (op1->GetCostEx() - 1);
costSz += op1->GetCostSz();
goto DONE;
}
#endif
break;
default:
break;
}
costEx += op1->GetCostEx();
costSz += op1->GetCostSz();
goto DONE;
}
/* Binary operator - check for certain special cases */
lvlb = 0;
/* Default Binary ops have a cost of 1,1 */
costEx = 1;
costSz = 1;
#ifdef TARGET_ARM
if (isflt)
{
costSz += 2;
}
#endif
#ifndef TARGET_64BIT
if (varTypeIsLong(op1->TypeGet()))
{
/* Operations on longs are more expensive */
costEx += 3;
costSz += 3;
}
#endif
switch (oper)
{
case GT_MOD:
case GT_UMOD:
/* Modulo by a power of 2 is easy */
if (op2->IsCnsIntOrI())
{
size_t ival = op2->AsIntConCommon()->IconValue();
if (ival > 0 && ival == genFindLowestBit(ival))
{
break;
}
}
FALLTHROUGH;
case GT_DIV:
case GT_UDIV:
if (isflt)
{
/* fp division is very expensive to execute */
costEx = 36; // TYP_DOUBLE
costSz += 3;
}
else
{
/* integer division is also very expensive */
costEx = 20;
costSz += 2;
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 3;
}
break;
case GT_MUL:
if (isflt)
{
/* FP multiplication instructions are more expensive */
costEx += 4;
costSz += 3;
}
else
{
/* Integer multiplication instructions are more expensive */
costEx += 3;
costSz += 2;
if (tree->gtOverflow())
{
/* Overflow checks are more expensive */
costEx += 3;
costSz += 3;
}
#ifdef TARGET_X86
if ((tree->gtType == TYP_LONG) || tree->gtOverflow())
{
/* We use imulEAX for TYP_LONG and overflow multiplications */
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 4;
/* The 64-bit imul instruction costs more */
costEx += 4;
}
#endif // TARGET_X86
}
break;
case GT_ADD:
case GT_SUB:
if (isflt)
{
/* FP instructions are a bit more expensive */
costEx += 4;
costSz += 3;
break;
}
/* Overflow checks are more expensive */
if (tree->gtOverflow())
{
costEx += 3;
costSz += 3;
}
break;
case GT_BOUNDS_CHECK:
costEx = 4; // cmp reg,reg and jae throw (not taken)
costSz = 7; // jump to cold section
break;
case GT_COMMA:
/* Comma tosses the result of the left operand */
gtSetEvalOrder(op1);
level = gtSetEvalOrder(op2);
/* GT_COMMA cost is the sum of op1 and op2 costs */
costEx = (op1->GetCostEx() + op2->GetCostEx());
costSz = (op1->GetCostSz() + op2->GetCostSz());
goto DONE;
case GT_COLON:
level = gtSetEvalOrder(op1);
lvl2 = gtSetEvalOrder(op2);
if (level < lvl2)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx = op1->GetCostEx() + op2->GetCostEx();
costSz = op1->GetCostSz() + op2->GetCostSz();
goto DONE;
case GT_INDEX_ADDR:
costEx = 6; // cmp reg,reg; jae throw; mov reg, [addrmode] (not taken)
costSz = 9; // jump to cold section
break;
case GT_ASG:
/* Assignments need a bit of special handling */
/* Process the target */
level = gtSetEvalOrder(op1);
if (gtIsLikelyRegVar(op1))
{
assert(lvlb == 0);
lvl2 = gtSetEvalOrder(op2);
/* Assignment to an enregistered LCL_VAR */
costEx = op2->GetCostEx();
costSz = max(3, op2->GetCostSz()); // 3 is an estimate for a reg-reg assignment
goto DONE_OP1_AFTER_COST;
}
goto DONE_OP1;
default:
break;
}
/* Process the sub-operands */
level = gtSetEvalOrder(op1);
if (lvlb < 0)
{
level -= lvlb; // lvlb is negative, so this increases level
lvlb = 0;
}
DONE_OP1:
assert(lvlb >= 0);
lvl2 = gtSetEvalOrder(op2) + lvlb;
costEx += (op1->GetCostEx() + op2->GetCostEx());
costSz += (op1->GetCostSz() + op2->GetCostSz());
DONE_OP1_AFTER_COST:
bool bReverseInAssignment = false;
if (oper == GT_ASG && (!optValnumCSE_phase || optCSE_canSwap(op1, op2)))
{
GenTree* op1Val = op1;
// Skip over the GT_IND/GT_ADDR tree (if one exists)
//
if ((op1->gtOper == GT_IND) && (op1->AsOp()->gtOp1->gtOper == GT_ADDR))
{
op1Val = op1->AsOp()->gtOp1->AsOp()->gtOp1;
}
switch (op1Val->gtOper)
{
case GT_IND:
case GT_BLK:
case GT_OBJ:
// In an indirection, the destination address is evaluated prior to the source.
// If we have any side effects on the target indirection,
// we have to evaluate op1 first.
// However, if the LHS is a lclVar address, SSA relies on using evaluation order for its
// renaming, and therefore the RHS must be evaluated first.
// If we have an assignment involving a lclVar address, the LHS may be marked as having
// side-effects.
// However, the side effects won't require that we evaluate the LHS address first:
// - The GTF_GLOB_REF might have been conservatively set on a FIELD of a local.
// - The local might be address-exposed, but that side effect happens at the actual assignment (not
//   when its address is "evaluated"), so it remains safe to "evaluate" the address after the RHS
//   (note that in this case it won't be renamed by SSA anyway, but the reordering is safe).
//
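// Illustrative example: for "lclVar = <rhs>" (or an indirection whose address is a
// local address expression) GTF_REVERSE_OPS is set below, so the RHS is evaluated
// before the LHS, which is the ordering that SSA renaming relies on.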
if (op1Val->AsIndir()->Addr()->IsLocalAddrExpr())
{
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
}
if (op1Val->AsIndir()->Addr()->gtFlags & GTF_ALL_EFFECT)
{
break;
}
// In case op2 assigns to a local var that is used in op1Val, we have to evaluate op1Val first.
if (op2->gtFlags & GTF_ASG)
{
break;
}
// If op2 is simple then evaluate op1 first
if (op2->OperKind() & GTK_LEAF)
{
break;
}
// fall through and set GTF_REVERSE_OPS
FALLTHROUGH;
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_CLS_VAR:
// We evaluate op2 before op1
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
default:
break;
}
}
else if (GenTree::OperIsCompare(oper))
{
/* Float compares remove both operands from the FP stack */
/* Also FP comparison uses EAX for flags */
if (varTypeIsFloating(op1->TypeGet()))
{
level++;
lvl2++;
}
if ((tree->gtFlags & GTF_RELOP_JMP_USED) == 0)
{
/* Using a setcc instruction is more expensive */
costEx += 3;
}
}
/* Check for other interesting cases */
switch (oper)
{
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
/* Variable-sized shifts are more expensive and use REG_SHIFT */
if (!op2->IsCnsIntOrI())
{
costEx += 3;
#ifndef TARGET_64BIT
// Variable sized LONG shifts require the use of a helper call
//
if (tree->gtType == TYP_LONG)
{
level += 5;
lvl2 += 5;
costEx += 3 * IND_COST_EX;
costSz += 4;
}
#endif // !TARGET_64BIT
}
break;
case GT_INTRINSIC:
switch (tree->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Atan2:
case NI_System_Math_Pow:
// These math intrinsics are actually implemented by user calls.
// Increase the Sethi 'complexity' by two to reflect the argument
// register requirement.
level += 2;
break;
case NI_System_Math_Max:
case NI_System_Math_Min:
level++;
break;
default:
assert(!"Unknown binary GT_INTRINSIC operator");
break;
}
break;
default:
break;
}
/* We need to evaluate constants later, as many places in codegen
can't handle op1 being a constant. This is normally naturally
enforced as constants have the lowest possible level of 0. However,
sometimes we end up with a tree like "cns1 < nop(cns2)". In
such cases, both sides have a level of 0. So encourage constants
to be evaluated last in such cases */
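// Illustrative example: in "CNS_INT(1) < NOP(CNS_INT(2))" both operands end up with
// level 0; bumping lvl2 below biases the swap logic toward evaluating the constant
// op1 last.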
if ((level == 0) && (level == lvl2) && op1->OperIsConst() &&
(tree->OperIsCommutative() || tree->OperIsCompare()))
{
lvl2++;
}
/* We try to swap operands if the second one is more expensive */
bool tryToSwap;
GenTree* opA;
GenTree* opB;
if (tree->gtFlags & GTF_REVERSE_OPS)
{
opA = op2;
opB = op1;
}
else
{
opA = op1;
opB = op2;
}
if (fgOrder == FGOrderLinear)
{
// Don't swap anything if we're in linear order; we're really just interested in the costs.
tryToSwap = false;
}
else if (bReverseInAssignment)
{
// Assignments are special; the GTF_REVERSE_OPS flag,
// if appropriate, was already set above.
tryToSwap = false;
}
else if ((oper == GT_INTRINSIC) && IsIntrinsicImplementedByUserCall(tree->AsIntrinsic()->gtIntrinsicName))
{
// We do not swap operand execution order for intrinsics that are implemented by user calls
// because of trickiness around ensuring the execution order does not change during rationalization.
tryToSwap = false;
}
else if (oper == GT_BOUNDS_CHECK)
{
// Bounds check nodes used to not be binary, thus GTF_REVERSE_OPS was
// not enabled for them. This condition preserves that behavior.
// Additionally, CQ analysis shows that enabling GTF_REVERSE_OPS
// for these nodes leads to mixed results at best.
tryToSwap = false;
}
else
{
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tryToSwap = (level > lvl2);
}
else
{
tryToSwap = (level < lvl2);
}
// Try to force extra swapping when in the stress mode:
if (compStressCompile(STRESS_REVERSE_FLAG, 60) && ((tree->gtFlags & GTF_REVERSE_OPS) == 0) &&
!op2->OperIsConst())
{
tryToSwap = true;
}
}
if (tryToSwap)
{
bool canSwap = gtCanSwapOrder(opA, opB);
if (canSwap)
{
/* Can we swap the order by commuting the operands? */
switch (oper)
{
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
if (GenTree::SwapRelop(oper) != oper)
{
tree->SetOper(GenTree::SwapRelop(oper), GenTree::PRESERVE_VN);
}
FALLTHROUGH;
case GT_ADD:
case GT_MUL:
case GT_OR:
case GT_XOR:
case GT_AND:
/* Swap the operands */
tree->AsOp()->gtOp1 = op2;
tree->AsOp()->gtOp2 = op1;
break;
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
break;
default:
/* Mark the operands' evaluation order to be swapped */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tree->gtFlags &= ~GTF_REVERSE_OPS;
}
else
{
tree->gtFlags |= GTF_REVERSE_OPS;
}
break;
}
}
}
/* Swap the level counts */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
unsigned tmpl;
tmpl = level;
level = lvl2;
lvl2 = tmpl;
}
/* Compute the sethi number for this binary operator */
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
unsigned lvl2; // Scratch variable
case GT_CALL:
assert(tree->gtFlags & GTF_CALL);
level = 0;
costEx = 5;
costSz = 2;
GenTreeCall* call;
call = tree->AsCall();
/* Evaluate the 'this' argument, if present */
if (tree->AsCall()->gtCallThisArg != nullptr)
{
GenTree* thisVal = tree->AsCall()->gtCallThisArg->GetNode();
lvl2 = gtSetEvalOrder(thisVal);
if (level < lvl2)
{
level = lvl2;
}
costEx += thisVal->GetCostEx();
costSz += thisVal->GetCostSz() + 1;
}
/* Evaluate the arguments, right to left */
if (call->gtCallArgs != nullptr)
{
const bool lateArgs = false;
lvl2 = gtSetCallArgsOrder(call->Args(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
/* Evaluate the temp register arguments list
* This is a "hidden" list and its only purpose is to
* extend the life of temps until we make the call */
if (call->gtCallLateArgs != nullptr)
{
const bool lateArgs = true;
lvl2 = gtSetCallArgsOrder(call->LateArgs(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
if (call->gtCallType == CT_INDIRECT)
{
// pinvoke-calli cookie is a constant, or constant indirection
assert(call->gtCallCookie == nullptr || call->gtCallCookie->gtOper == GT_CNS_INT ||
call->gtCallCookie->gtOper == GT_IND);
GenTree* indirect = call->gtCallAddr;
lvl2 = gtSetEvalOrder(indirect);
if (level < lvl2)
{
level = lvl2;
}
costEx += indirect->GetCostEx() + IND_COST_EX;
costSz += indirect->GetCostSz();
}
else
{
if (call->IsVirtual())
{
GenTree* controlExpr = call->gtControlExpr;
if (controlExpr != nullptr)
{
lvl2 = gtSetEvalOrder(controlExpr);
if (level < lvl2)
{
level = lvl2;
}
costEx += controlExpr->GetCostEx();
costSz += controlExpr->GetCostSz();
}
}
#ifdef TARGET_ARM
if (call->IsVirtualStub())
{
// We generate movw/movt/ldr
costEx += (1 + IND_COST_EX);
costSz += 8;
if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
{
// Must use R12 for the ldr target -- REG_JUMP_THUNK_PARAM
costSz += 2;
}
}
else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
{
costEx += 2;
costSz += 6;
}
costSz += 2;
#endif
#ifdef TARGET_XARCH
costSz += 3;
#endif
}
level += 1;
/* Virtual calls are a bit more expensive */
if (call->IsVirtual())
{
costEx += 2 * IND_COST_EX;
costSz += 2;
}
level += 5;
costEx += 3 * IND_COST_EX;
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
return gtSetMultiOpOrder(tree->AsMultiOp());
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
level = gtSetEvalOrder(arrElem->gtArrObj);
costEx = arrElem->gtArrObj->GetCostEx();
costSz = arrElem->gtArrObj->GetCostSz();
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
lvl2 = gtSetEvalOrder(arrElem->gtArrInds[dim]);
if (level < lvl2)
{
level = lvl2;
}
costEx += arrElem->gtArrInds[dim]->GetCostEx();
costSz += arrElem->gtArrInds[dim]->GetCostSz();
}
level += arrElem->gtArrRank;
costEx += 2 + (arrElem->gtArrRank * (IND_COST_EX + 1));
costSz += 2 + (arrElem->gtArrRank * 2);
}
break;
case GT_ARR_OFFSET:
level = gtSetEvalOrder(tree->AsArrOffs()->gtOffset);
costEx = tree->AsArrOffs()->gtOffset->GetCostEx();
costSz = tree->AsArrOffs()->gtOffset->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtIndex);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtIndex->GetCostEx();
costSz += tree->AsArrOffs()->gtIndex->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtArrObj);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtArrObj->GetCostEx();
costSz += tree->AsArrOffs()->gtArrObj->GetCostSz();
break;
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
lvl2 = gtSetEvalOrder(use.GetNode());
// PHI args should always have cost 0 and level 0
assert(lvl2 == 0);
assert(use.GetNode()->GetCostEx() == 0);
assert(use.GetNode()->GetCostSz() == 0);
}
// Give it a level of 2, just to be sure that it's greater than the LHS of
// the parent assignment and the PHI gets evaluated first in linear order.
// See also SsaBuilder::InsertPhi and SsaBuilder::AddPhiArg.
level = 2;
costEx = 0;
costSz = 0;
break;
case GT_FIELD_LIST:
level = 0;
costEx = 0;
costSz = 0;
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
unsigned opLevel = gtSetEvalOrder(use.GetNode());
level = max(level, opLevel);
costEx += use.GetNode()->GetCostEx();
costSz += use.GetNode()->GetCostSz();
}
break;
case GT_CMPXCHG:
level = gtSetEvalOrder(tree->AsCmpXchg()->gtOpLocation);
costSz = tree->AsCmpXchg()->gtOpLocation->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpValue);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpValue->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpComparand);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpComparand->GetCostSz();
costEx = MAX_COST; // Seriously, what could be more expensive than lock cmpxchg?
costSz += 5; // size of lock cmpxchg [reg+C], reg
break;
case GT_STORE_DYN_BLK:
level = gtSetEvalOrder(tree->AsStoreDynBlk()->Addr());
costEx = tree->AsStoreDynBlk()->Addr()->GetCostEx();
costSz = tree->AsStoreDynBlk()->Addr()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->Data());
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->Data()->GetCostEx();
costSz += tree->AsStoreDynBlk()->Data()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->gtDynamicSize);
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->gtDynamicSize->GetCostEx();
costSz += tree->AsStoreDynBlk()->gtDynamicSize->GetCostSz();
break;
default:
JITDUMP("unexpected operator in this tree:\n");
DISPTREE(tree);
NO_WAY("unexpected operator");
}
DONE:
// Some path through this function must have set the costs.
assert(costEx != -1);
assert(costSz != -1);
tree->SetCosts(costEx, costSz);
return level;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
#ifdef DEBUG
bool GenTree::OperSupportsReverseOpEvalOrder(Compiler* comp) const
{
if (OperIsBinary())
{
if ((AsOp()->gtGetOp1() == nullptr) || (AsOp()->gtGetOp2() == nullptr))
{
return false;
}
if (OperIs(GT_COMMA, GT_BOUNDS_CHECK))
{
return false;
}
if (OperIs(GT_INTRINSIC))
{
return !comp->IsIntrinsicImplementedByUserCall(AsIntrinsic()->gtIntrinsicName);
}
return true;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
if (OperIsMultiOp())
{
return AsMultiOp()->GetOperandCount() == 2;
}
#endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS
return false;
}
#endif // DEBUG
/*****************************************************************************
*
* If the given tree is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0. Note that we never return 1,
* to match the behavior of GetScaleIndexShf().
*/
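// Illustrative example: a CNS_INT(4) node returns 4 here (usable as "[reg+4*index]"),
// while CNS_INT(1) or a non-constant node returns 0.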
unsigned GenTree::GetScaleIndexMul()
{
if (IsCnsIntOrI() && jitIsScaleIndexMul(AsIntConCommon()->IconValue()) && AsIntConCommon()->IconValue() != 1)
{
return (unsigned)AsIntConCommon()->IconValue();
}
return 0;
}
/*****************************************************************************
*
* If the given tree is the right-hand side of a left shift (that is,
* 'y' in the tree 'x' << 'y'), and it is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0.
*/
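// Illustrative example: for "x << 2" the shift amount CNS_INT(2) yields 4 here,
// i.e. the same scaling as the multiplier form "[4*index]".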
unsigned GenTree::GetScaleIndexShf()
{
if (IsCnsIntOrI() && jitIsScaleIndexShift(AsIntConCommon()->IconValue()))
{
return (unsigned)(1 << AsIntConCommon()->IconValue());
}
return 0;
}
/*****************************************************************************
*
* If the given tree is a scaled index (i.e. "op * 4" or "op << 2"), returns
* the multiplier: 2, 4, or 8; otherwise returns 0. Note that "1" is never
* returned.
*/
unsigned GenTree::GetScaledIndex()
{
// with !opts.OptEnabled(CLFLG_CONSTANTFOLD) we can have
// CNS_INT * CNS_INT
//
if (AsOp()->gtOp1->IsCnsIntOrI())
{
return 0;
}
switch (gtOper)
{
case GT_MUL:
return AsOp()->gtOp2->GetScaleIndexMul();
case GT_LSH:
return AsOp()->gtOp2->GetScaleIndexShf();
default:
assert(!"GenTree::GetScaledIndex() called with illegal gtOper");
break;
}
return 0;
}
//------------------------------------------------------------------------
// TryGetUse: Get the use edge for an operand of this tree.
//
// Arguments:
// operand - the node to find the use for
// pUse - [out] parameter for the use
//
// Return Value:
// Whether "operand" is a child of this node. If it is, "*pUse" is set,
// allowing for the replacement of "operand" with some other node.
//
bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
switch (OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
return false;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_RETURN:
case GT_RETFILT:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
// Variadic nodes
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
if (this->AsUnOp()->gtOp1->gtOper == GT_FIELD_LIST)
{
return this->AsUnOp()->gtOp1->TryGetUse(operand, pUse);
}
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
#endif // FEATURE_ARG_SPLIT
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
for (GenTree** opUse : this->AsMultiOp()->UseEdges())
{
if (*opUse == operand)
{
*pUse = opUse;
return true;
}
}
return false;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// Special nodes
case GT_PHI:
for (GenTreePhi::Use& phiUse : AsPhi()->Uses())
{
if (phiUse.GetNode() == operand)
{
*pUse = &phiUse.NodeRef();
return true;
}
}
return false;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& fieldUse : AsFieldList()->Uses())
{
if (fieldUse.GetNode() == operand)
{
*pUse = &fieldUse.NodeRef();
return true;
}
}
return false;
case GT_CMPXCHG:
{
GenTreeCmpXchg* const cmpXchg = this->AsCmpXchg();
if (operand == cmpXchg->gtOpLocation)
{
*pUse = &cmpXchg->gtOpLocation;
return true;
}
if (operand == cmpXchg->gtOpValue)
{
*pUse = &cmpXchg->gtOpValue;
return true;
}
if (operand == cmpXchg->gtOpComparand)
{
*pUse = &cmpXchg->gtOpComparand;
return true;
}
return false;
}
case GT_ARR_ELEM:
{
GenTreeArrElem* const arrElem = this->AsArrElem();
if (operand == arrElem->gtArrObj)
{
*pUse = &arrElem->gtArrObj;
return true;
}
for (unsigned i = 0; i < arrElem->gtArrRank; i++)
{
if (operand == arrElem->gtArrInds[i])
{
*pUse = &arrElem->gtArrInds[i];
return true;
}
}
return false;
}
case GT_ARR_OFFSET:
{
GenTreeArrOffs* const arrOffs = this->AsArrOffs();
if (operand == arrOffs->gtOffset)
{
*pUse = &arrOffs->gtOffset;
return true;
}
if (operand == arrOffs->gtIndex)
{
*pUse = &arrOffs->gtIndex;
return true;
}
if (operand == arrOffs->gtArrObj)
{
*pUse = &arrOffs->gtArrObj;
return true;
}
return false;
}
case GT_STORE_DYN_BLK:
{
GenTreeStoreDynBlk* const dynBlock = this->AsStoreDynBlk();
if (operand == dynBlock->gtOp1)
{
*pUse = &dynBlock->gtOp1;
return true;
}
if (operand == dynBlock->gtOp2)
{
*pUse = &dynBlock->gtOp2;
return true;
}
if (operand == dynBlock->gtDynamicSize)
{
*pUse = &dynBlock->gtDynamicSize;
return true;
}
return false;
}
case GT_CALL:
{
GenTreeCall* const call = this->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
*pUse = &call->gtCallThisArg->NodeRef();
return true;
}
if (operand == call->gtControlExpr)
{
*pUse = &call->gtControlExpr;
return true;
}
if (call->gtCallType == CT_INDIRECT)
{
if (operand == call->gtCallCookie)
{
*pUse = &call->gtCallCookie;
return true;
}
if (operand == call->gtCallAddr)
{
*pUse = &call->gtCallAddr;
return true;
}
}
for (GenTreeCall::Use& argUse : call->Args())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
for (GenTreeCall::Use& argUse : call->LateArgs())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
return false;
}
// Binary nodes
default:
assert(this->OperIsBinary());
return TryGetUseBinOp(operand, pUse);
}
}
bool GenTree::TryGetUseBinOp(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
assert(this->OperIsBinary());
GenTreeOp* const binOp = this->AsOp();
if (operand == binOp->gtOp1)
{
*pUse = &binOp->gtOp1;
return true;
}
if (operand == binOp->gtOp2)
{
*pUse = &binOp->gtOp2;
return true;
}
return false;
}
//------------------------------------------------------------------------
// GenTree::ReplaceOperand:
// Replace a given operand of this node with a new operand. If the
// current node is a call node, this will also update the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTree::ReplaceOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
if (OperGet() == GT_CALL)
{
AsCall()->ReplaceCallOperand(useEdge, replacement);
}
else
{
*useEdge = replacement;
}
}
//------------------------------------------------------------------------
// gtGetParent: Get the parent of this node, and optionally capture the
// pointer to the child so that it can be modified.
//
// Arguments:
// pUse - A pointer to a GenTree** (yes, that's three
// levels, i.e. GenTree ***), which if non-null,
// will be set to point to the field in the parent
// that points to this node.
//
// Return value
// The parent of this node.
//
// Notes:
// This requires that the execution order must be defined (i.e. gtSetEvalOrder() has been called).
// To enable the child to be replaced, it accepts an argument, "pUse", that, if non-null,
// will be set to point to the child pointer in the parent that points to this node.
//
GenTree* GenTree::gtGetParent(GenTree*** pUse)
{
// Find the parent node; it must be after this node in the execution order.
GenTree* user;
GenTree** use = nullptr;
for (user = gtNext; user != nullptr; user = user->gtNext)
{
if (user->TryGetUse(this, &use))
{
break;
}
}
if (pUse != nullptr)
{
*pUse = use;
}
return user;
}
//-------------------------------------------------------------------------
// gtRetExprVal - walk back through GT_RET_EXPRs
//
// Arguments:
// pbbFlags - out-parameter that is set to the flags of the basic block
// containing the inlinee return value. The value is 0
// for unsuccessful inlines.
//
// Returns:
// tree representing return value from a successful inline,
// or original call for failed or yet to be determined inline.
//
// Notes:
// Multi-level inlines can form chains of GT_RET_EXPRs.
// This method walks back to the root of the chain.
//
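// Illustrative example: after multi-level inlining the tree can look like
// GT_RET_EXPR -> GT_RET_EXPR -> <inlinee return expression>; this walk returns the
// final non-RET_EXPR node (or, per the notes above, the original call for a failed
// or not-yet-determined inline).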
GenTree* GenTree::gtRetExprVal(BasicBlockFlags* pbbFlags /* = nullptr */)
{
GenTree* retExprVal = this;
BasicBlockFlags bbFlags = BBF_EMPTY;
assert(!retExprVal->OperIs(GT_PUTARG_TYPE));
while (retExprVal->OperIs(GT_RET_EXPR))
{
const GenTreeRetExpr* retExpr = retExprVal->AsRetExpr();
bbFlags = retExpr->bbFlags;
retExprVal = retExpr->gtInlineCandidate;
}
if (pbbFlags != nullptr)
{
*pbbFlags = bbFlags;
}
return retExprVal;
}
//------------------------------------------------------------------------------
// OperRequiresAsgFlag : Check whether the operation requires GTF_ASG flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresAsgFlag()
{
if (OperIs(GT_ASG, GT_STORE_DYN_BLK) ||
OperIs(GT_XADD, GT_XORR, GT_XAND, GT_XCHG, GT_LOCKADD, GT_CMPXCHG, GT_MEMORYBARRIER))
{
return true;
}
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
if (hwIntrinsicNode->OperIsMemoryStore())
{
// A MemoryStore operation is an assignment
return true;
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
//------------------------------------------------------------------------------
// OperRequiresCallFlag : Check whether the operation requires GTF_CALL flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresCallFlag(Compiler* comp)
{
switch (gtOper)
{
case GT_CALL:
return true;
case GT_KEEPALIVE:
return true;
case GT_INTRINSIC:
return comp->IsIntrinsicImplementedByUserCall(this->AsIntrinsic()->gtIntrinsicName);
#if FEATURE_FIXED_OUT_ARGS && !defined(TARGET_64BIT)
case GT_LSH:
case GT_RSH:
case GT_RSZ:
// Variable shifts of a long end up being helper calls, so mark the tree as such in morph.
// This is potentially too conservative, since they'll get treated as having side effects.
// It is important to mark them as calls so if they are part of an argument list,
// they will get sorted and processed properly (for example, it is important to handle
// all nested calls before putting struct arguments in the argument registers). We
// could mark the trees just before argument processing, but it would require a full
// tree walk of the argument tree, so we just do it when morphing, instead, even though we'll
// mark non-argument trees (that will still get converted to calls, anyway).
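// Illustrative example: on a 32-bit target, "longVal << count" with a non-constant
// 'count' is morphed into a helper call, so it is reported as requiring GTF_CALL here.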
return (this->TypeGet() == TYP_LONG) && (gtGetOp2()->OperGet() != GT_CNS_INT);
#endif // FEATURE_FIXED_OUT_ARGS && !TARGET_64BIT
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperIsImplicitIndir : Check whether the operation contains an implicit
// indirection.
// Arguments:
// this - a GenTree node
//
// Return Value:
// True if the given node contains an implicit indirection
//
// Note that for the [HW]INTRINSIC nodes we have to examine the
// details of the node to determine its result.
//
bool GenTree::OperIsImplicitIndir() const
{
switch (gtOper)
{
case GT_LOCKADD:
case GT_XORR:
case GT_XAND:
case GT_XADD:
case GT_XCHG:
case GT_CMPXCHG:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_BOX:
case GT_ARR_INDEX:
case GT_ARR_ELEM:
case GT_ARR_OFFSET:
return true;
case GT_INTRINSIC:
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
#ifdef FEATURE_SIMD
case GT_SIMD:
{
return AsSIMD()->OperIsMemoryLoad();
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
return AsHWIntrinsic()->OperIsMemoryLoadOrStore();
}
#endif // FEATURE_HW_INTRINSICS
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperMayThrow : Check whether the operation may throw.
//
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if the given operator may cause an exception
bool GenTree::OperMayThrow(Compiler* comp)
{
GenTree* op;
switch (gtOper)
{
case GT_MOD:
case GT_DIV:
case GT_UMOD:
case GT_UDIV:
/* Division with a non-zero, non-minus-one constant does not throw an exception */
op = AsOp()->gtOp2;
if (varTypeIsFloating(op->TypeGet()))
{
return false; // Floating point division does not throw.
}
// For integers, only division by 0 or by -1 can throw
if (op->IsIntegralConst() && !op->IsIntegralConst(0) && !op->IsIntegralConst(-1))
{
return false;
}
return true;
case GT_INTRINSIC:
// If this is an intrinsic that represents the object.GetType(), it can throw a NullReferenceException.
// Currently, this is the only intrinsic that can throw an exception.
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
case GT_CALL:
CorInfoHelpFunc helper;
helper = comp->eeGetHelperNum(this->AsCall()->gtCallMethHnd);
return ((helper == CORINFO_HELP_UNDEF) || !comp->s_helperCallProperties.NoThrow(helper));
case GT_IND:
case GT_BLK:
case GT_OBJ:
case GT_NULLCHECK:
case GT_STORE_BLK:
case GT_STORE_DYN_BLK:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) && comp->fgAddrCouldBeNull(this->AsIndir()->Addr()));
case GT_ARR_LENGTH:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) &&
comp->fgAddrCouldBeNull(this->AsArrLen()->ArrRef()));
case GT_ARR_ELEM:
return comp->fgAddrCouldBeNull(this->AsArrElem()->gtArrObj);
case GT_FIELD:
{
GenTree* fldObj = this->AsField()->GetFldObj();
if (fldObj != nullptr)
{
return comp->fgAddrCouldBeNull(fldObj);
}
return false;
}
case GT_BOUNDS_CHECK:
case GT_ARR_INDEX:
case GT_ARR_OFFSET:
case GT_LCLHEAP:
case GT_CKFINITE:
case GT_INDEX_ADDR:
return true;
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
assert(hwIntrinsicNode != nullptr);
if (hwIntrinsicNode->OperIsMemoryLoadOrStore())
{
// This operation contains an implicit indirection,
// so it could throw a null reference exception.
//
return true;
}
break;
}
#endif // FEATURE_HW_INTRINSICS
default:
break;
}
/* Overflow arithmetic operations also throw exceptions */
if (gtOverflowEx())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetFieldCount: Return the register count for a multi-reg lclVar.
//
// Arguments:
// compiler - the current Compiler instance.
//
// Return Value:
// Returns the number of registers defined by this node.
//
// Notes:
// This must be a multireg lclVar.
//
unsigned int GenTreeLclVar::GetFieldCount(Compiler* compiler) const
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
return varDsc->lvFieldCnt;
}
//-----------------------------------------------------------------------------------
// GetFieldTypeByIndex: Get a specific register's type, based on 'idx', that is produced
// by this multi-reg node.
//
// Arguments:
// compiler - the current Compiler instance.
// idx - which register type to return.
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg lclVar and 'idx' must be a valid index for this node.
//
var_types GenTreeLclVar::GetFieldTypeByIndex(Compiler* compiler, unsigned idx)
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + idx);
assert(fieldVarDsc->TypeGet() != TYP_STRUCT); // Don't expect struct fields.
return fieldVarDsc->TypeGet();
}
#if DEBUGGABLE_GENTREE
// static
GenTree::VtablePtr GenTree::s_vtablesForOpers[] = {nullptr};
GenTree::VtablePtr GenTree::s_vtableForOp = nullptr;
GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper)
{
noway_assert(oper < GT_COUNT);
// First, check a cache.
if (s_vtablesForOpers[oper] != nullptr)
{
return s_vtablesForOpers[oper];
}
// Otherwise, look up the correct vtable entry. Note that we want the most derived GenTree subtype
// for an oper. E.g., GT_LCL_VAR is defined in GTSTRUCT_3 as GenTreeLclVar and in GTSTRUCT_N as
// GenTreeLclVarCommon. We want the GenTreeLclVar vtable, since nothing should actually be
// instantiated as a GenTreeLclVarCommon.
VtablePtr res = nullptr;
switch (oper)
{
// clang-format off
#define GTSTRUCT_0(nm, tag) /*handle explicitly*/
#define GTSTRUCT_1(nm, tag) \
case tag: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_2(nm, tag, tag2) \
case tag: \
case tag2: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_3(nm, tag, tag2, tag3) \
case tag: \
case tag2: \
case tag3: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_4(nm, tag, tag2, tag3, tag4) \
case tag: \
case tag2: \
case tag3: \
case tag4: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_N(nm, ...) /*handle explicitly*/
#define GTSTRUCT_2_SPECIAL(nm, tag, tag2) /*handle explicitly*/
#define GTSTRUCT_3_SPECIAL(nm, tag, tag2, tag3) /*handle explicitly*/
#include "gtstructs.h"
// clang-format on
// Handle the special cases.
// The following opers are in GTSTRUCT_N but no other place (namely, no subtypes).
case GT_STORE_BLK:
case GT_BLK:
{
GenTreeBlk gt;
res = *reinterpret_cast<VtablePtr*>(>);
}
break;
case GT_IND:
case GT_NULLCHECK:
{
GenTreeIndir gt;
res = *reinterpret_cast<VtablePtr*>(>);
}
break;
// We don't need to handle GTSTRUCT_N for LclVarCommon, since all those allowed opers are specified
// in their proper subtype. Similarly for GenTreeIndir.
default:
{
// Should be unary or binary op.
if (s_vtableForOp == nullptr)
{
unsigned opKind = OperKind(oper);
assert(!IsExOp(opKind));
assert(OperIsSimple(oper) || OperIsLeaf(oper));
// Need to provide non-null operands.
GenTreeIntCon dummyOp(TYP_INT, 0);
GenTreeOp gt(oper, TYP_INT, &dummyOp, ((opKind & GTK_UNOP) ? nullptr : &dummyOp));
s_vtableForOp = *reinterpret_cast<VtablePtr*>(>);
}
res = s_vtableForOp;
break;
}
}
s_vtablesForOpers[oper] = res;
return res;
}
void GenTree::SetVtableForOper(genTreeOps oper)
{
*reinterpret_cast<VtablePtr*>(this) = GetVtableForOper(oper);
}
#endif // DEBUGGABLE_GENTREE
GenTree* Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
{
assert(op1 != nullptr);
assert(op2 != nullptr);
// We should not be allocating nodes that extend GenTreeOp with this;
// should call the appropriate constructor for the extended type.
assert(!GenTree::IsExOp(GenTree::OperKind(oper)));
GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, op2);
return node;
}
GenTreeQmark* Compiler::gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon)
{
compQmarkUsed = true;
GenTreeQmark* result = new (this, GT_QMARK) GenTreeQmark(type, cond, colon);
#ifdef DEBUG
if (compQmarkRationalized)
{
fgCheckQmarkAllowedForm(result);
}
#endif
return result;
}
GenTreeIntCon* Compiler::gtNewIconNode(ssize_t value, var_types type)
{
return new (this, GT_CNS_INT) GenTreeIntCon(type, value);
}
GenTreeIntCon* Compiler::gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq)
{
GenTreeIntCon* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, static_cast<ssize_t>(fieldOffset));
node->gtFieldSeq = fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq;
return node;
}
// return a new node representing the value in a physical register
GenTree* Compiler::gtNewPhysRegNode(regNumber reg, var_types type)
{
assert(genIsValidIntReg(reg) || (reg == REG_SPBASE));
GenTree* result = new (this, GT_PHYSREG) GenTreePhysReg(reg, type);
return result;
}
GenTree* Compiler::gtNewJmpTableNode()
{
return new (this, GT_JMPTABLE) GenTree(GT_JMPTABLE, TYP_I_IMPL);
}
/*****************************************************************************
*
* Converts an annotated token into icon flags (so that we will later be
* able to tell the type of the handle that will be embedded in the icon
* node)
*/
GenTreeFlags Compiler::gtTokenToIconFlags(unsigned token)
{
GenTreeFlags flags = GTF_EMPTY;
switch (TypeFromToken(token))
{
case mdtTypeRef:
case mdtTypeDef:
case mdtTypeSpec:
flags = GTF_ICON_CLASS_HDL;
break;
case mdtMethodDef:
flags = GTF_ICON_METHOD_HDL;
break;
case mdtFieldDef:
flags = GTF_ICON_FIELD_HDL;
break;
default:
flags = GTF_ICON_TOKEN_HDL;
break;
}
return flags;
}
//-----------------------------------------------------------------------------------------
// gtNewIndOfIconHandleNode: Creates an indirection GenTree node of a constant handle
//
// Arguments:
// indType - The type returned by the indirection node
// addr - The constant address to read from
// iconFlags - The GTF_ICON flag value that specifies the kind of handle that we have
// isInvariant - The indNode should also be marked as invariant
//
// Return Value:
// Returns a GT_IND node representing the value at the address provided by 'addr'
//
// Notes:
// The GT_IND node is marked as non-faulting.
// If 'isInvariant' is false we also mark the indNode as GTF_GLOB_REF.
//
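// Illustrative usage sketch ('pCell' is a placeholder for a constant address):
//     GenTree* ind = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pCell, GTF_ICON_CONST_PTR, true);
// creates a non-faulting, invariant load of the pointer stored at 'pCell', as done for
// IAT_PPVALUE string literals further below.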
GenTree* Compiler::gtNewIndOfIconHandleNode(var_types indType, size_t addr, GenTreeFlags iconFlags, bool isInvariant)
{
GenTree* addrNode = gtNewIconHandleNode(addr, iconFlags);
GenTree* indNode = gtNewOperNode(GT_IND, indType, addrNode);
// This indirection won't cause an exception.
//
indNode->gtFlags |= GTF_IND_NONFAULTING;
if (isInvariant)
{
assert(iconFlags != GTF_ICON_STATIC_HDL); // Pointer to a mutable class Static variable
assert(iconFlags != GTF_ICON_BBC_PTR); // Pointer to a mutable basic block count value
assert(iconFlags != GTF_ICON_GLOBAL_PTR); // Pointer to mutable data from the VM state
// This indirection also is invariant.
indNode->gtFlags |= GTF_IND_INVARIANT;
if (iconFlags == GTF_ICON_STR_HDL)
{
// String literals are never null
indNode->gtFlags |= GTF_IND_NONNULL;
}
}
else
{
// GLOB_REF needs to be set for indirections returning values from mutable
// locations, so that e.g. args sorting does not reorder them with calls.
indNode->gtFlags |= GTF_GLOB_REF;
}
return indNode;
}
/*****************************************************************************
*
* Allocates an integer constant entry that represents a HANDLE to something.
* It may not be allowed to embed HANDLEs directly into the JITed code (e.g.,
* as arguments to JIT helpers). Get a corresponding value that can be embedded.
* If the handle needs to be accessed via an indirection, pValue points to it.
*/
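// Illustrative summary: with a direct handle ('value' != null) this returns a single
// GT_CNS_INT; with an indirect handle ('pValue' != null) it returns GT_IND(GT_CNS_INT),
// marked non-faulting and invariant, that loads the real handle at run time.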
GenTree* Compiler::gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags iconFlags, void* compileTimeHandle)
{
GenTree* iconNode;
GenTree* handleNode;
if (value != nullptr)
{
// When 'value' is non-null, pValue is required to be null
assert(pValue == nullptr);
// use 'value' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)value, iconFlags);
// 'value' is the handle
handleNode = iconNode;
}
else
{
// When 'value' is null, pValue is required to be non-null
assert(pValue != nullptr);
// use 'pValue' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)pValue, iconFlags);
// 'pValue' is an address of a location that contains the handle
// construct the indirection of 'pValue'
handleNode = gtNewOperNode(GT_IND, TYP_I_IMPL, iconNode);
// This indirection won't cause an exception.
handleNode->gtFlags |= GTF_IND_NONFAULTING;
// This indirection also is invariant.
handleNode->gtFlags |= GTF_IND_INVARIANT;
}
iconNode->AsIntCon()->gtCompileTimeHandle = (size_t)compileTimeHandle;
return handleNode;
}
/*****************************************************************************/
GenTree* Compiler::gtNewStringLiteralNode(InfoAccessType iat, void* pValue)
{
GenTree* tree = nullptr;
switch (iat)
{
case IAT_VALUE:
setMethodHasFrozenString();
tree = gtNewIconEmbHndNode(pValue, nullptr, GTF_ICON_STR_HDL, nullptr);
tree->gtType = TYP_REF;
#ifdef DEBUG
tree->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PVALUE: // The value needs to be accessed via an indirection
// Create an indirection
tree = gtNewIndOfIconHandleNode(TYP_REF, (size_t)pValue, GTF_ICON_STR_HDL, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PPVALUE: // The value needs to be accessed via a double indirection
// Create the first indirection
tree = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pValue, GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
// Create the second indirection
tree = gtNewOperNode(GT_IND, TYP_REF, tree);
// This indirection won't cause an exception.
tree->gtFlags |= GTF_IND_NONFAULTING;
// This indirection points into the global heap (it is a String object)
tree->gtFlags |= GTF_GLOB_REF;
break;
default:
noway_assert(!"Unexpected InfoAccessType");
}
return tree;
}
//------------------------------------------------------------------------
// gtNewStringLiteralLength: create GenTreeIntCon node for the given string
// literal to store its length.
//
// Arguments:
// node - string literal node.
//
// Return Value:
// GenTreeIntCon node with string's length as a value or null.
//
GenTreeIntCon* Compiler::gtNewStringLiteralLength(GenTreeStrCon* node)
{
if (node->IsStringEmptyField())
{
JITDUMP("Folded String.Empty.Length to 0\n");
return gtNewIconNode(0);
}
int length = -1;
const char16_t* str = info.compCompHnd->getStringLiteral(node->gtScpHnd, node->gtSconCPX, &length);
if (length >= 0)
{
GenTreeIntCon* iconNode = gtNewIconNode(length);
// str can be NULL for dynamic context
if (str != nullptr)
{
JITDUMP("Folded '\"%ws\".Length' to '%d'\n", str, length)
}
else
{
JITDUMP("Folded 'CNS_STR.Length' to '%d'\n", length)
}
return iconNode;
}
return nullptr;
}
/*****************************************************************************/
GenTree* Compiler::gtNewLconNode(__int64 value)
{
#ifdef TARGET_64BIT
GenTree* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_LONG, value);
#else
GenTree* node = new (this, GT_CNS_LNG) GenTreeLngCon(value);
#endif
return node;
}
GenTree* Compiler::gtNewDconNode(double value, var_types type)
{
GenTree* node = new (this, GT_CNS_DBL) GenTreeDblCon(value, type);
return node;
}
GenTree* Compiler::gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle)
{
// 'GT_CNS_STR' nodes later get transformed into 'GT_CALL'
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_CNS_STR]);
GenTree* node = new (this, GT_CALL) GenTreeStrCon(CPX, scpHandle DEBUGARG(/*largeNode*/ true));
return node;
}
GenTree* Compiler::gtNewZeroConNode(var_types type)
{
GenTree* zero;
switch (type)
{
case TYP_INT:
zero = gtNewIconNode(0);
break;
case TYP_BYREF:
FALLTHROUGH;
case TYP_REF:
zero = gtNewIconNode(0);
zero->gtType = type;
break;
case TYP_LONG:
zero = gtNewLconNode(0);
break;
case TYP_FLOAT:
zero = gtNewDconNode(0.0);
zero->gtType = type;
break;
case TYP_DOUBLE:
zero = gtNewDconNode(0.0);
break;
default:
noway_assert(!"Bad type in gtNewZeroConNode");
zero = nullptr;
break;
}
return zero;
}
GenTree* Compiler::gtNewOneConNode(var_types type)
{
GenTree* one;
switch (type)
{
case TYP_INT:
case TYP_UINT:
one = gtNewIconNode(1);
break;
case TYP_LONG:
case TYP_ULONG:
one = gtNewLconNode(1);
break;
case TYP_FLOAT:
case TYP_DOUBLE:
one = gtNewDconNode(1.0);
one->gtType = type;
break;
default:
noway_assert(!"Bad type in gtNewOneConNode");
one = nullptr;
break;
}
return one;
}
GenTreeLclVar* Compiler::gtNewStoreLclVar(unsigned dstLclNum, GenTree* src)
{
GenTreeLclVar* store = new (this, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, src->TypeGet(), dstLclNum);
store->gtOp1 = src;
store->gtFlags = (src->gtFlags & GTF_COMMON_MASK);
store->gtFlags |= GTF_VAR_DEF | GTF_ASG;
return store;
}
#ifdef FEATURE_SIMD
//---------------------------------------------------------------------
// gtNewSIMDVectorZero: create a GT_SIMD node for Vector<T>.Zero
//
// Arguments:
// simdType - simd vector type
// simdBaseJitType - element type of vector
// simdSize - size of vector in bytes
GenTree* Compiler::gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize)
{
var_types simdBaseType = genActualType(JitType2PreciseVarType(simdBaseJitType));
GenTree* initVal = gtNewZeroConNode(simdBaseType);
initVal->gtType = simdBaseType;
return gtNewSIMDNode(simdType, initVal, SIMDIntrinsicInit, simdBaseJitType, simdSize);
}
#endif // FEATURE_SIMD
GenTreeCall* Compiler::gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
return gtNewCallNode(CT_INDIRECT, (CORINFO_METHOD_HANDLE)addr, type, args, di);
}
GenTreeCall* Compiler::gtNewCallNode(
gtCallTypes callType, CORINFO_METHOD_HANDLE callHnd, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
GenTreeCall* node = new (this, GT_CALL) GenTreeCall(genActualType(type));
node->gtFlags |= (GTF_CALL | GTF_GLOB_REF);
#ifdef UNIX_X86_ABI
if (callType == CT_INDIRECT || callType == CT_HELPER)
node->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
for (GenTreeCall::Use& use : GenTreeCall::UseList(args))
{
node->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
node->gtCallType = callType;
node->gtCallMethHnd = callHnd;
node->gtCallArgs = args;
node->gtCallThisArg = nullptr;
node->fgArgInfo = nullptr;
INDEBUG(node->callSig = nullptr;)
node->tailCallInfo = nullptr;
node->gtRetClsHnd = nullptr;
node->gtControlExpr = nullptr;
node->gtCallMoreFlags = GTF_CALL_M_EMPTY;
if (callType == CT_INDIRECT)
{
node->gtCallCookie = nullptr;
}
else
{
node->gtInlineCandidateInfo = nullptr;
}
node->gtCallLateArgs = nullptr;
node->gtReturnType = type;
#ifdef FEATURE_READYTORUN
node->gtEntryPoint.addr = nullptr;
node->gtEntryPoint.accessType = IAT_VALUE;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// These get updated after call node is built.
node->gtInlineObservation = InlineObservation::CALLEE_UNUSED_INITIAL;
node->gtRawILOffset = BAD_IL_OFFSET;
node->gtInlineContext = compInlineContext;
#endif
// Spec: Managed Retval sequence points need to be generated while generating debug info for debuggable code.
//
// Implementation note: if not generating MRV info genCallSite2ILOffsetMap will be NULL and
// codegen will pass DebugInfo() to emitter, which will cause emitter
// not to emit IP mapping entry.
if (opts.compDbgCode && opts.compDbgInfo && di.IsValid())
{
// Managed Retval - IL offset of the call. This offset is used to emit a
// CALL_INSTRUCTION type sequence point while emitting corresponding native call.
//
// TODO-Cleanup:
// a) (Opt) We need not store this offset if the method doesn't return a
// value. Rather it can be made BAD_IL_OFFSET to prevent a sequence
// point being emitted.
//
// b) (Opt) Add new sequence points only if requested by debugger through
// a new boundary type - ICorDebugInfo::BoundaryTypes
if (genCallSite2DebugInfoMap == nullptr)
{
genCallSite2DebugInfoMap = new (getAllocator()) CallSiteDebugInfoTable(getAllocator());
}
// Make sure that there are no duplicate entries for a given call node
assert(!genCallSite2DebugInfoMap->Lookup(node));
genCallSite2DebugInfoMap->Set(node, di);
}
// Initialize gtOtherRegs
node->ClearOtherRegs();
// Initialize spill flags of gtOtherRegs
node->ClearOtherRegFlags();
#if !defined(TARGET_64BIT)
if (varTypeIsLong(node))
{
assert(node->gtReturnType == node->gtType);
// Initialize Return type descriptor of call node
node->InitializeLongReturnType();
}
#endif // !defined(TARGET_64BIT)
return node;
}
GenTreeLclVar* Compiler::gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
assert(type != TYP_VOID);
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
LclVarDsc* varDsc = lvaGetDesc(lnum);
bool simd12ToSimd16Widening = false;
#if FEATURE_SIMD
// We can additionally have a SIMD12 that was widened to a SIMD16, generally as part of lowering
simd12ToSimd16Widening = (type == TYP_SIMD16) && (varDsc->lvType == TYP_SIMD12);
#endif
assert((type == varDsc->lvType) || simd12ToSimd16Widening ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (varDsc->lvType == TYP_BYREF)));
}
GenTreeLclVar* node = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs));
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
return node;
}
GenTreeLclVar* Compiler::gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
assert(type == lvaTable[lnum].lvType ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (lvaTable[lnum].lvType == TYP_BYREF)));
}
// This local variable node may later get transformed into a large node
assert(GenTree::s_gtNodeSizes[LargeOpOpcode()] > GenTree::s_gtNodeSizes[GT_LCL_VAR]);
GenTreeLclVar* node =
new (this, LargeOpOpcode()) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs) DEBUGARG(/*largeNode*/ true));
return node;
}
GenTreeLclVar* Compiler::gtNewLclVarAddrNode(unsigned lclNum, var_types type)
{
GenTreeLclVar* node = new (this, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, type, lclNum);
return node;
}
GenTreeLclFld* Compiler::gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, type, lclNum, lclOffs);
node->SetFieldSeq(fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq);
return node;
}
GenTreeLclFld* Compiler::gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, type, lnum, offset);
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
node->SetFieldSeq(FieldSeqStore::NotAField());
return node;
}
GenTree* Compiler::gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags)
{
assert(GenTree::s_gtNodeSizes[GT_RET_EXPR] == TREE_NODE_SZ_LARGE);
GenTreeRetExpr* node = new (this, GT_RET_EXPR) GenTreeRetExpr(type);
node->gtInlineCandidate = inlineCandidate;
node->bbFlags = bbFlags;
if (varTypeIsStruct(inlineCandidate) && !inlineCandidate->OperIsBlkOp())
{
node->gtRetClsHnd = gtGetStructHandle(inlineCandidate);
}
// GT_RET_EXPR node eventually might be bashed back to GT_CALL (when inlining is aborted for example).
// Therefore it should carry the GTF_CALL flag so that all the rules about spilling can apply to it as well.
    // For example, impImportLeave or CEE_POP need to spill GT_RET_EXPR before emptying the evaluation stack.
node->gtFlags |= GTF_CALL;
return node;
}
GenTreeCall::Use* Compiler::gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node, args);
}
GenTreeCall::Use* Compiler::gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after)
{
after->SetNext(new (this, CMK_ASTNode) GenTreeCall::Use(node, after->GetNext()));
return after->GetNext();
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node);
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3, node4));
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching argNum and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
return argInfo->GetArgEntry(argNum);
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching node and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByNode(GenTreeCall* call, GenTree* node)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->GetNode() == node)
{
return curArgTabEntry;
}
else if (curArgTabEntry->use->GetNode() == node)
{
return curArgTabEntry;
}
}
noway_assert(!"gtArgEntryByNode: node not found");
return nullptr;
}
/*****************************************************************************
*
* Find and return the entry with the given "lateArgInx". Requires that one is found
* (asserts this).
*/
fgArgTabEntry* Compiler::gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
assert(lateArgInx != UINT_MAX);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->isLateArg() && curArgTabEntry->GetLateArgInx() == lateArgInx)
{
return curArgTabEntry;
}
}
noway_assert(!"gtArgEntryByNode: node not found");
return nullptr;
}
//------------------------------------------------------------------------
// gtArgNodeByLateArgInx: Given a call instruction, find the argument with the given
// late arg index (i.e. the given position in the gtCallLateArgs list).
// Arguments:
// call - the call node
// lateArgInx - the index into the late args list
//
// Return value:
// The late argument node.
//
GenTree* Compiler::gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx)
{
GenTree* argx = nullptr;
unsigned regIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
argx = use.GetNode();
assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
if (regIndex == lateArgInx)
{
break;
}
regIndex++;
}
noway_assert(argx != nullptr);
return argx;
}
/*****************************************************************************
*
* Create a node that will assign 'src' to 'dst'.
*/
GenTreeOp* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src)
{
assert(!src->TypeIs(TYP_VOID));
/* Mark the target as being assigned */
if ((dst->gtOper == GT_LCL_VAR) || (dst->OperGet() == GT_LCL_FLD))
{
dst->gtFlags |= GTF_VAR_DEF;
if (dst->IsPartialLclFld(this))
{
// We treat these partial writes as combined uses and defs.
dst->gtFlags |= GTF_VAR_USEASG;
}
}
dst->gtFlags |= GTF_DONT_CSE;
#if defined(FEATURE_SIMD) && !defined(TARGET_X86)
// TODO-CQ: x86 Windows supports multi-reg returns but not SIMD multi-reg returns
if (varTypeIsSIMD(dst->gtType))
{
// We want to track SIMD assignments as being intrinsics since they
// are functionally SIMD `mov` instructions and are more efficient
// when we don't promote, particularly when it occurs due to inlining
SetOpLclRelatedToSIMDIntrinsic(dst);
SetOpLclRelatedToSIMDIntrinsic(src);
}
#endif // FEATURE_SIMD
/* Create the assignment node */
GenTreeOp* asg = gtNewOperNode(GT_ASG, dst->TypeGet(), dst, src)->AsOp();
/* Mark the expression as containing an assignment */
asg->gtFlags |= GTF_ASG;
return asg;
}
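// Illustrative usage sketch for gtNewAssignNode (informal, not part of the
// implementation): a caller building "V05 = 42" would do roughly
//     GenTree*   dst = comp->gtNewLclvNode(lclNum, TYP_INT);
//     GenTree*   src = comp->gtNewIconNode(42, TYP_INT);
//     GenTreeOp* asg = comp->gtNewAssignNode(dst, src);
// where 'comp' and 'lclNum' stand in for the caller's compiler instance and
// local number; the helper marks 'dst' as a definition and sets GTF_ASG.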
//------------------------------------------------------------------------
// gtNewObjNode: Creates a new Obj node.
//
// Arguments:
// structHnd - The class handle of the struct type.
// addr - The address of the struct.
//
// Return Value:
// Returns a node representing the struct value at the given address.
//
GenTreeObj* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
var_types nodeType = impNormStructType(structHnd);
assert(varTypeIsStruct(nodeType));
GenTreeObj* objNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, typGetObjLayout(structHnd));
// An Obj is not a global reference, if it is known to be a local struct.
if ((addr->gtFlags & GTF_GLOB_REF) == 0)
{
GenTreeLclVarCommon* lclNode = addr->IsLocalAddrExpr();
if (lclNode != nullptr)
{
objNode->gtFlags |= GTF_IND_NONFAULTING;
if (!lvaIsImplicitByRefLocal(lclNode->GetLclNum()))
{
objNode->gtFlags &= ~GTF_GLOB_REF;
}
}
}
return objNode;
}
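// Illustrative usage sketch for gtNewObjNode (informal): given a struct class
// handle 'structHnd' and an address tree 'addr' supplied by the caller,
//     GenTreeObj* obj = comp->gtNewObjNode(structHnd, addr);
// yields a struct-typed OBJ node; the GTF_GLOB_REF/GTF_IND_NONFAULTING
// adjustments above apply only when 'addr' is recognized as a local address.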
//------------------------------------------------------------------------
// gtSetObjGcInfo: Set the GC info on an object node
//
// Arguments:
// objNode - The object node of interest
void Compiler::gtSetObjGcInfo(GenTreeObj* objNode)
{
assert(varTypeIsStruct(objNode->TypeGet()));
assert(objNode->TypeGet() == impNormStructType(objNode->GetLayout()->GetClassHandle()));
if (!objNode->GetLayout()->HasGCPtr())
{
objNode->SetOper(objNode->OperIs(GT_OBJ) ? GT_BLK : GT_STORE_BLK);
}
}
//------------------------------------------------------------------------
// gtNewStructVal: Return a node that represents a struct value
//
// Arguments:
// structHnd - The class for the struct
// addr - The address of the struct
//
// Return Value:
// A block, object or local node that represents the struct value pointed to by 'addr'.
GenTree* Compiler::gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
if (val->OperGet() == GT_LCL_VAR)
{
unsigned lclNum = addr->gtGetOp1()->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = &(lvaTable[lclNum]);
if (varTypeIsStruct(varDsc) && (varDsc->GetStructHnd() == structHnd) && !lvaIsImplicitByRefLocal(lclNum))
{
return addr->gtGetOp1();
}
}
}
return gtNewObjNode(structHnd, addr);
}
//------------------------------------------------------------------------
// gtNewBlockVal: Return a node that represents a possibly untyped block value
//
// Arguments:
// addr - The address of the block
// size - The size of the block
//
// Return Value:
// A block, object or local node that represents the block value pointed to by 'addr'.
GenTree* Compiler::gtNewBlockVal(GenTree* addr, unsigned size)
{
// By default we treat this as an opaque struct type with known size.
var_types blkType = TYP_STRUCT;
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
#if FEATURE_SIMD
if (varTypeIsSIMD(val) && (genTypeSize(val) == size))
{
blkType = val->TypeGet();
}
#endif // FEATURE_SIMD
if (varTypeIsStruct(val) && val->OperIs(GT_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(val->AsLclVarCommon());
unsigned varSize = varTypeIsStruct(varDsc) ? varDsc->lvExactSize : genTypeSize(varDsc);
if (varSize == size)
{
return val;
}
}
}
return new (this, GT_BLK) GenTreeBlk(GT_BLK, blkType, addr, typGetBlkLayout(size));
}
// Creates a new assignment node for a CpObj.
// Parameters (exactly the same as MSIL CpObj):
//
// dstAddr - The target to copy the struct to
// srcAddr - The source to copy the struct from
// structHnd - A class token that represents the type of object being copied. May be null
// if FEATURE_SIMD is enabled and the source has a SIMD type.
// isVolatile - Is this marked as volatile memory?
GenTree* Compiler::gtNewCpObjNode(GenTree* dstAddr, GenTree* srcAddr, CORINFO_CLASS_HANDLE structHnd, bool isVolatile)
{
GenTree* lhs = gtNewStructVal(structHnd, dstAddr);
GenTree* src = nullptr;
if (lhs->OperIs(GT_OBJ))
{
GenTreeObj* lhsObj = lhs->AsObj();
#if DEBUG
// Codegen for CpObj assumes that we cannot have a struct with GC pointers whose size is not a multiple
// of the register size. The EE currently does not allow this to ensure that GC pointers are aligned
// if the struct is stored in an array. Note that this restriction doesn't apply to stack-allocated objects:
// they are never stored in arrays. We should never get to this method with stack-allocated objects since they
// are never copied so we don't need to exclude them from the assert below.
// Let's assert it just to be safe.
ClassLayout* layout = lhsObj->GetLayout();
unsigned size = layout->GetSize();
assert((layout->GetGCPtrCount() == 0) || (roundUp(size, REGSIZE_BYTES) == size));
#endif
gtSetObjGcInfo(lhsObj);
}
if (srcAddr->OperGet() == GT_ADDR)
{
src = srcAddr->AsOp()->gtOp1;
}
else
{
src = gtNewOperNode(GT_IND, lhs->TypeGet(), srcAddr);
}
GenTree* result = gtNewBlkOpNode(lhs, src, isVolatile, true);
return result;
}
//------------------------------------------------------------------------
// FixupInitBlkValue: Fixup the init value for an initBlk operation
//
// Arguments:
// asgType - The type of assignment that the initBlk is being transformed into
//
// Return Value:
// Modifies the constant value on this node to be the appropriate "fill"
// value for the initblk.
//
// Notes:
// The initBlk MSIL instruction takes a byte value, which must be
// extended to the size of the assignment when an initBlk is transformed
// to an assignment of a primitive type.
// This performs the appropriate extension.
void GenTreeIntCon::FixupInitBlkValue(var_types asgType)
{
assert(varTypeIsIntegralOrI(asgType));
unsigned size = genTypeSize(asgType);
if (size > 1)
{
size_t cns = gtIconVal;
cns = cns & 0xFF;
cns |= cns << 8;
if (size >= 4)
{
cns |= cns << 16;
#ifdef TARGET_64BIT
if (size == 8)
{
cns |= cns << 32;
}
#endif // TARGET_64BIT
// Make the type match for evaluation types.
gtType = asgType;
// if we are initializing a GC type the value being assigned must be zero (null).
assert(!varTypeIsGC(asgType) || (cns == 0));
}
gtIconVal = cns;
}
}
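// Illustrative example for FixupInitBlkValue (values worked out by hand, not
// taken from this file): a fill byte of 0x3F retyped to TYP_INT becomes
// 0x3F3F3F3F, and on 64-bit targets a TYP_LONG fill becomes
// 0x3F3F3F3F3F3F3F3F, i.e. the byte is replicated across the width of the
// primitive assignment.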
//----------------------------------------------------------------------------
// UsesDivideByConstOptimized:
// returns true if rationalize will use the division by constant
// optimization for this node.
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
// Return Value:
//    Return true iff the node is a GT_DIV, GT_UDIV, GT_MOD or GT_UMOD with
// an integer constant and we can perform the division operation using
// a reciprocal multiply or a shift operation.
//
bool GenTreeOp::UsesDivideByConstOptimized(Compiler* comp)
{
if (!comp->opts.OptimizationEnabled())
{
return false;
}
if (!OperIs(GT_DIV, GT_MOD, GT_UDIV, GT_UMOD))
{
return false;
}
#if defined(TARGET_ARM64)
if (OperIs(GT_MOD, GT_UMOD))
{
// MOD, UMOD not supported for ARM64
return false;
}
#endif // TARGET_ARM64
bool isSignedDivide = OperIs(GT_DIV, GT_MOD);
GenTree* dividend = gtGetOp1()->gtEffectiveVal(/*commaOnly*/ true);
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
#if !defined(TARGET_64BIT)
if (dividend->OperIs(GT_LONG))
{
return false;
}
#endif
if (dividend->IsCnsIntOrI())
{
// We shouldn't see a divmod with constant operands here but if we do then it's likely
// because optimizations are disabled or it's a case that's supposed to throw an exception.
// Don't optimize this.
return false;
}
ssize_t divisorValue;
if (divisor->IsCnsIntOrI())
{
divisorValue = static_cast<ssize_t>(divisor->AsIntCon()->IconValue());
}
else
{
ValueNum vn = divisor->gtVNPair.GetLiberal();
if (comp->vnStore->IsVNConstant(vn))
{
divisorValue = comp->vnStore->CoercedConstantValue<ssize_t>(vn);
}
else
{
return false;
}
}
const var_types divType = TypeGet();
if (divisorValue == 0)
{
// x / 0 and x % 0 can't be optimized because they are required to throw an exception.
return false;
}
else if (isSignedDivide)
{
if (divisorValue == -1)
{
// x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.
return false;
}
else if (isPow2(divisorValue))
{
return true;
}
}
else // unsigned divide
{
if (divType == TYP_INT)
{
// Clear up the upper 32 bits of the value, they may be set to 1 because constants
// are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets.
divisorValue &= UINT32_MAX;
}
size_t unsignedDivisorValue = (size_t)divisorValue;
if (isPow2(unsignedDivisorValue))
{
return true;
}
}
const bool isDiv = OperIs(GT_DIV, GT_UDIV);
if (isDiv)
{
if (isSignedDivide)
{
// If the divisor is the minimum representable integer value then the result is either 0 or 1
if ((divType == TYP_INT && divisorValue == INT_MIN) || (divType == TYP_LONG && divisorValue == INT64_MIN))
{
return true;
}
}
else
{
            // If the divisor is greater than or equal to 2^(N - 1) then the result is either 0 or 1
if (((divType == TYP_INT) && ((UINT32)divisorValue > (UINT32_MAX / 2))) ||
((divType == TYP_LONG) && ((UINT64)divisorValue > (UINT64_MAX / 2))))
{
return true;
}
}
}
// TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
if (!comp->opts.MinOpts() && ((divisorValue >= 3) || !isSignedDivide))
{
// All checks pass we can perform the division operation using a reciprocal multiply.
return true;
}
#endif
return false;
}
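// Illustrative examples for UsesDivideByConstOptimized (informal): an unsigned
// "x / 8" qualifies because 8 is a power of two and lowers to a shift; a
// signed "x / 3" qualifies on XARCH/ARM64 because it can be lowered to a
// reciprocal ("magic number") multiply; "x / 0" and signed "x / -1" do not
// qualify because those divisions must be able to throw.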
//------------------------------------------------------------------------
// CheckDivideByConstOptimized:
// Checks if we can use the division by constant optimization
// on this node
// and if so sets the flag GTF_DIV_BY_CNS_OPT and
// set GTF_DONT_CSE on the constant node
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
void GenTreeOp::CheckDivideByConstOptimized(Compiler* comp)
{
if (UsesDivideByConstOptimized(comp))
{
gtFlags |= GTF_DIV_BY_CNS_OPT;
        // Now set DONT_CSE on the GT_CNS_INT divisor; note that
        // with ValueNumbering we can have a non-GT_CNS_INT divisor
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
if (divisor->OperIs(GT_CNS_INT))
{
divisor->gtFlags |= GTF_DONT_CSE;
}
}
}
//
//------------------------------------------------------------------------
// gtBlockOpInit: Initializes a BlkOp GenTree
//
// Arguments:
// result - an assignment node that is to be initialized.
// dst - the target (destination) we want to either initialize or copy to.
// src - the init value for InitBlk or the source struct for CpBlk/CpObj.
// isVolatile - specifies whether this node is a volatile memory operation.
//
// Assumptions:
// 'result' is an assignment that is newly constructed.
// If 'dst' is TYP_STRUCT, then it must be a block node or lclVar.
//
// Notes:
// This procedure centralizes all the logic to both enforce proper structure and
// to properly construct any InitBlk/CpBlk node.
void Compiler::gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile)
{
if (!result->OperIsBlkOp())
{
assert(dst->TypeGet() != TYP_STRUCT);
return;
}
    /* In the case of CpBlk, we want to avoid generating
     * nodes where the source and destination are the same
     * for two reasons: first, such a copy is useless; second,
     * it introduces issues in liveness, and copying memory
     * from an overlapping memory location is undefined
     * behavior both per the ECMA standard and per the
     * memcpy semantics.
*
* NOTE: In this case we'll only detect the case for addr of a local
* and a local itself, any other complex expressions won't be
* caught.
*
* TODO-Cleanup: though having this logic is goodness (i.e. avoids self-assignment
* of struct vars very early), it was added because fgInterBlockLocalVarLiveness()
* isn't handling self-assignment of struct variables correctly. This issue may not
* surface if struct promotion is ON (which is the case on x86/arm). But still the
* fundamental issue exists that needs to be addressed.
*/
if (result->OperIsCopyBlkOp())
{
GenTree* currSrc = srcOrFillVal;
GenTree* currDst = dst;
if (currSrc->OperIsBlk() && (currSrc->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currSrc = currSrc->AsBlk()->Addr()->gtGetOp1();
}
if (currDst->OperIsBlk() && (currDst->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currDst = currDst->AsBlk()->Addr()->gtGetOp1();
}
if (currSrc->OperGet() == GT_LCL_VAR && currDst->OperGet() == GT_LCL_VAR &&
currSrc->AsLclVarCommon()->GetLclNum() == currDst->AsLclVarCommon()->GetLclNum())
{
// Make this a NOP
// TODO-Cleanup: probably doesn't matter, but could do this earlier and avoid creating a GT_ASG
result->gtBashToNOP();
return;
}
}
// Propagate all effect flags from children
result->gtFlags |= dst->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= result->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= (dst->gtFlags & GTF_EXCEPT) | (srcOrFillVal->gtFlags & GTF_EXCEPT);
if (isVolatile)
{
result->gtFlags |= GTF_BLK_VOLATILE;
}
#ifdef FEATURE_SIMD
if (result->OperIsCopyBlkOp() && varTypeIsSIMD(srcOrFillVal))
{
// If the source is a GT_SIMD node of SIMD type, then the dst lclvar struct
        // should be labeled as a SIMD-intrinsic-related struct.
// This is done so that the morpher can transform any field accesses into
// intrinsics, thus avoiding conflicting access methods (fields vs. whole-register).
GenTree* src = srcOrFillVal;
if (src->OperIsIndir() && (src->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
src = src->AsIndir()->Addr()->gtGetOp1();
}
#ifdef FEATURE_HW_INTRINSICS
if ((src->OperGet() == GT_SIMD) || (src->OperGet() == GT_HWINTRINSIC))
#else
if (src->OperGet() == GT_SIMD)
#endif // FEATURE_HW_INTRINSICS
{
if (dst->OperIsBlk() && (dst->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
dst = dst->AsIndir()->Addr()->gtGetOp1();
}
if (dst->OperIsLocal() && varTypeIsStruct(dst))
{
setLclRelatedToSIMDIntrinsic(dst);
}
}
}
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// gtNewBlkOpNode: Creates a GenTree for a block (struct) assignment.
//
// Arguments:
// dst - The destination node: local var / block node.
//    srcOrFillVal - The value to assign for CopyBlk, the integer "fill" for InitBlk
// isVolatile - Whether this is a volatile memory operation or not.
// isCopyBlock - True if this is a block copy (rather than a block init).
//
// Return Value:
// Returns the newly constructed and initialized block operation.
//
GenTree* Compiler::gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock)
{
assert(dst->OperIsBlk() || dst->OperIsLocal());
if (isCopyBlock)
{
if (srcOrFillVal->OperIsIndir() && (srcOrFillVal->gtGetOp1()->gtOper == GT_ADDR))
{
srcOrFillVal = srcOrFillVal->gtGetOp1()->gtGetOp1();
}
}
else
{
// InitBlk
assert(varTypeIsIntegral(srcOrFillVal));
if (varTypeIsStruct(dst))
{
if (!srcOrFillVal->IsIntegralConst(0))
{
srcOrFillVal = gtNewOperNode(GT_INIT_VAL, TYP_INT, srcOrFillVal);
}
}
}
GenTree* result = gtNewAssignNode(dst, srcOrFillVal);
gtBlockOpInit(result, dst, srcOrFillVal, isVolatile);
return result;
}
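// Illustrative usage sketch for gtNewBlkOpNode (informal): a struct copy can
// be assembled roughly as
//     GenTree* dst  = comp->gtNewStructVal(structHnd, dstAddr);
//     GenTree* src  = comp->gtNewStructVal(structHnd, srcAddr);
//     GenTree* copy = comp->gtNewBlkOpNode(dst, src, /* isVolatile */ false,
//                                          /* isCopyBlock */ true);
// where 'structHnd', 'dstAddr' and 'srcAddr' stand in for the caller's state.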
//------------------------------------------------------------------------
// gtNewPutArgReg: Creates a new PutArgReg node.
//
// Arguments:
// type - The actual type of the argument
// arg - The argument node
// argReg - The register that the argument will be passed in
//
// Return Value:
// Returns the newly created PutArgReg node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/armel, GenTreeOp on all the other archs.
//
GenTree* Compiler::gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg)
{
assert(arg != nullptr);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A PUTARG_REG could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_PUTARG_REG) GenTreeMultiRegOp(GT_PUTARG_REG, type, arg, nullptr);
if (type == TYP_LONG)
{
node->AsMultiRegOp()->gtOtherReg = REG_NEXT(argReg);
}
#else
node = gtNewOperNode(GT_PUTARG_REG, type, arg);
#endif
node->SetRegNum(argReg);
return node;
}
//------------------------------------------------------------------------
// gtNewBitCastNode: Creates a new BitCast node.
//
// Arguments:
// type - The actual type of the argument
// arg - The argument node
//
// Return Value:
// Returns the newly created BitCast node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/arm, as GenTreeOp on all the other archs.
//
GenTree* Compiler::gtNewBitCastNode(var_types type, GenTree* arg)
{
assert(arg != nullptr);
assert(type != TYP_STRUCT);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A BITCAST could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_BITCAST) GenTreeMultiRegOp(GT_BITCAST, type, arg, nullptr);
#else
node = gtNewOperNode(GT_BITCAST, type, arg);
#endif
return node;
}
//------------------------------------------------------------------------
// gtNewAllocObjNode: Helper to create an object allocation node.
//
// Arguments:
// pResolvedToken - Resolved token for the object being allocated
// useParent - true iff the token represents a child of the object's class
//
// Return Value:
// Returns GT_ALLOCOBJ node that will be later morphed into an
// allocation helper call or local variable allocation on the stack.
//
// Node creation can fail for inlinees when the type described by pResolvedToken
// can't be represented in jitted code. If this happens, this method will return
// nullptr.
//
GenTreeAllocObj* Compiler::gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent)
{
const bool mustRestoreHandle = true;
bool* const pRuntimeLookup = nullptr;
bool usingReadyToRunHelper = false;
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
GenTree* opHandle = impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, useParent);
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP lookup = {};
if (opts.IsReadyToRun())
{
helper = CORINFO_HELP_READYTORUN_NEW;
CORINFO_LOOKUP_KIND* const pGenericLookupKind = nullptr;
usingReadyToRunHelper =
info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup);
}
#endif
if (!usingReadyToRunHelper)
{
if (opHandle == nullptr)
{
// We must be backing out of an inline.
assert(compDonotInline());
return nullptr;
}
}
bool helperHasSideEffects;
CorInfoHelpFunc helperTemp =
info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd, &helperHasSideEffects);
if (!usingReadyToRunHelper)
{
helper = helperTemp;
}
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the newfast call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
// 3) Allocate and return the new object for boxing
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
GenTreeAllocObj* allocObj =
gtNewAllocObjNode(helper, helperHasSideEffects, pResolvedToken->hClass, TYP_REF, opHandle);
#ifdef FEATURE_READYTORUN
if (usingReadyToRunHelper)
{
assert(lookup.addr != nullptr);
allocObj->gtEntryPoint = lookup;
}
#endif
return allocObj;
}
/*****************************************************************************
*
* Clones the given tree value and returns a copy of the given tree.
* If 'complexOK' is false, the cloning is only done provided the tree
* is not too complex (whatever that may mean);
* If 'complexOK' is true, we try slightly harder to clone the tree.
* In either case, NULL is returned if the tree cannot be cloned
*
* Note that there is the function gtCloneExpr() which does a more
* complete job if you can't handle this function failing.
*/
GenTree* Compiler::gtClone(GenTree* tree, bool complexOK)
{
GenTree* copy;
switch (tree->gtOper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy = gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = new (this, GT_CNS_INT)
GenTreeIntCon(tree->gtType, tree->AsIntCon()->gtIconVal, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
}
break;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
break;
case GT_LCL_VAR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVarCommon()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
break;
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = new (this, tree->OperGet())
GenTreeLclFld(tree->OperGet(), tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
break;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->gtType, tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
break;
default:
if (!complexOK)
{
return nullptr;
}
if (tree->gtOper == GT_FIELD)
{
GenTree* objp = nullptr;
if (tree->AsField()->GetFldObj() != nullptr)
{
objp = gtClone(tree->AsField()->GetFldObj(), false);
if (objp == nullptr)
{
return nullptr;
}
}
copy = gtNewFieldRef(tree->TypeGet(), tree->AsField()->gtFldHnd, objp, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
}
else if (tree->OperIs(GT_ADD, GT_SUB))
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
if (op1->OperIsLeaf() && op2->OperIsLeaf())
{
op1 = gtClone(op1);
if (op1 == nullptr)
{
return nullptr;
}
op2 = gtClone(op2);
if (op2 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(tree->OperGet(), tree->TypeGet(), op1, op2);
}
else
{
return nullptr;
}
}
else if (tree->gtOper == GT_ADDR)
{
GenTree* op1 = gtClone(tree->AsOp()->gtOp1);
if (op1 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(GT_ADDR, tree->TypeGet(), op1);
}
else
{
return nullptr;
}
break;
}
copy->gtFlags |= tree->gtFlags & ~GTF_NODE_MASK;
#if defined(DEBUG)
copy->gtDebugFlags |= tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK;
#endif // defined(DEBUG)
return copy;
}
//------------------------------------------------------------------------
// gtCloneExpr: Create a copy of `tree`, adding flags `addFlags`, mapping
// local `varNum` to int constant `varVal` if it appears at
// the root, and mapping uses of local `deepVarNum` to constant
// `deepVarVal` if they occur beyond the root.
//
// Arguments:
// tree - GenTree to create a copy of
// addFlags - GTF_* flags to add to the copied tree nodes
// varNum - lclNum to replace at the root, or ~0 for no root replacement
// varVal - If replacing at root, replace local `varNum` with IntCns `varVal`
// deepVarNum - lclNum to replace uses of beyond the root, or ~0 for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Return Value:
// A copy of the given tree with the replacements and added flags specified.
//
// Notes:
// Top-level callers should generally call the overload that doesn't have
// the explicit `deepVarNum` and `deepVarVal` parameters; those are used in
// recursive invocations to avoid replacing defs.
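//
//    Example (illustrative, not taken from a particular caller): if `tree` is
//    itself a use of local V03 and the caller passes varNum == 3, varVal == 7,
//    the clone is simply an int constant node for 7; passing deepVarNum == 3,
//    deepVarVal == 7 additionally maps uses of V03 that appear below the root
//    of the copy.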
GenTree* Compiler::gtCloneExpr(
GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal)
{
if (tree == nullptr)
{
return nullptr;
}
/* Figure out what kind of a node we have */
genTreeOps oper = tree->OperGet();
unsigned kind = tree->OperKind();
GenTree* copy;
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy =
gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = gtNewIconNode(tree->AsIntCon()->gtIconVal, tree->gtType);
#ifdef DEBUG
copy->AsIntCon()->gtTargetHandle = tree->AsIntCon()->gtTargetHandle;
#endif
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->AsIntCon()->gtFieldSeq = tree->AsIntCon()->gtFieldSeq;
}
goto DONE;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
goto DONE;
case GT_CNS_DBL:
copy = gtNewDconNode(tree->AsDblCon()->gtDconVal);
copy->gtType = tree->gtType; // keep the same type
goto DONE;
case GT_CNS_STR:
copy = gtNewSconNode(tree->AsStrCon()->gtSconCPX, tree->AsStrCon()->gtScpHnd);
goto DONE;
case GT_LCL_VAR:
if (tree->AsLclVarCommon()->GetLclNum() == varNum)
{
copy = gtNewIconNode(varVal, tree->gtType);
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
copy->LabelIndex(this);
}
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVar()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
copy->AsLclVarCommon()->SetSsaNum(tree->AsLclVarCommon()->GetSsaNum());
}
goto DONE;
case GT_LCL_FLD:
if (tree->AsLclFld()->GetLclNum() == varNum)
{
IMPL_LIMITATION("replacing GT_LCL_FLD with a constant");
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy =
new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
copy->gtFlags = tree->gtFlags;
}
goto DONE;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->TypeGet(), tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
goto DONE;
case GT_RET_EXPR:
                // GT_RET_EXPR is a unique node that contains a link to a gtInlineCandidate node
// that is part of another statement. We cannot clone both here and cannot
// create another GT_RET_EXPR that points to the same gtInlineCandidate.
NO_WAY("Cloning of GT_RET_EXPR node not supported");
goto DONE;
case GT_MEMORYBARRIER:
copy = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
goto DONE;
case GT_ARGPLACE:
copy = gtNewArgPlaceHolderNode(tree->gtType, tree->AsArgPlace()->gtArgPlaceClsHnd);
goto DONE;
case GT_FTN_ADDR:
copy = new (this, oper) GenTreeFptrVal(tree->gtType, tree->AsFptrVal()->gtFptrMethod);
#ifdef FEATURE_READYTORUN
copy->AsFptrVal()->gtEntryPoint = tree->AsFptrVal()->gtEntryPoint;
#endif
goto DONE;
case GT_CATCH_ARG:
case GT_NO_OP:
case GT_LABEL:
copy = new (this, oper) GenTree(oper, tree->gtType);
goto DONE;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_JMP:
copy = new (this, oper) GenTreeVal(oper, tree->gtType, tree->AsVal()->gtVal1);
goto DONE;
case GT_LCL_VAR_ADDR:
copy = new (this, oper) GenTreeLclVar(oper, tree->TypeGet(), tree->AsLclVar()->GetLclNum());
goto DONE;
case GT_LCL_FLD_ADDR:
copy = new (this, oper)
GenTreeLclFld(oper, tree->TypeGet(), tree->AsLclFld()->GetLclNum(), tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
goto DONE;
default:
NO_WAY("Cloning of node not supported");
goto DONE;
}
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
/* If necessary, make sure we allocate a "fat" tree node */
CLANG_FORMAT_COMMENT_ANCHOR;
switch (oper)
{
/* These nodes sometimes get bashed to "fat" ones */
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
// In the implementation of gtNewLargeOperNode you have
// to give an oper that will create a small node,
// otherwise it asserts.
//
if (GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL)
{
copy = gtNewLargeOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1,
tree->OperIsBinary() ? tree->AsOp()->gtOp2 : nullptr);
}
else // Always a large tree
{
if (tree->OperIsBinary())
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
else
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1);
}
}
break;
case GT_CAST:
copy = new (this, LargeOpOpcode())
GenTreeCast(tree->TypeGet(), tree->AsCast()->CastOp(), tree->IsUnsigned(),
tree->AsCast()->gtCastType DEBUGARG(/*largeNode*/ TRUE));
break;
case GT_INDEX:
{
GenTreeIndex* asInd = tree->AsIndex();
copy = new (this, GT_INDEX)
GenTreeIndex(asInd->TypeGet(), asInd->Arr(), asInd->Index(), asInd->gtIndElemSize);
copy->AsIndex()->gtStructElemClass = asInd->gtStructElemClass;
}
break;
case GT_INDEX_ADDR:
{
GenTreeIndexAddr* asIndAddr = tree->AsIndexAddr();
copy = new (this, GT_INDEX_ADDR)
GenTreeIndexAddr(asIndAddr->Arr(), asIndAddr->Index(), asIndAddr->gtElemType,
asIndAddr->gtStructElemClass, asIndAddr->gtElemSize, asIndAddr->gtLenOffset,
asIndAddr->gtElemOffset);
copy->AsIndexAddr()->gtIndRngFailBB = asIndAddr->gtIndRngFailBB;
}
break;
case GT_ALLOCOBJ:
{
GenTreeAllocObj* asAllocObj = tree->AsAllocObj();
copy = new (this, GT_ALLOCOBJ)
GenTreeAllocObj(tree->TypeGet(), asAllocObj->gtNewHelper, asAllocObj->gtHelperHasSideEffects,
asAllocObj->gtAllocObjClsHnd, asAllocObj->gtOp1);
#ifdef FEATURE_READYTORUN
copy->AsAllocObj()->gtEntryPoint = asAllocObj->gtEntryPoint;
#endif
}
break;
case GT_RUNTIMELOOKUP:
{
GenTreeRuntimeLookup* asRuntimeLookup = tree->AsRuntimeLookup();
copy = new (this, GT_RUNTIMELOOKUP)
GenTreeRuntimeLookup(asRuntimeLookup->gtHnd, asRuntimeLookup->gtHndType, asRuntimeLookup->gtOp1);
}
break;
case GT_ARR_LENGTH:
copy = gtNewArrLen(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsArrLen()->ArrLenOffset(), nullptr);
break;
case GT_ARR_INDEX:
copy = new (this, GT_ARR_INDEX)
GenTreeArrIndex(tree->TypeGet(),
gtCloneExpr(tree->AsArrIndex()->ArrObj(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrIndex()->IndexExpr(), addFlags, deepVarNum, deepVarVal),
tree->AsArrIndex()->gtCurrDim, tree->AsArrIndex()->gtArrRank,
tree->AsArrIndex()->gtArrElemType);
break;
case GT_QMARK:
copy = new (this, GT_QMARK)
GenTreeQmark(tree->TypeGet(), tree->AsOp()->gtGetOp1(), tree->AsOp()->gtGetOp2()->AsColon());
break;
case GT_OBJ:
copy =
new (this, GT_OBJ) GenTreeObj(tree->TypeGet(), tree->AsObj()->Addr(), tree->AsObj()->GetLayout());
break;
case GT_BLK:
copy = new (this, GT_BLK)
GenTreeBlk(GT_BLK, tree->TypeGet(), tree->AsBlk()->Addr(), tree->AsBlk()->GetLayout());
break;
case GT_FIELD:
copy = new (this, GT_FIELD) GenTreeField(tree->TypeGet(), tree->AsField()->GetFldObj(),
tree->AsField()->gtFldHnd, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
break;
case GT_BOX:
copy = new (this, GT_BOX)
GenTreeBox(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsBox()->gtAsgStmtWhenInlinedBoxValue,
tree->AsBox()->gtCopyStmtWhenInlinedBoxValue);
break;
case GT_INTRINSIC:
copy = new (this, GT_INTRINSIC)
GenTreeIntrinsic(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2,
tree->AsIntrinsic()->gtIntrinsicName, tree->AsIntrinsic()->gtMethodHandle);
#ifdef FEATURE_READYTORUN
copy->AsIntrinsic()->gtEntryPoint = tree->AsIntrinsic()->gtEntryPoint;
#endif
break;
case GT_BOUNDS_CHECK:
copy = new (this, GT_BOUNDS_CHECK)
GenTreeBoundsChk(tree->AsBoundsChk()->GetIndex(), tree->AsBoundsChk()->GetArrayLength(),
tree->AsBoundsChk()->gtThrowKind);
copy->AsBoundsChk()->gtIndRngFailBB = tree->AsBoundsChk()->gtIndRngFailBB;
break;
case GT_LEA:
{
GenTreeAddrMode* addrModeOp = tree->AsAddrMode();
copy = new (this, GT_LEA)
GenTreeAddrMode(addrModeOp->TypeGet(), addrModeOp->Base(), addrModeOp->Index(), addrModeOp->gtScale,
static_cast<unsigned>(addrModeOp->Offset()));
}
break;
case GT_COPY:
case GT_RELOAD:
{
copy = new (this, oper) GenTreeCopyOrReload(oper, tree->TypeGet(), tree->gtGetOp1());
}
break;
default:
assert(!GenTree::IsExOp(tree->OperKind()) && tree->OperIsSimple());
// We're in the SimpleOp case, so it's always unary or binary.
if (GenTree::OperIsUnary(tree->OperGet()))
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, /*doSimplifications*/ false);
}
else
{
assert(GenTree::OperIsBinary(tree->OperGet()));
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
break;
}
// Some flags are conceptually part of the gtOper, and should be copied immediately.
if (tree->gtOverflowEx())
{
copy->gtFlags |= GTF_OVERFLOW;
}
if (tree->AsOp()->gtOp1)
{
if (tree->gtOper == GT_ASG)
{
// Don't replace varNum if it appears as the LHS of an assign.
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, -1, 0, deepVarNum, deepVarVal);
}
else
{
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, deepVarNum, deepVarVal);
}
}
if (tree->gtGetOp2IfPresent())
{
copy->AsOp()->gtOp2 = gtCloneExpr(tree->AsOp()->gtOp2, addFlags, deepVarNum, deepVarVal);
}
/* Flags */
addFlags |= tree->gtFlags;
// Copy any node annotations, if necessary.
switch (tree->gtOper)
{
case GT_STOREIND:
case GT_IND:
case GT_OBJ:
case GT_STORE_OBJ:
{
ArrayInfo arrInfo;
if (!tree->AsIndir()->gtOp1->OperIs(GT_INDEX_ADDR) && TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
GetArrayInfoMap()->Set(copy, arrInfo);
}
}
break;
default:
break;
}
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
// Effects flags propagate upwards.
if (copy->AsOp()->gtOp1 != nullptr)
{
copy->gtFlags |= (copy->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT);
}
if (copy->gtGetOp2IfPresent() != nullptr)
{
copy->gtFlags |= (copy->gtGetOp2()->gtFlags & GTF_ALL_EFFECT);
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
// We can't safely clone calls that have GT_RET_EXPRs via gtCloneExpr.
// You must use gtCloneCandidateCall for these calls (and then do appropriate other fixup)
if (tree->AsCall()->IsInlineCandidate() || tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
NO_WAY("Cloning of calls with associated GT_RET_EXPR nodes is not supported");
}
copy = gtCloneExprCallHelper(tree->AsCall(), addFlags, deepVarNum, deepVarVal);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
copy = new (this, GT_SIMD)
GenTreeSIMD(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsSIMD()),
tree->AsSIMD()->GetSIMDIntrinsicId(), tree->AsSIMD()->GetSimdBaseJitType(),
tree->AsSIMD()->GetSimdSize());
goto CLONE_MULTIOP_OPERANDS;
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
copy = new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsMultiOp()),
tree->AsHWIntrinsic()->GetHWIntrinsicId(),
tree->AsHWIntrinsic()->GetSimdBaseJitType(), tree->AsHWIntrinsic()->GetSimdSize(),
tree->AsHWIntrinsic()->IsSimdAsHWIntrinsic());
copy->AsHWIntrinsic()->SetAuxiliaryJitType(tree->AsHWIntrinsic()->GetAuxiliaryJitType());
goto CLONE_MULTIOP_OPERANDS;
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
CLONE_MULTIOP_OPERANDS:
for (GenTree** use : copy->AsMultiOp()->UseEdges())
{
*use = gtCloneExpr(*use, addFlags, deepVarNum, deepVarVal);
}
break;
#endif
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
GenTree* inds[GT_ARR_MAX_RANK];
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
inds[dim] = gtCloneExpr(arrElem->gtArrInds[dim], addFlags, deepVarNum, deepVarVal);
}
copy = new (this, GT_ARR_ELEM)
GenTreeArrElem(arrElem->TypeGet(), gtCloneExpr(arrElem->gtArrObj, addFlags, deepVarNum, deepVarVal),
arrElem->gtArrRank, arrElem->gtArrElemSize, arrElem->gtArrElemType, &inds[0]);
}
break;
case GT_ARR_OFFSET:
{
copy = new (this, GT_ARR_OFFSET)
GenTreeArrOffs(tree->TypeGet(),
gtCloneExpr(tree->AsArrOffs()->gtOffset, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtIndex, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtArrObj, addFlags, deepVarNum, deepVarVal),
tree->AsArrOffs()->gtCurrDim, tree->AsArrOffs()->gtArrRank,
tree->AsArrOffs()->gtArrElemType);
}
break;
case GT_PHI:
{
copy = new (this, GT_PHI) GenTreePhi(tree->TypeGet());
GenTreePhi::Use** prevUse = ©->AsPhi()->gtUses;
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
*prevUse = new (this, CMK_ASTNode)
GenTreePhi::Use(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal), *prevUse);
prevUse = &((*prevUse)->NextRef());
}
}
break;
case GT_FIELD_LIST:
copy = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
copy->AsFieldList()->AddField(this, gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal),
use.GetOffset(), use.GetType());
}
break;
case GT_CMPXCHG:
copy = new (this, GT_CMPXCHG)
GenTreeCmpXchg(tree->TypeGet(),
gtCloneExpr(tree->AsCmpXchg()->gtOpLocation, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpValue, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpComparand, addFlags, deepVarNum, deepVarVal));
break;
case GT_STORE_DYN_BLK:
copy = new (this, oper)
GenTreeStoreDynBlk(gtCloneExpr(tree->AsStoreDynBlk()->Addr(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->Data(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->gtDynamicSize, addFlags, deepVarNum, deepVarVal));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
NO_WAY("unexpected operator");
}
DONE:
// If it has a zero-offset field seq, copy annotation.
if (tree->TypeGet() == TYP_BYREF)
{
FieldSeqNode* fldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(tree, &fldSeq))
{
fgAddFieldSeqForZeroOffset(copy, fldSeq);
}
}
    copy->gtVNPair = tree->gtVNPair; // A cloned tree gets the original's Value number pair
/* Compute the flags for the copied node. Note that we can do this only
       if we didn't gtFoldExpr(copy) */
if (copy->gtOper == oper)
{
addFlags |= tree->gtFlags;
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
copy->gtFlags |= addFlags;
// Update side effect flags since they may be different from the source side effect flags.
// For example, we may have replaced some locals with constants and made indirections non-throwing.
gtUpdateNodeSideEffects(copy);
}
/* GTF_COLON_COND should be propagated from 'tree' to 'copy' */
copy->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
#if defined(DEBUG)
// Non-node debug flags should be propagated from 'tree' to 'copy'
copy->gtDebugFlags |= (tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
/* Make sure to copy back fields that may have been initialized */
copy->CopyRawCosts(tree);
copy->gtRsvdRegs = tree->gtRsvdRegs;
copy->CopyReg(tree);
return copy;
}
//------------------------------------------------------------------------
// gtCloneExprCallHelper: clone a call tree
//
// Notes:
// Do not invoke this method directly, instead call either gtCloneExpr
// or gtCloneCandidateCall, as appropriate.
//
// Arguments:
// tree - the call to clone
// addFlags - GTF_* flags to add to the copied tree nodes
// deepVarNum - lclNum to replace uses of beyond the root, or BAD_VAR_NUM for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneExprCallHelper(GenTreeCall* tree,
GenTreeFlags addFlags,
unsigned deepVarNum,
int deepVarVal)
{
GenTreeCall* copy = new (this, GT_CALL) GenTreeCall(tree->TypeGet());
if (tree->gtCallThisArg == nullptr)
{
copy->gtCallThisArg = nullptr;
}
else
{
copy->gtCallThisArg =
gtNewCallArgs(gtCloneExpr(tree->gtCallThisArg->GetNode(), addFlags, deepVarNum, deepVarVal));
}
copy->gtCallMoreFlags = tree->gtCallMoreFlags;
copy->gtCallArgs = nullptr;
copy->gtCallLateArgs = nullptr;
GenTreeCall::Use** argsTail = ©->gtCallArgs;
for (GenTreeCall::Use& use : tree->Args())
{
*argsTail = gtNewCallArgs(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal));
argsTail = &((*argsTail)->NextRef());
}
argsTail = ©->gtCallLateArgs;
for (GenTreeCall::Use& use : tree->LateArgs())
{
*argsTail = gtNewCallArgs(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal));
argsTail = &((*argsTail)->NextRef());
}
// The call sig comes from the EE and doesn't change throughout the compilation process, meaning
// we only really need one physical copy of it. Therefore a shallow pointer copy will suffice.
// (Note that this still holds even if the tree we are cloning was created by an inlinee compiler,
// because the inlinee still uses the inliner's memory allocator anyway.)
INDEBUG(copy->callSig = tree->callSig;)
// The tail call info does not change after it is allocated, so for the same reasons as above
// a shallow copy suffices.
copy->tailCallInfo = tree->tailCallInfo;
copy->gtRetClsHnd = tree->gtRetClsHnd;
copy->gtControlExpr = gtCloneExpr(tree->gtControlExpr, addFlags, deepVarNum, deepVarVal);
copy->gtStubCallStubAddr = tree->gtStubCallStubAddr;
/* Copy the union */
if (tree->gtCallType == CT_INDIRECT)
{
copy->gtCallCookie =
tree->gtCallCookie ? gtCloneExpr(tree->gtCallCookie, addFlags, deepVarNum, deepVarVal) : nullptr;
copy->gtCallAddr = tree->gtCallAddr ? gtCloneExpr(tree->gtCallAddr, addFlags, deepVarNum, deepVarVal) : nullptr;
}
else
{
copy->gtCallMethHnd = tree->gtCallMethHnd;
copy->gtInlineCandidateInfo = tree->gtInlineCandidateInfo;
}
copy->gtCallType = tree->gtCallType;
copy->gtReturnType = tree->gtReturnType;
if (tree->fgArgInfo)
{
// Create and initialize the fgArgInfo for our copy of the call tree
copy->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(copy, tree);
}
else
{
copy->fgArgInfo = nullptr;
}
#if FEATURE_MULTIREG_RET
copy->gtReturnTypeDesc = tree->gtReturnTypeDesc;
#endif
#ifdef FEATURE_READYTORUN
copy->setEntryPoint(tree->gtEntryPoint);
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
copy->gtInlineObservation = tree->gtInlineObservation;
copy->gtRawILOffset = tree->gtRawILOffset;
copy->gtInlineContext = tree->gtInlineContext;
#endif
copy->CopyOtherRegFlags(tree);
// We keep track of the number of no return calls, so if we've cloned
// one of these, update the tracking.
//
if (tree->IsNoReturn())
{
assert(copy->IsNoReturn());
setMethodHasNoReturnCalls();
}
return copy;
}
//------------------------------------------------------------------------
// gtCloneCandidateCall: clone a call that is an inline or guarded
// devirtualization candidate (~ any call that can have a GT_RET_EXPR)
//
// Notes:
// If the call really is a candidate, the caller must take additional steps
// after cloning to re-establish candidate info and the relationship between
// the candidate and any associated GT_RET_EXPR.
//
// Arguments:
// call - the call to clone
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneCandidateCall(GenTreeCall* call)
{
assert(call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate());
GenTreeCall* result = gtCloneExprCallHelper(call);
// There is some common post-processing in gtCloneExpr that we reproduce
// here, for the fields that make sense for candidate calls.
result->gtFlags |= call->gtFlags;
#if defined(DEBUG)
result->gtDebugFlags |= (call->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
result->CopyReg(call);
return result;
}
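// Illustrative usage sketch for gtCloneCandidateCall (informal): a caller
// performing guarded devirtualization clones the candidate roughly as
//     GenTreeCall* clonedCall = comp->gtCloneCandidateCall(origCall);
// and must then re-establish the candidate info and the link from the
// associated GT_RET_EXPR, as noted in the header comment above.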
//------------------------------------------------------------------------
// gtUpdateSideEffects: Update the side effects of a tree and its ancestors
//
// Arguments:
// stmt - The tree's statement
// tree - Tree to update the side effects for
//
// Note: If tree's order hasn't been established, the method updates side effect
//       flags on all of the statement's nodes.
void Compiler::gtUpdateSideEffects(Statement* stmt, GenTree* tree)
{
if (fgStmtListThreaded)
{
gtUpdateTreeAncestorsSideEffects(tree);
}
else
{
gtUpdateStmtSideEffects(stmt);
}
}
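// Illustrative usage sketch for gtUpdateSideEffects (informal): after a
// transformation that may have removed a side effect (say, replacing a
// possibly-faulting indirection with a constant), a caller refreshes the
// stale flags with
//     comp->gtUpdateSideEffects(stmt, rewrittenTree);
// which walks the ancestors when the statement is threaded, or re-walks the
// whole statement otherwise.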
//------------------------------------------------------------------------
// gtUpdateTreeAncestorsSideEffects: Update the side effects of a tree and its ancestors
// when statement order has been established.
//
// Arguments:
// tree - Tree to update the side effects for
//
void Compiler::gtUpdateTreeAncestorsSideEffects(GenTree* tree)
{
assert(fgStmtListThreaded);
while (tree != nullptr)
{
gtUpdateNodeSideEffects(tree);
tree = tree->gtGetParent(nullptr);
}
}
//------------------------------------------------------------------------
// gtUpdateStmtSideEffects: Update the side effects for statement tree nodes.
//
// Arguments:
// stmt - The statement to update side effects on
//
void Compiler::gtUpdateStmtSideEffects(Statement* stmt)
{
fgWalkTree(stmt->GetRootNodePointer(), fgUpdateSideEffectsPre, fgUpdateSideEffectsPost);
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffects: Update the side effects based on the node operation.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
// The caller of this method is expected to update the flags based on the children's flags.
//
void Compiler::gtUpdateNodeOperSideEffects(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
if (tree->OperIsIndirOrArrLength())
{
tree->SetIndirExceptionFlags(this);
}
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
else
{
tree->gtFlags &= ~GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
else
{
tree->gtFlags &= ~GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffectsPost: Update the side effects based on the node operation,
// in the post-order visit of a tree walk. It is expected that the pre-order visit cleared
// the bits, so the post-order visit only sets them. This is important for binary nodes
// where one child already may have set the GTF_EXCEPT bit. Note that `SetIndirExceptionFlags`
// looks at its child, which is why we need to do this in a bottom-up walk.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_ASG, GTF_CALL, and GTF_EXCEPT flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
void Compiler::gtUpdateNodeOperSideEffectsPost(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeSideEffects: Update the side effects based on the node operation and
//                          children's side effects.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
void Compiler::gtUpdateNodeSideEffects(GenTree* tree)
{
gtUpdateNodeOperSideEffects(tree);
tree->VisitOperands([tree](GenTree* operand) -> GenTree::VisitResult {
tree->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT);
return GenTree::VisitResult::Continue;
});
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPre: Update the side effects based on the tree operation.
// The pre-visit walk clears GTF_ASG, GTF_CALL, and GTF_EXCEPT; the post-visit walk sets
// the bits as necessary.
//
// Arguments:
// pTree - Pointer to the tree to update the side effects
// fgWalkPre - Walk data
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPre(GenTree** pTree, fgWalkData* fgWalkPre)
{
GenTree* tree = *pTree;
tree->gtFlags &= ~(GTF_ASG | GTF_CALL | GTF_EXCEPT);
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPost: Update the side effects of the node and parent based on the tree's flags.
//
// Arguments:
// pTree - Pointer to the tree
// fgWalkPost - Walk data
//
// Notes:
// The routine is used for updating the stale side effect flags for ancestor
// nodes starting from treeParent up to the top-level stmt expr.
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPost(GenTree** pTree, fgWalkData* fgWalkPost)
{
GenTree* tree = *pTree;
// Update the node's side effects first.
fgWalkPost->compiler->gtUpdateNodeOperSideEffectsPost(tree);
// If this node is an indir or array length, and it doesn't have the GTF_EXCEPT bit set, we
// set the GTF_IND_NONFAULTING bit. This needs to be done after all children, and this node, have
// been processed.
if (tree->OperIsIndirOrArrLength() && ((tree->gtFlags & GTF_EXCEPT) == 0))
{
tree->gtFlags |= GTF_IND_NONFAULTING;
}
// Then update the parent's side effects based on this node.
GenTree* parent = fgWalkPost->parent;
if (parent != nullptr)
{
parent->gtFlags |= (tree->gtFlags & GTF_ALL_EFFECT);
}
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// gtGetThisArg: Return this pointer node for the call.
//
// Arguments:
// call - the call node with a this argument.
//
// Return value:
// the this pointer node.
//
GenTree* Compiler::gtGetThisArg(GenTreeCall* call)
{
assert(call->gtCallThisArg != nullptr);
GenTree* thisArg = call->gtCallThisArg->GetNode();
if (!thisArg->OperIs(GT_ASG))
{
if ((thisArg->gtFlags & GTF_LATE_ARG) == 0)
{
return thisArg;
}
}
assert(call->gtCallLateArgs != nullptr);
unsigned argNum = 0;
fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, argNum);
GenTree* result = thisArgTabEntry->GetNode();
// Assert if we used DEBUG_DESTROY_NODE.
assert(result->gtOper != GT_COUNT);
return result;
}
bool GenTree::gtSetFlags() const
{
//
// When FEATURE_SET_FLAGS (TARGET_ARM) is active the method returns true
    // when the gtFlags field has the GTF_SET_FLAGS flag set;
    // otherwise the architecture will have instructions that typically set
    // the flags and this method will return true.
//
// Exceptions: GT_IND (load/store) is not allowed to set the flags
// and on XARCH the GT_MUL/GT_DIV and all overflow instructions
// do not set the condition flags
//
// Precondition we have a GTK_SMPOP
//
if (!varTypeIsIntegralOrI(TypeGet()) && (TypeGet() != TYP_VOID))
{
return false;
}
if (((gtFlags & GTF_SET_FLAGS) != 0) && (gtOper != GT_IND))
{
// GTF_SET_FLAGS is not valid on GT_IND and is overlaid with GTF_NONFAULTING_IND
return true;
}
else
{
return false;
}
}
bool GenTree::gtRequestSetFlags()
{
bool result = false;
#if FEATURE_SET_FLAGS
// This method is a Nop unless FEATURE_SET_FLAGS is defined
// In order to set GTF_SET_FLAGS
// we must have a GTK_SMPOP
    // and we have an integer or machine-size type (not floating point or TYP_LONG on 32-bit)
//
if (!OperIsSimple())
return false;
if (!varTypeIsIntegralOrI(TypeGet()))
return false;
switch (gtOper)
{
case GT_IND:
case GT_ARR_LENGTH:
// These will turn into simple load from memory instructions
// and we can't force the setting of the flags on load from memory
break;
case GT_MUL:
case GT_DIV:
// These instructions don't set the flags (on x86/x64)
//
break;
default:
// Otherwise we can set the flags for this gtOper
// and codegen must set the condition flags.
//
gtFlags |= GTF_SET_FLAGS;
result = true;
break;
}
#endif // FEATURE_SET_FLAGS
// Codegen for this tree must set the condition flags if
// this method returns true.
//
return result;
}
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator()
: m_advance(nullptr), m_node(nullptr), m_edge(nullptr), m_statePtr(nullptr), m_state(-1)
{
}
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node)
: m_advance(nullptr), m_node(node), m_edge(nullptr), m_statePtr(nullptr), m_state(0)
{
assert(m_node != nullptr);
// NOTE: the switch statement below must be updated when introducing new nodes.
switch (m_node->OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
m_state = -1;
return;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
case GT_RETURNTRAP:
m_edge = &m_node->AsUnOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
return;
// Unary operators with an optional operand
case GT_NOP:
case GT_FIELD:
case GT_RETURN:
case GT_RETFILT:
if (m_node->AsUnOp()->gtOp1 == nullptr)
{
assert(m_node->NullOp1Legal());
m_state = -1;
}
else
{
m_edge = &m_node->AsUnOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
return;
// Variadic nodes
#ifdef FEATURE_SIMD
case GT_SIMD:
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
SetEntryStateForMultiOp();
return;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// LEA, which may have no first operand
case GT_LEA:
if (m_node->AsAddrMode()->gtOp1 == nullptr)
{
m_edge = &m_node->AsAddrMode()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else
{
SetEntryStateForBinOp();
}
return;
// Special nodes
case GT_FIELD_LIST:
m_statePtr = m_node->AsFieldList()->Uses().GetHead();
m_advance = &GenTreeUseEdgeIterator::AdvanceFieldList;
AdvanceFieldList();
return;
case GT_PHI:
m_statePtr = m_node->AsPhi()->gtUses;
m_advance = &GenTreeUseEdgeIterator::AdvancePhi;
AdvancePhi();
return;
case GT_CMPXCHG:
m_edge = &m_node->AsCmpXchg()->gtOpLocation;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceCmpXchg;
return;
case GT_ARR_ELEM:
m_edge = &m_node->AsArrElem()->gtArrObj;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrElem;
return;
case GT_ARR_OFFSET:
m_edge = &m_node->AsArrOffs()->gtOffset;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrOffset;
return;
case GT_STORE_DYN_BLK:
m_edge = &m_node->AsStoreDynBlk()->Addr();
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceStoreDynBlk;
return;
case GT_CALL:
AdvanceCall<CALL_INSTANCE>();
return;
// Binary nodes
default:
assert(m_node->OperIsBinary());
SetEntryStateForBinOp();
return;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCmpXchg: produces the next operand of a CmpXchg node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceCmpXchg()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsCmpXchg()->gtOpValue;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsCmpXchg()->gtOpComparand;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrElem: produces the next operand of an ArrElem node and advances the state.
//
// Because these nodes are variadic, this function uses `m_state` to index into the list of array indices.
//
void GenTreeUseEdgeIterator::AdvanceArrElem()
{
if (m_state < m_node->AsArrElem()->gtArrRank)
{
m_edge = &m_node->AsArrElem()->gtArrInds[m_state];
assert(*m_edge != nullptr);
m_state++;
}
else
{
m_state = -1;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrOffset: produces the next operand of an ArrOffset node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceArrOffset()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsArrOffs()->gtIndex;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsArrOffs()->gtArrObj;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceStoreDynBlk: produces the next operand of a StoreDynBlk node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceStoreDynBlk()
{
GenTreeStoreDynBlk* const dynBlock = m_node->AsStoreDynBlk();
switch (m_state)
{
case 0:
m_edge = &dynBlock->Data();
m_state = 1;
break;
case 1:
m_edge = &dynBlock->gtDynamicSize;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceFieldList: produces the next operand of a FieldList node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceFieldList()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreeFieldList::Use* currentUse = static_cast<GenTreeFieldList::Use*>(m_statePtr);
m_edge = ¤tUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvancePhi: produces the next operand of a Phi node and advances the state.
//
void GenTreeUseEdgeIterator::AdvancePhi()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreePhi::Use* currentUse = static_cast<GenTreePhi::Use*>(m_statePtr);
m_edge = ¤tUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceBinOp: produces the next operand of a binary node and advances the state.
//
// This function must be instantiated s.t. `ReverseOperands` is `true` iff the node is marked with the
// `GTF_REVERSE_OPS` flag.
//
template <bool ReverseOperands>
void GenTreeUseEdgeIterator::AdvanceBinOp()
{
assert(ReverseOperands == ((m_node->gtFlags & GTF_REVERSE_OPS) != 0));
m_edge = !ReverseOperands ? &m_node->AsOp()->gtOp2 : &m_node->AsOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForBinOp: produces the first operand of a binary node and chooses
// the appropriate advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForBinOp()
{
assert(m_node != nullptr);
assert(m_node->OperIsBinary());
GenTreeOp* const node = m_node->AsOp();
if (node->gtOp2 == nullptr)
{
assert(node->gtOp1 != nullptr);
assert(node->NullOp2Legal());
m_edge = &node->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else if ((node->gtFlags & GTF_REVERSE_OPS) != 0)
{
m_edge = &m_node->AsOp()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<true>;
}
else
{
m_edge = &m_node->AsOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<false>;
}
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceMultiOp: produces the next operand of a multi-op node and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// incrementing the "m_edge" pointer, unless the end, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
m_edge++;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceReversedMultiOp: produces the next operand of a multi-op node
//    marked with GTF_REVERSE_OPS and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// decrementing the "m_edge" pointer, unless the beginning, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceReversedMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
assert((m_node->AsMultiOp()->GetOperandCount() == 2) && m_node->IsReverseOp());
m_edge--;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForMultiOp: produces the first operand of a multi-op node and sets the
// required advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForMultiOp()
{
size_t operandCount = m_node->AsMultiOp()->GetOperandCount();
if (operandCount == 0)
{
Terminate();
}
else
{
if (m_node->IsReverseOp())
{
assert(operandCount == 2);
m_edge = m_node->AsMultiOp()->GetOperandArray() + 1;
m_statePtr = m_node->AsMultiOp()->GetOperandArray() - 1;
m_advance = &GenTreeUseEdgeIterator::AdvanceReversedMultiOp;
}
else
{
m_edge = m_node->AsMultiOp()->GetOperandArray();
m_statePtr = m_node->AsMultiOp()->GetOperandArray(operandCount);
m_advance = &GenTreeUseEdgeIterator::AdvanceMultiOp;
}
}
}
#endif
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCall: produces the next operand of a call node and advances the state.
//
// This function is a bit tricky: in order to avoid doing unnecessary work, it is instantiated with the
// state number the iterator will be in when it is called. For example, `AdvanceCall<CALL_INSTANCE>`
// is the instantiation used when the iterator is at the `CALL_INSTANCE` state (i.e. the entry state).
// This sort of templating allows each state to avoid processing earlier states without unnecessary
// duplication of code.
//
// Note that this method expands the argument lists (`gtCallArgs` and `gtCallLateArgs`) into their
// component operands.
//
template <int state>
void GenTreeUseEdgeIterator::AdvanceCall()
{
GenTreeCall* const call = m_node->AsCall();
switch (state)
{
case CALL_INSTANCE:
m_statePtr = call->gtCallArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ARGS>;
if (call->gtCallThisArg != nullptr)
{
m_edge = &call->gtCallThisArg->NodeRef();
return;
}
FALLTHROUGH;
case CALL_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_statePtr = call->gtCallLateArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_LATE_ARGS>;
FALLTHROUGH;
case CALL_LATE_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_CONTROL_EXPR>;
FALLTHROUGH;
case CALL_CONTROL_EXPR:
if (call->gtControlExpr != nullptr)
{
if (call->gtCallType == CT_INDIRECT)
{
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_COOKIE>;
}
else
{
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
m_edge = &call->gtControlExpr;
return;
}
else if (call->gtCallType != CT_INDIRECT)
{
m_state = -1;
return;
}
FALLTHROUGH;
case CALL_COOKIE:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ADDRESS>;
if (call->gtCallCookie != nullptr)
{
m_edge = &call->gtCallCookie;
return;
}
FALLTHROUGH;
case CALL_ADDRESS:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::Terminate;
if (call->gtCallAddr != nullptr)
{
m_edge = &call->gtCallAddr;
}
return;
default:
unreached();
}
}
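// Summary of the operand order produced by AdvanceCall: the 'this' argument (if any), the early
// args ('gtCallArgs'), the late args ('gtCallLateArgs'), the control expression (if any), and,
// for indirect calls only, the cookie followed by the call address.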
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::Terminate: advances the iterator to the terminal state.
//
void GenTreeUseEdgeIterator::Terminate()
{
m_state = -1;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::operator++: advances the iterator to the next operand.
//
GenTreeUseEdgeIterator& GenTreeUseEdgeIterator::operator++()
{
// If we've reached the terminal state, do nothing.
if (m_state != -1)
{
(this->*m_advance)();
}
return *this;
}
GenTreeUseEdgeIterator GenTree::UseEdgesBegin()
{
return GenTreeUseEdgeIterator(this);
}
GenTreeUseEdgeIterator GenTree::UseEdgesEnd()
{
return GenTreeUseEdgeIterator();
}
IteratorPair<GenTreeUseEdgeIterator> GenTree::UseEdges()
{
return MakeIteratorPair(UseEdgesBegin(), UseEdgesEnd());
}
GenTreeOperandIterator GenTree::OperandsBegin()
{
return GenTreeOperandIterator(this);
}
GenTreeOperandIterator GenTree::OperandsEnd()
{
return GenTreeOperandIterator();
}
IteratorPair<GenTreeOperandIterator> GenTree::Operands()
{
return MakeIteratorPair(OperandsBegin(), OperandsEnd());
}
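// Example usage (a sketch): both ranges support range-based for loops, e.g.
//
//   for (GenTree* operand : someNode->Operands())
//   {
//       // inspect "operand"
//   }
//
// UseEdges() iterates the use edges themselves, which allows the operand links to be updated in
// place.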
bool GenTree::Precedes(GenTree* other)
{
assert(other != nullptr);
for (GenTree* node = gtNext; node != nullptr; node = node->gtNext)
{
if (node == other)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------------
// SetIndirExceptionFlags : Set GTF_EXCEPT and GTF_IND_NONFAULTING flags as appropriate
// on an indirection or an array length node.
//
// Arguments:
// comp - compiler instance
//
void GenTree::SetIndirExceptionFlags(Compiler* comp)
{
assert(OperIsIndirOrArrLength());
if (OperMayThrow(comp))
{
gtFlags |= GTF_EXCEPT;
return;
}
GenTree* addr = nullptr;
if (OperIsIndir())
{
addr = AsIndir()->Addr();
}
else
{
assert(gtOper == GT_ARR_LENGTH);
addr = AsArrLen()->ArrRef();
}
if ((addr->gtFlags & GTF_EXCEPT) != 0)
{
gtFlags |= GTF_EXCEPT;
}
else
{
gtFlags &= ~GTF_EXCEPT;
gtFlags |= GTF_IND_NONFAULTING;
}
}
#ifdef DEBUG
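// Legend for the flag characters printed by gtDispFlags below, one character per position:
//   A/c : GTF_ASG / contained          C   : GTF_CALL                X : GTF_EXCEPT
//   G   : GTF_GLOB_REF                 +/O : morphed (debug) / GTF_ORDER_SIDEEFF
//   ?   : GTF_COLON_COND               N/H : GTF_DONT_CSE / GTF_MAKE_CSE
//   R   : GTF_REVERSE_OPS              U/B : GTF_UNSIGNED / GTF_BOOLEAN
//   S   : GTF_SET_FLAGS (FEATURE_SET_FLAGS only)
//   L   : GTF_LATE_ARG                 z/Z : GTF_SPILLED / GTF_SPILL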
/* static */ int GenTree::gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags)
{
int charsDisplayed = 11; // 11 is the "baseline" number of flag characters displayed
printf("%c", (flags & GTF_ASG) ? 'A' : (IsContained(flags) ? 'c' : '-'));
printf("%c", (flags & GTF_CALL) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF) ? 'G' : '-');
printf("%c", (debugFlags & GTF_DEBUG_NODE_MORPHED) ? '+' : // First print '+' if GTF_DEBUG_NODE_MORPHED is set
(flags & GTF_ORDER_SIDEEFF) ? 'O' : '-'); // otherwise print 'O' or '-'
printf("%c", (flags & GTF_COLON_COND) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED) ? 'U' : (flags & GTF_BOOLEAN) ? 'B' : '-');
#if FEATURE_SET_FLAGS
printf("%c", (flags & GTF_SET_FLAGS) ? 'S' : '-');
++charsDisplayed;
#endif
printf("%c", (flags & GTF_LATE_ARG) ? 'L' : '-');
printf("%c", (flags & GTF_SPILLED) ? 'z' : (flags & GTF_SPILL) ? 'Z' : '-');
return charsDisplayed;
}
#ifdef TARGET_X86
inline const char* GetCallConvName(CorInfoCallConvExtension callConv)
{
switch (callConv)
{
case CorInfoCallConvExtension::Managed:
return "Managed";
case CorInfoCallConvExtension::C:
return "C";
case CorInfoCallConvExtension::Stdcall:
return "Stdcall";
case CorInfoCallConvExtension::Thiscall:
return "Thiscall";
case CorInfoCallConvExtension::Fastcall:
return "Fastcall";
case CorInfoCallConvExtension::CMemberFunction:
return "CMemberFunction";
case CorInfoCallConvExtension::StdcallMemberFunction:
return "StdcallMemberFunction";
case CorInfoCallConvExtension::FastcallMemberFunction:
return "FastcallMemberFunction";
default:
return "UnknownCallConv";
}
}
#endif // TARGET_X86
/*****************************************************************************/
void Compiler::gtDispNodeName(GenTree* tree)
{
/* print the node name */
const char* name;
assert(tree);
if (tree->gtOper < GT_COUNT)
{
name = GenTree::OpName(tree->OperGet());
}
else
{
name = "<ERROR>";
}
char buf[32];
char* bufp = &buf[0];
if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
sprintf_s(bufp, sizeof(buf), " %s(h)%c", name, 0);
}
else if (tree->gtOper == GT_PUTARG_STK)
{
sprintf_s(bufp, sizeof(buf), " %s [+0x%02x]%c", name, tree->AsPutArgStk()->getArgOffset(), 0);
}
else if (tree->gtOper == GT_CALL)
{
const char* callType = "CALL";
const char* gtfType = "";
const char* ctType = "";
char gtfTypeBuf[100];
if (tree->AsCall()->gtCallType == CT_USER_FUNC)
{
if (tree->AsCall()->IsVirtual())
{
callType = "CALLV";
}
}
else if (tree->AsCall()->gtCallType == CT_HELPER)
{
ctType = " help";
}
else if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
ctType = " ind";
}
else
{
assert(!"Unknown gtCallType");
}
if (tree->gtFlags & GTF_CALL_NULLCHECK)
{
gtfType = " nullcheck";
}
if (tree->AsCall()->IsVirtualVtable())
{
gtfType = " vt-ind";
}
else if (tree->AsCall()->IsVirtualStub())
{
gtfType = " stub";
}
#ifdef FEATURE_READYTORUN
else if (tree->AsCall()->IsR2RRelativeIndir())
{
gtfType = " r2r_ind";
}
#endif // FEATURE_READYTORUN
else if (tree->gtFlags & GTF_CALL_UNMANAGED)
{
char* gtfTypeBufWalk = gtfTypeBuf;
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " unman");
if (tree->gtFlags & GTF_CALL_POP_ARGS)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " popargs");
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " thiscall");
}
#ifdef TARGET_X86
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " %s",
GetCallConvName(tree->AsCall()->GetUnmanagedCallConv()));
#endif // TARGET_X86
gtfType = gtfTypeBuf;
}
sprintf_s(bufp, sizeof(buf), " %s%s%s%c", callType, ctType, gtfType, 0);
}
else if (tree->gtOper == GT_ARR_ELEM)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
for (unsigned rank = tree->AsArrElem()->gtArrRank - 1; rank; rank--)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_ARR_OFFSET || tree->gtOper == GT_ARR_INDEX)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
unsigned char currDim;
unsigned char rank;
if (tree->gtOper == GT_ARR_OFFSET)
{
currDim = tree->AsArrOffs()->gtCurrDim;
rank = tree->AsArrOffs()->gtArrRank;
}
else
{
currDim = tree->AsArrIndex()->gtCurrDim;
rank = tree->AsArrIndex()->gtArrRank;
}
for (unsigned char dim = 0; dim < rank; dim++)
{
// Use the de facto standard i,j,k for the dimensions.
// Note that we only support up to rank 3 arrays with these nodes, so we won't run out of characters.
char dimChar = '*';
if (dim == currDim)
{
dimChar = 'i' + dim;
}
else if (dim > currDim)
{
dimChar = ' ';
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%c", dimChar);
if (dim != rank - 1)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_LEA)
{
GenTreeAddrMode* lea = tree->AsAddrMode();
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s(", name);
if (lea->Base() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "b+");
}
if (lea->Index() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "(i*%d)+", lea->gtScale);
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%d)", lea->Offset());
}
else if (tree->gtOper == GT_BOUNDS_CHECK)
{
switch (tree->AsBoundsChk()->gtThrowKind)
{
case SCK_RNGCHK_FAIL:
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s_Rng", name);
if (tree->AsBoundsChk()->gtIndRngFailBB != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " -> " FMT_BB,
tree->AsBoundsChk()->gtIndRngFailBB->bbNum);
}
break;
}
case SCK_ARG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_Arg", name);
break;
case SCK_ARG_RNG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_ArgRng", name);
break;
default:
unreached();
}
}
else if (tree->gtOverflowEx())
{
sprintf_s(bufp, sizeof(buf), " %s_ovfl%c", name, 0);
}
else
{
sprintf_s(bufp, sizeof(buf), " %s%c", name, 0);
}
if (strlen(buf) < 10)
{
printf(" %-10s", buf);
}
else
{
printf(" %s", buf);
}
}
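// Representative outputs from gtDispNodeName (operands and decorations assumed for illustration):
//   " ADD", " CALL help" (helper call), " CALLV stub" (virtual stub call),
//   " LEA(b+(i*4)+16)" (address mode), " ADD_ovfl" (overflow-checked op).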
//------------------------------------------------------------------------
// gtDispZeroFieldSeq: If this node has a zero-offset field sequence annotation,
//                     then print that field sequence
//
void Compiler::gtDispZeroFieldSeq(GenTree* tree)
{
NodeToFieldSeqMap* map = GetZeroOffsetFieldMap();
// The most common case is having no entries in this map
if (map->GetCount() > 0)
{
FieldSeqNode* fldSeq = nullptr;
if (map->Lookup(tree, &fldSeq))
{
printf(" Zero");
gtDispAnyFieldSeq(fldSeq);
}
}
}
//------------------------------------------------------------------------
// gtDispVN: Utility function that prints a tree's ValueNumber: gtVNPair
//
void Compiler::gtDispVN(GenTree* tree)
{
if (tree->gtVNPair.GetLiberal() != ValueNumStore::NoVN)
{
assert(tree->gtVNPair.GetConservative() != ValueNumStore::NoVN);
printf(" ");
vnpPrint(tree->gtVNPair, 0);
}
}
//------------------------------------------------------------------------
// gtDispCommonEndLine
// Utility function that prints the following node information
//       1. The associated zero-offset field sequence (if any)
//       2. The register assigned to this node (if any)
//       3. The value number assigned (if any)
//       4. A newline character
//
void Compiler::gtDispCommonEndLine(GenTree* tree)
{
gtDispZeroFieldSeq(tree);
gtDispRegVal(tree);
gtDispVN(tree);
printf("\n");
}
//------------------------------------------------------------------------
// gtDispNode: Print a tree to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
//    msg         - a contextual message (i.e. from the parent) to print
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' may be null
void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, _In_ _In_opt_z_ const char* msg, bool isLIR)
{
bool printFlags = true; // always true..
int msgLength = 25;
GenTree* prev;
if (tree->gtSeqNum)
{
printf("N%03u ", tree->gtSeqNum);
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
printf("(???"
",???"
") "); // This probably indicates a bug: the node has a sequence number, but not costs.
}
}
else
{
prev = tree;
bool hasSeqNum = true;
unsigned dotNum = 0;
do
{
dotNum++;
prev = prev->gtPrev;
if ((prev == nullptr) || (prev == tree))
{
hasSeqNum = false;
break;
}
assert(prev);
} while (prev->gtSeqNum == 0);
// If we have an indent stack, don't add additional characters,
// as it will mess up the alignment.
bool displayDotNum = hasSeqNum && (indentStack == nullptr);
if (displayDotNum)
{
printf("N%03u.%02u ", prev->gtSeqNum, dotNum);
}
else
{
printf(" ");
}
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
if (displayDotNum)
{
// Do better alignment in this case
printf(" ");
}
else
{
printf(" ");
}
}
}
if (optValnumCSE_phase)
{
if (IS_CSE_INDEX(tree->gtCSEnum))
{
printf(FMT_CSE " (%s)", GET_CSE_INDEX(tree->gtCSEnum), (IS_CSE_USE(tree->gtCSEnum) ? "use" : "def"));
}
else
{
printf(" ");
}
}
/* Print the node ID */
printTreeID(tree);
printf(" ");
if (tree->gtOper >= GT_COUNT)
{
printf(" **** ILLEGAL NODE ****");
return;
}
if (printFlags)
{
/* First print the flags associated with the node */
switch (tree->gtOper)
{
case GT_LEA:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_IND:
// We prefer printing V or U
if ((tree->gtFlags & (GTF_IND_VOLATILE | GTF_IND_UNALIGNED)) == 0)
{
if (tree->gtFlags & GTF_IND_TGTANYWHERE)
{
printf("*");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP)
{
printf("s");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_INVARIANT)
{
printf("#");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ARR_INDEX)
{
printf("a");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
printf("n"); // print a n for non-faulting
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ASG_LHS)
{
printf("D"); // print a D for definition
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONNULL)
{
printf("@");
--msgLength;
break;
}
}
FALLTHROUGH;
case GT_INDEX:
case GT_INDEX_ADDR:
case GT_FIELD:
case GT_CLS_VAR:
if (tree->gtFlags & GTF_IND_VOLATILE)
{
printf("V");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_UNALIGNED)
{
printf("U");
--msgLength;
break;
}
goto DASH;
case GT_ASG:
if (tree->OperIsInitBlkOp())
{
printf("I");
--msgLength;
break;
}
goto DASH;
case GT_CALL:
if (tree->AsCall()->IsInlineCandidate())
{
if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("&");
}
else
{
printf("I");
}
--msgLength;
break;
}
else if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("G");
--msgLength;
break;
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG)
{
printf("S");
--msgLength;
break;
}
if (tree->gtFlags & GTF_CALL_HOISTABLE)
{
printf("H");
--msgLength;
break;
}
goto DASH;
case GT_MUL:
#if !defined(TARGET_64BIT)
case GT_MUL_LONG:
#endif
if (tree->gtFlags & GTF_MUL_64RSLT)
{
printf("L");
--msgLength;
break;
}
goto DASH;
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (tree->gtFlags & GTF_DIV_BY_CNS_OPT)
{
printf("M"); // We will use a Multiply by reciprical
--msgLength;
break;
}
goto DASH;
case GT_LCL_FLD:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
case GT_STORE_LCL_VAR:
if (tree->gtFlags & GTF_VAR_USEASG)
{
printf("U");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_MULTIREG)
{
printf((tree->gtFlags & GTF_VAR_DEF) ? "M" : "m");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_DEF)
{
printf("D");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CAST)
{
printf("C");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
printf("i");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CONTEXT)
{
printf("!");
--msgLength;
break;
}
goto DASH;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
printf("N");
--msgLength;
break;
}
if (tree->gtFlags & GTF_RELOP_JMP_USED)
{
printf("J");
--msgLength;
break;
}
goto DASH;
case GT_JCMP:
printf((tree->gtFlags & GTF_JCMP_TST) ? "T" : "C");
printf((tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
goto DASH;
case GT_CNS_INT:
if (tree->IsIconHandle())
{
if ((tree->gtFlags & GTF_ICON_INITCLASS) != 0)
{
printf("I"); // Static Field handle with INITCLASS requirement
--msgLength;
break;
}
else if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf("O");
--msgLength;
break;
}
else
{
// Some other handle
printf("H");
--msgLength;
break;
}
}
goto DASH;
default:
DASH:
printf("-");
--msgLength;
break;
}
/* Then print the general purpose flags */
GenTreeFlags flags = tree->gtFlags;
if (tree->OperIsBinary() || tree->OperIsMultiOp())
{
genTreeOps oper = tree->OperGet();
// Check for GTF_ADDRMODE_NO_CSE flag on add/mul/shl Binary Operators
if ((oper == GT_ADD) || (oper == GT_MUL) || (oper == GT_LSH))
{
if ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)
{
flags |= GTF_DONT_CSE; // Force the GTF_ADDRMODE_NO_CSE flag to print out like GTF_DONT_CSE
}
}
}
else // !(tree->OperIsBinary() || tree->OperIsMultiOp())
{
// The GTF_REVERSE_OPS flag only applies to binary operations (which some MultiOp nodes are).
flags &= ~GTF_REVERSE_OPS; // we use this value for GTF_VAR_ARR_INDEX above
}
msgLength -= GenTree::gtDispFlags(flags, tree->gtDebugFlags);
/*
printf("%c", (flags & GTF_ASG ) ? 'A' : '-');
printf("%c", (flags & GTF_CALL ) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT ) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF ) ? 'G' : '-');
printf("%c", (flags & GTF_ORDER_SIDEEFF ) ? 'O' : '-');
printf("%c", (flags & GTF_COLON_COND ) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE ) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE ) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS ) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED ) ? 'U' :
(flags & GTF_BOOLEAN ) ? 'B' : '-');
printf("%c", (flags & GTF_SET_FLAGS ) ? 'S' : '-');
printf("%c", (flags & GTF_SPILLED ) ? 'z' : '-');
printf("%c", (flags & GTF_SPILL ) ? 'Z' : '-');
*/
}
// If we're printing a node for LIR, we use the space normally associated with the message
// to display the node's temp name (if any)
const bool hasOperands = tree->OperandsBegin() != tree->OperandsEnd();
if (isLIR)
{
assert(msg == nullptr);
// If the tree does not have any operands, we do not display the indent stack. This gives us
// two additional characters for alignment.
if (!hasOperands)
{
msgLength += 1;
}
if (tree->IsValue())
{
const size_t bufLength = msgLength - 1;
msg = reinterpret_cast<char*>(_alloca(bufLength * sizeof(char)));
sprintf_s(const_cast<char*>(msg), bufLength, "t%d = %s", tree->gtTreeID, hasOperands ? "" : " ");
}
}
/* print the msg associated with the node */
if (msg == nullptr)
{
msg = "";
}
if (msgLength < 0)
{
msgLength = 0;
}
printf(isLIR ? " %+*s" : " %-*s", msgLength, msg);
/* Indent the node accordingly */
if (!isLIR || hasOperands)
{
printIndent(indentStack);
}
gtDispNodeName(tree);
assert(tree == nullptr || tree->gtOper < GT_COUNT);
if (tree)
{
/* print the type of the node */
if (tree->gtOper != GT_CAST)
{
printf(" %-6s", varTypeName(tree->TypeGet()));
if (varTypeIsStruct(tree->TypeGet()))
{
ClassLayout* layout = nullptr;
if (tree->OperIs(GT_BLK, GT_OBJ, GT_STORE_BLK, GT_STORE_OBJ))
{
layout = tree->AsBlk()->GetLayout();
}
else if (tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varTypeIsStruct(varDsc->TypeGet()))
{
layout = varDsc->GetLayout();
}
}
if (layout != nullptr)
{
gtDispClassLayout(layout, tree->TypeGet());
}
}
if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_STORE_LCL_VAR)
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->IsAddressExposed())
{
printf("(AX)"); // Variable has address exposed.
}
if (varDsc->lvUnusedStruct)
{
assert(varDsc->lvPromoted);
printf("(U)"); // Unused struct
}
else if (varDsc->lvPromoted)
{
if (varTypeIsPromotable(varDsc))
{
printf("(P)"); // Promoted struct
}
else
{
// Promoted implicit by-refs can have this state during
// global morph while they are being rewritten
printf("(P?!)"); // Promoted struct
}
}
}
if (tree->IsArgPlaceHolderNode() && (tree->AsArgPlace()->gtArgPlaceClsHnd != nullptr))
{
printf(" => [clsHnd=%08X]", dspPtr(tree->AsArgPlace()->gtArgPlaceClsHnd));
}
if (tree->gtOper == GT_RUNTIMELOOKUP)
{
#ifdef TARGET_64BIT
printf(" 0x%llx", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#else
printf(" 0x%x", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#endif
switch (tree->AsRuntimeLookup()->gtHndType)
{
case CORINFO_HANDLETYPE_CLASS:
printf(" class");
break;
case CORINFO_HANDLETYPE_METHOD:
printf(" method");
break;
case CORINFO_HANDLETYPE_FIELD:
printf(" field");
break;
default:
printf(" unknown");
break;
}
}
}
// for tracking down problems in reguse prediction or liveness tracking
if (verbose && 0)
{
printf(" RR=");
dspRegMask(tree->gtRsvdRegs);
printf("\n");
}
}
}
#if FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispMultiRegCount: determine how many registers to print for a multi-reg node
//
// Arguments:
// tree - GenTree node whose registers we want to print
//
// Return Value:
// The number of registers to print
//
// Notes:
// This is not the same in all cases as GenTree::GetMultiRegCount().
// In particular, for COPY or RELOAD it only returns the number of *valid* registers,
// and for CALL, it will return 0 if the ReturnTypeDesc hasn't yet been initialized.
// But we want to print all register positions.
//
unsigned Compiler::gtDispMultiRegCount(GenTree* tree)
{
if (tree->IsCopyOrReload())
{
// GetRegCount() will return only the number of valid regs for COPY or RELOAD,
// but we want to print all positions, so we get the reg count for op1.
return gtDispMultiRegCount(tree->gtGetOp1());
}
else if (!tree->IsMultiRegNode())
{
// We can wind up here because IsMultiRegNode() always returns true for COPY or RELOAD,
// even if its op1 is not multireg.
// Note that this method won't be called for non-register-producing nodes.
return 1;
}
else if (tree->OperIs(GT_CALL))
{
unsigned regCount = tree->AsCall()->GetReturnTypeDesc()->TryGetReturnRegCount();
// If it hasn't yet been initialized, we'd still like to see the registers printed.
if (regCount == 0)
{
regCount = MAX_RET_REG_COUNT;
}
return regCount;
}
else
{
return tree->GetMultiRegCount(this);
}
}
#endif // FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispRegVal: Print the register(s) defined by the given node
//
// Arguments:
// tree - Gentree node whose registers we want to print
//
void Compiler::gtDispRegVal(GenTree* tree)
{
switch (tree->GetRegTag())
{
// Don't display anything for the GT_REGTAG_NONE case;
// the absence of printed register values will imply this state.
case GenTree::GT_REGTAG_REG:
printf(" REG %s", compRegVarName(tree->GetRegNum()));
break;
default:
return;
}
#if FEATURE_MULTIREG_RET
if (tree->IsMultiRegNode())
{
// 0th reg is GetRegNum(), which is already printed above.
// Print the remaining regs of a multi-reg node.
unsigned regCount = gtDispMultiRegCount(tree);
// For some nodes, e.g. COPY, RELOAD or CALL, we may not have valid regs for all positions.
for (unsigned i = 1; i < regCount; ++i)
{
regNumber reg = tree->GetRegByIndex(i);
printf(",%s", genIsValidReg(reg) ? compRegVarName(reg) : "NA");
}
}
#endif
}
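// Example output from gtDispRegVal (register names are target-dependent and assumed for
// illustration): " REG rax" for a single-reg node, or " REG rax,rdx" for a multi-reg node;
// positions without a valid register print as "NA".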
// We usually don't expect to print anything longer than this string.
#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame"
#define LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH (sizeof(LONGEST_COMMON_LCL_VAR_DISPLAY))
#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH * 2)
void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut)
{
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = compMap2ILvarNum(lclNum);
if (ilNum == (unsigned)ICorDebugInfo::RETBUF_ILNUM)
{
ilName = "RetBuf";
}
else if (ilNum == (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM)
{
ilName = "VarArgHandle";
}
else if (ilNum == (unsigned)ICorDebugInfo::TYPECTXT_ILNUM)
{
ilName = "TypeCtx";
}
else if (ilNum == (unsigned)ICorDebugInfo::UNKNOWN_ILNUM)
{
if (lclNumIsTrueCSE(lclNum))
{
ilKind = "cse";
ilNum = lclNum - optCSEstart;
}
else if (lclNum >= optCSEstart)
{
// Currently any new LclVars introduced after the CSE phase
// are believed to be created by the "rationalizer"; that is what is meant by the "rat" prefix.
ilKind = "rat";
ilNum = lclNum - (optCSEstart + optCSEcount);
}
else
{
if (lclNum == info.compLvFrameListRoot)
{
ilName = "FramesRoot";
}
else if (lclNum == lvaInlinedPInvokeFrameVar)
{
ilName = "PInvokeFrame";
}
else if (lclNum == lvaGSSecurityCookie)
{
ilName = "GsCookie";
}
else if (lclNum == lvaRetAddrVar)
{
ilName = "ReturnAddress";
}
#if FEATURE_FIXED_OUT_ARGS
else if (lclNum == lvaPInvokeFrameRegSaveVar)
{
ilName = "PInvokeFrameRegSave";
}
else if (lclNum == lvaOutgoingArgSpaceVar)
{
ilName = "OutArgs";
}
#endif // FEATURE_FIXED_OUT_ARGS
#if !defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaShadowSPslotsVar)
{
ilName = "EHSlots";
}
#endif // !FEATURE_EH_FUNCLETS
#ifdef JIT32_GCENCODER
else if (lclNum == lvaLocAllocSPvar)
{
ilName = "LocAllocSP";
}
#endif // JIT32_GCENCODER
#if defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaPSPSym)
{
ilName = "PSPSym";
}
#endif // FEATURE_EH_FUNCLETS
else
{
ilKind = "tmp";
if (compIsForInlining())
{
ilNum = lclNum - impInlineInfo->InlinerCompiler->info.compLocalsCount;
}
else
{
ilNum = lclNum - info.compLocalsCount;
}
}
}
}
else if (lclNum < (compIsForInlining() ? impInlineInfo->InlinerCompiler->info.compArgsCount : info.compArgsCount))
{
if (ilNum == 0 && !info.compIsStatic)
{
ilName = "this";
}
else
{
ilKind = "arg";
}
}
else
{
if (!lvaTable[lclNum].lvIsStructField)
{
ilKind = "loc";
}
if (compIsForInlining())
{
ilNum -= impInlineInfo->InlinerCompiler->info.compILargsCount;
}
else
{
ilNum -= info.compILargsCount;
}
}
*ilKindOut = ilKind;
*ilNameOut = ilName;
*ilNumOut = ilNum;
}
/*****************************************************************************/
int Compiler::gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining)
{
char* bufp_next = buf;
unsigned charsPrinted = 0;
int sprintf_result;
sprintf_result = sprintf_s(bufp_next, buf_remaining, "V%02u", lclNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = 0;
gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum);
if (ilName != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s", ilName);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
else if (ilKind != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s%d", ilKind, ilNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
assert(charsPrinted > 0);
assert(buf_remaining > 0);
return (int)charsPrinted;
}
/*****************************************************************************
* Get the local var name, and create a copy of the string that can be used in debug output.
*/
char* Compiler::gtGetLclVarName(unsigned lclNum)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return nullptr;
}
char* retBuf = new (this, CMK_DebugOnly) char[charsPrinted + 1];
strcpy_s(retBuf, charsPrinted + 1, buf);
return retBuf;
}
/*****************************************************************************/
void Compiler::gtDispLclVar(unsigned lclNum, bool padForBiggestDisp)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return;
}
printf("%s", buf);
if (padForBiggestDisp && (charsPrinted < (int)LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH))
{
printf("%*c", LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH - charsPrinted, ' ');
}
}
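// Example outputs from gtDispLclVar (local numbers assumed for illustration): "V02 arg1",
// "V07 tmp3", "V09 OutArgs"; i.e. the "V%02u" number followed by either a special name or an
// IL kind/number as computed by gtGetLclVarNameInfo.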
//------------------------------------------------------------------------
// gtDispLclVarStructType: Print size and type information about a struct or lclBlk local variable.
//
// Arguments:
// lclNum - The local var id.
//
void Compiler::gtDispLclVarStructType(unsigned lclNum)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
var_types type = varDsc->TypeGet();
if (type == TYP_STRUCT)
{
ClassLayout* layout = varDsc->GetLayout();
assert(layout != nullptr);
gtDispClassLayout(layout, type);
}
else if (type == TYP_LCLBLK)
{
#if FEATURE_FIXED_OUT_ARGS
assert(lclNum == lvaOutgoingArgSpaceVar);
// Since lvaOutgoingArgSpaceSize is a PhasedVar, we can't read it for dumping until
// after we set it to something.
if (lvaOutgoingArgSpaceSize.HasFinalValue())
{
// A PhasedVar<T> can't be directly used as an arg to a variadic function
unsigned value = lvaOutgoingArgSpaceSize;
printf("<%u> ", value);
}
else
{
printf("<na> "); // The value hasn't yet been determined
}
#else
assert(!"Unknown size");
NO_WAY("Target doesn't support TYP_LCLBLK");
#endif // FEATURE_FIXED_OUT_ARGS
}
}
//------------------------------------------------------------------------
// gtDispClassLayout: Print size and type information about a layout.
//
// Arguments:
// layout - the layout;
// type - variable type, used to avoid printing size for SIMD nodes.
//
void Compiler::gtDispClassLayout(ClassLayout* layout, var_types type)
{
assert(layout != nullptr);
if (layout->IsBlockLayout())
{
printf("<%u>", layout->GetSize());
}
else if (varTypeIsSIMD(type))
{
printf("<%s>", layout->GetClassName());
}
else
{
printf("<%s, %u>", layout->GetClassName(), layout->GetSize());
}
}
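// Example outputs from gtDispClassLayout (class names and sizes assumed for illustration):
// "<32>" for a block layout, "<SomeSimdType>" for a SIMD-typed layout (size omitted), and
// "<SomeStruct, 24>" otherwise.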
/*****************************************************************************/
void Compiler::gtDispConst(GenTree* tree)
{
assert(tree->OperIsConst());
switch (tree->gtOper)
{
case GT_CNS_INT:
if (tree->IsIconHandle(GTF_ICON_STR_HDL))
{
const WCHAR* str = eeGetCPString(tree->AsIntCon()->gtIconVal);
// If *str points to a '\0' then don't print the string's values
if ((str != nullptr) && (*str != '\0'))
{
printf(" 0x%X \"%S\"", dspPtr(tree->AsIntCon()->gtIconVal), str);
}
else // We can't print the value of the string
{
// Note that eeGetCPString isn't currently implemented on Linux/ARM
// and instead always returns nullptr
printf(" 0x%X [ICON_STR_HDL]", dspPtr(tree->AsIntCon()->gtIconVal));
}
}
else
{
ssize_t dspIconVal =
tree->IsIconHandle() ? dspPtr(tree->AsIntCon()->gtIconVal) : tree->AsIntCon()->gtIconVal;
if (tree->TypeGet() == TYP_REF)
{
assert(tree->AsIntCon()->gtIconVal == 0);
printf(" null");
}
else if ((tree->AsIntCon()->gtIconVal > -1000) && (tree->AsIntCon()->gtIconVal < 1000))
{
printf(" %ld", dspIconVal);
}
#ifdef TARGET_64BIT
else if ((tree->AsIntCon()->gtIconVal & 0xFFFFFFFF00000000LL) != 0)
{
if (dspIconVal >= 0)
{
printf(" 0x%llx", dspIconVal);
}
else
{
printf(" -0x%llx", -dspIconVal);
}
}
#endif
else
{
if (dspIconVal >= 0)
{
printf(" 0x%X", dspIconVal);
}
else
{
printf(" -0x%X", -dspIconVal);
}
}
if (tree->IsIconHandle())
{
switch (tree->GetIconHandleFlag())
{
case GTF_ICON_SCOPE_HDL:
printf(" scope");
break;
case GTF_ICON_CLASS_HDL:
printf(" class");
break;
case GTF_ICON_METHOD_HDL:
printf(" method");
break;
case GTF_ICON_FIELD_HDL:
printf(" field");
break;
case GTF_ICON_STATIC_HDL:
printf(" static");
break;
case GTF_ICON_STR_HDL:
unreached(); // This case is handled above
break;
case GTF_ICON_CONST_PTR:
printf(" const ptr");
break;
case GTF_ICON_GLOBAL_PTR:
printf(" global ptr");
break;
case GTF_ICON_VARG_HDL:
printf(" vararg");
break;
case GTF_ICON_PINVKI_HDL:
printf(" pinvoke");
break;
case GTF_ICON_TOKEN_HDL:
printf(" token");
break;
case GTF_ICON_TLS_HDL:
printf(" tls");
break;
case GTF_ICON_FTN_ADDR:
printf(" ftn");
break;
case GTF_ICON_CIDMID_HDL:
printf(" cid/mid");
break;
case GTF_ICON_BBC_PTR:
printf(" bbc");
break;
case GTF_ICON_STATIC_BOX_PTR:
printf(" static box ptr");
break;
default:
printf(" UNKNOWN");
break;
}
}
if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf(" field offset");
}
#ifdef FEATURE_SIMD
if ((tree->gtFlags & GTF_ICON_SIMD_COUNT) != 0)
{
printf(" vector element count");
}
#endif
if ((tree->IsReuseRegVal()) != 0)
{
printf(" reuse reg val");
}
}
gtDispFieldSeq(tree->AsIntCon()->gtFieldSeq);
break;
case GT_CNS_LNG:
printf(" 0x%016I64x", tree->AsLngCon()->gtLconVal);
break;
case GT_CNS_DBL:
if (*((__int64*)&tree->AsDblCon()->gtDconVal) == (__int64)I64(0x8000000000000000))
{
printf(" -0.00000");
}
else
{
printf(" %#.17g", tree->AsDblCon()->gtDconVal);
}
break;
case GT_CNS_STR:
printf("<string constant>");
break;
default:
assert(!"unexpected constant node");
}
}
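// Representative outputs from gtDispConst (values assumed for illustration): " 42" for a small
// integer, " null" for a TYP_REF zero, " 0x2A40 token" for a handle constant, a double printed
// with "%#.17g", and "<string constant>" for GT_CNS_STR.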
//------------------------------------------------------------------------
// gtDispFieldSeq: "gtDispFieldSeq" that also prints "<NotAField>".
//
// Useful for printing zero-offset field sequences.
//
void Compiler::gtDispAnyFieldSeq(FieldSeqNode* fieldSeq)
{
if (fieldSeq == FieldSeqStore::NotAField())
{
printf(" Fseq<NotAField>");
return;
}
gtDispFieldSeq(fieldSeq);
}
//------------------------------------------------------------------------
// gtDispFieldSeq: Print out the fields in this field sequence.
//
void Compiler::gtDispFieldSeq(FieldSeqNode* pfsn)
{
if ((pfsn == nullptr) || (pfsn == FieldSeqStore::NotAField()))
{
return;
}
// Otherwise...
printf(" Fseq[");
while (pfsn != nullptr)
{
assert(pfsn != FieldSeqStore::NotAField()); // Can't exist in a field sequence list except alone
CORINFO_FIELD_HANDLE fldHnd = pfsn->m_fieldHnd;
// First check the "pseudo" field handles...
if (fldHnd == FieldSeqStore::FirstElemPseudoField)
{
printf("#FirstElem");
}
else if (fldHnd == FieldSeqStore::ConstantIndexPseudoField)
{
printf("#ConstantIndex");
}
else
{
printf("%s", eeGetFieldName(fldHnd));
}
pfsn = pfsn->m_next;
if (pfsn != nullptr)
{
printf(", ");
}
}
printf("]");
}
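// Example output from gtDispFieldSeq (field name assumed for illustration):
// " Fseq[#FirstElem, SomeField]"; nothing is printed for a null or NotAField sequence.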
//------------------------------------------------------------------------
// gtDispLeaf: Print a single leaf node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack)
{
if (tree->OperIsConst())
{
gtDispConst(tree);
return;
}
bool isLclFld = false;
switch (tree->gtOper)
{
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
isLclFld = true;
FALLTHROUGH;
case GT_PHI_ARG:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_STORE_LCL_VAR:
{
printf(" ");
const unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
const LclVarDsc* varDsc = lvaGetDesc(varNum);
gtDispLclVar(varNum);
if (tree->AsLclVarCommon()->HasSsaName())
{
if (tree->gtFlags & GTF_VAR_USEASG)
{
assert(tree->gtFlags & GTF_VAR_DEF);
printf("ud:%d->%d", tree->AsLclVarCommon()->GetSsaNum(), GetSsaNumForLocalVarDef(tree));
}
else
{
printf("%s:%d", (tree->gtFlags & GTF_VAR_DEF) ? "d" : "u", tree->AsLclVarCommon()->GetSsaNum());
}
}
if (isLclFld)
{
printf("[+%u]", tree->AsLclFld()->GetLclOffs());
gtDispFieldSeq(tree->AsLclFld()->GetFieldSeq());
}
if (varDsc->lvRegister)
{
printf(" ");
varDsc->PrintVarReg();
}
else if (tree->InReg())
{
printf(" %s", compRegVarName(tree->GetRegNum()));
}
if (varDsc->lvPromoted)
{
if (!varTypeIsPromotable(varDsc) && !varDsc->lvUnusedStruct)
{
// Promoted implicit byrefs can get in this state while they are being rewritten
// in global morph.
}
else
{
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(i);
const char* fieldName;
#if !defined(TARGET_64BIT)
if (varTypeIsLong(varDsc))
{
fieldName = (i == 0) ? "lo" : "hi";
}
else
#endif // !defined(TARGET_64BIT)
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
CORINFO_FIELD_HANDLE fldHnd =
info.compCompHnd->getFieldInClass(typeHnd, fieldVarDsc->lvFldOrdinal);
fieldName = eeGetFieldName(fldHnd);
}
printf("\n");
printf(" ");
printIndent(indentStack);
printf(" %-6s V%02u.%s (offs=0x%02x) -> ", varTypeName(fieldVarDsc->TypeGet()),
tree->AsLclVarCommon()->GetLclNum(), fieldName, fieldVarDsc->lvFldOffset);
gtDispLclVar(i);
if (fieldVarDsc->lvRegister)
{
printf(" ");
fieldVarDsc->PrintVarReg();
}
if (fieldVarDsc->lvTracked && fgLocalVarLivenessDone && tree->IsMultiRegLclVar() &&
tree->AsLclVar()->IsLastUse(i - varDsc->lvFieldLclStart))
{
printf(" (last use)");
}
}
}
}
else // a normal not-promoted lclvar
{
if (varDsc->lvTracked && fgLocalVarLivenessDone && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
printf(" (last use)");
}
}
}
break;
case GT_JMP:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsVal()->gtVal1, &className);
printf(" %s.%s\n", className, methodName);
}
break;
case GT_CLS_VAR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
gtDispFieldSeq(tree->AsClsVar()->gtFieldSeq);
break;
case GT_CLS_VAR_ADDR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
break;
case GT_LABEL:
break;
case GT_FTN_ADDR:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsFptrVal()->gtFptrMethod, &className);
printf(" %s.%s\n", className, methodName);
}
break;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
printf(" endNstLvl=%d", tree->AsVal()->gtVal1);
break;
#endif // !FEATURE_EH_FUNCLETS
// Vanilla leaves. No qualifying information available. So do nothing
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
case GT_CATCH_ARG:
case GT_MEMORYBARRIER:
case GT_ARGPLACE:
case GT_PINVOKE_PROLOG:
case GT_JMPTABLE:
break;
case GT_RET_EXPR:
{
GenTree* const associatedTree = tree->AsRetExpr()->gtInlineCandidate;
printf("(inl return %s ", tree->IsCall() ? " from call" : "expr");
printTreeID(associatedTree);
printf(")");
}
break;
case GT_PHYSREG:
printf(" %s", getRegName(tree->AsPhysReg()->gtSrcReg));
break;
case GT_IL_OFFSET:
printf(" ");
tree->AsILOffset()->gtStmtDI.Dump(true);
break;
case GT_JCC:
case GT_SETCC:
printf(" cond=%s", tree->AsCC()->gtCondition.Name());
break;
case GT_JCMP:
printf(" cond=%s%s", (tree->gtFlags & GTF_JCMP_TST) ? "TEST_" : "",
(tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
break;
default:
assert(!"don't know how to display tree leaf node");
}
}
//------------------------------------------------------------------------
// gtDispChild: Print a child node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
// arcType - the type of arc to use for this child
//    msg         - a contextual message (i.e. from the parent) to print
// topOnly - a boolean indicating whether to print the children, or just the top node
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' has a default value of null
// 'topOnly' is an optional argument that defaults to false
void Compiler::gtDispChild(GenTree* child,
IndentStack* indentStack,
IndentInfo arcType,
_In_opt_ const char* msg, /* = nullptr */
bool topOnly) /* = false */
{
indentStack->Push(arcType);
gtDispTree(child, indentStack, msg, topOnly);
indentStack->Pop();
}
#ifdef FEATURE_SIMD
// Intrinsic Id to name map
extern const char* const simdIntrinsicNames[] = {
#define SIMD_INTRINSIC(mname, inst, id, name, r, ac, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) name,
#include "simdintrinsiclist.h"
};
#endif // FEATURE_SIMD
/*****************************************************************************/
void Compiler::gtDispTree(GenTree* tree,
IndentStack* indentStack, /* = nullptr */
_In_ _In_opt_z_ const char* msg, /* = nullptr */
bool topOnly, /* = false */
bool isLIR) /* = false */
{
if (tree == nullptr)
{
printf(" [%08X] <NULL>\n", tree);
printf(""); // null string means flush
return;
}
if (indentStack == nullptr)
{
indentStack = new (this, CMK_DebugOnly) IndentStack(this);
}
if (IsUninitialized(tree))
{
/* Value used to initialize nodes */
printf("Uninitialized tree node!\n");
return;
}
if (tree->gtOper >= GT_COUNT)
{
gtDispNode(tree, indentStack, msg, isLIR);
printf("Bogus operator!\n");
return;
}
/* Is tree a leaf node? */
if (tree->OperIsLeaf() || tree->OperIsLocalStore()) // local stores used to be leaves
{
gtDispNode(tree, indentStack, msg, isLIR);
gtDispLeaf(tree, indentStack);
gtDispCommonEndLine(tree);
if (tree->OperIsLocalStore() && !topOnly)
{
gtDispChild(tree->AsOp()->gtOp1, indentStack, IINone);
}
return;
}
// Determine what kind of arc to propagate.
IndentInfo myArc = IINone;
IndentInfo lowerArc = IINone;
if (indentStack->Depth() > 0)
{
myArc = indentStack->Pop();
switch (myArc)
{
case IIArcBottom:
indentStack->Push(IIArc);
lowerArc = IINone;
break;
case IIArc:
indentStack->Push(IIArc);
lowerArc = IIArc;
break;
case IIArcTop:
indentStack->Push(IINone);
lowerArc = IIArc;
break;
case IINone:
indentStack->Push(IINone);
lowerArc = IINone;
break;
default:
unreached();
break;
}
}
/* Is it a 'simple' unary/binary operator? */
const char* childMsg = nullptr;
if (tree->OperIsSimple())
{
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
if (tree->gtOper == GT_CAST)
{
/* Format a message that explains the effect of this GT_CAST */
var_types fromType = genActualType(tree->AsCast()->CastOp()->TypeGet());
var_types toType = tree->CastToType();
var_types finalType = tree->TypeGet();
/* if GTF_UNSIGNED is set then force fromType to an unsigned type */
if (tree->gtFlags & GTF_UNSIGNED)
{
fromType = varTypeToUnsigned(fromType);
}
if (finalType != toType)
{
printf(" %s <-", varTypeName(finalType));
}
printf(" %s <- %s", varTypeName(toType), varTypeName(fromType));
}
if (tree->OperIsBlkOp())
{
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
if (tree->OperIsStoreBlk() && (tree->AsBlk()->gtBlkOpKind != GenTreeBlk::BlkOpKindInvalid))
{
switch (tree->AsBlk()->gtBlkOpKind)
{
#ifdef TARGET_XARCH
case GenTreeBlk::BlkOpKindRepInstr:
printf(" (RepInstr)");
break;
#endif
case GenTreeBlk::BlkOpKindUnroll:
printf(" (Unroll)");
break;
#ifndef TARGET_X86
case GenTreeBlk::BlkOpKindHelper:
printf(" (Helper)");
break;
#endif
default:
unreached();
}
}
}
#if FEATURE_PUT_STRUCT_ARG_STK
else if (tree->OperGet() == GT_PUTARG_STK)
{
const GenTreePutArgStk* putArg = tree->AsPutArgStk();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d slot), (%d byteOffset)", putArg->gtNumSlots,
putArg->GetStackByteSize(), putArg->gtSlotNum, putArg->getArgOffset());
}
#endif
if (putArg->gtPutArgStkKind != GenTreePutArgStk::Kind::Invalid)
{
switch (putArg->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
printf(" (RepInstr)");
break;
case GenTreePutArgStk::Kind::PartialRepInstr:
printf(" (PartialRepInstr)");
break;
case GenTreePutArgStk::Kind::Unroll:
printf(" (Unroll)");
break;
case GenTreePutArgStk::Kind::Push:
printf(" (Push)");
break;
case GenTreePutArgStk::Kind::PushAllSlots:
printf(" (PushAllSlots)");
break;
default:
unreached();
}
}
}
#if FEATURE_ARG_SPLIT
else if (tree->OperGet() == GT_PUTARG_SPLIT)
{
const GenTreePutArgSplit* putArg = tree->AsPutArgSplit();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d numRegs)", putArg->gtNumSlots, putArg->GetStackByteSize(),
putArg->gtNumRegs);
}
#endif
}
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
if (tree->OperIs(GT_FIELD))
{
if (FieldSeqStore::IsPseudoField(tree->AsField()->gtFldHnd))
{
printf(" #PseudoField:0x%x", tree->AsField()->gtFldOffset);
}
else
{
printf(" %s", eeGetFieldName(tree->AsField()->gtFldHnd), 0);
}
}
if (tree->gtOper == GT_INTRINSIC)
{
GenTreeIntrinsic* intrinsic = tree->AsIntrinsic();
switch (intrinsic->gtIntrinsicName)
{
case NI_System_Math_Abs:
printf(" abs");
break;
case NI_System_Math_Acos:
printf(" acos");
break;
case NI_System_Math_Acosh:
printf(" acosh");
break;
case NI_System_Math_Asin:
printf(" asin");
break;
case NI_System_Math_Asinh:
printf(" asinh");
break;
case NI_System_Math_Atan:
printf(" atan");
break;
case NI_System_Math_Atanh:
printf(" atanh");
break;
case NI_System_Math_Atan2:
printf(" atan2");
break;
case NI_System_Math_Cbrt:
printf(" cbrt");
break;
case NI_System_Math_Ceiling:
printf(" ceiling");
break;
case NI_System_Math_Cos:
printf(" cos");
break;
case NI_System_Math_Cosh:
printf(" cosh");
break;
case NI_System_Math_Exp:
printf(" exp");
break;
case NI_System_Math_Floor:
printf(" floor");
break;
case NI_System_Math_FMod:
printf(" fmod");
break;
case NI_System_Math_FusedMultiplyAdd:
printf(" fma");
break;
case NI_System_Math_ILogB:
printf(" ilogb");
break;
case NI_System_Math_Log:
printf(" log");
break;
case NI_System_Math_Log2:
printf(" log2");
break;
case NI_System_Math_Log10:
printf(" log10");
break;
case NI_System_Math_Max:
printf(" max");
break;
case NI_System_Math_Min:
printf(" min");
break;
case NI_System_Math_Pow:
printf(" pow");
break;
case NI_System_Math_Round:
printf(" round");
break;
case NI_System_Math_Sin:
printf(" sin");
break;
case NI_System_Math_Sinh:
printf(" sinh");
break;
case NI_System_Math_Sqrt:
printf(" sqrt");
break;
case NI_System_Math_Tan:
printf(" tan");
break;
case NI_System_Math_Tanh:
printf(" tanh");
break;
case NI_System_Math_Truncate:
printf(" truncate");
break;
case NI_System_Object_GetType:
printf(" objGetType");
break;
case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant:
printf(" isKnownConst");
break;
default:
unreached();
}
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
if (tree->AsOp()->gtOp1 != nullptr)
{
// Label the child of the GT_COLON operator
// op1 is the else part
if (tree->gtOper == GT_COLON)
{
childMsg = "else";
}
else if (tree->gtOper == GT_QMARK)
{
childMsg = " if";
}
gtDispChild(tree->AsOp()->gtOp1, indentStack,
(tree->gtGetOp2IfPresent() == nullptr) ? IIArcBottom : IIArc, childMsg, topOnly);
}
if (tree->gtGetOp2IfPresent())
{
// Label the child of the GT_COLON operator
// op2 is the then part
if (tree->gtOper == GT_COLON)
{
childMsg = "then";
}
gtDispChild(tree->AsOp()->gtOp2, indentStack, IIArcBottom, childMsg, topOnly);
}
}
return;
}
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
// See what kind of a special operator we have here, and handle its special children.
switch (tree->gtOper)
{
case GT_FIELD_LIST:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
char offset[32];
sprintf_s(offset, sizeof(offset), "ofs %u", use.GetOffset());
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, offset);
}
}
break;
case GT_PHI:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
char block[32];
sprintf_s(block, sizeof(block), "pred " FMT_BB, use.GetNode()->AsPhiArg()->gtPredBB->bbNum);
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, block);
}
}
break;
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
GenTree* lastChild = nullptr;
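// Find the last operand so it can be displayed with the bottom arc.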
call->VisitOperands([&lastChild](GenTree* operand) -> GenTree::VisitResult {
lastChild = operand;
return GenTree::VisitResult::Continue;
});
if (call->gtCallType != CT_INDIRECT)
{
const char* methodName;
const char* className;
methodName = eeGetMethodName(call->gtCallMethHnd, &className);
printf(" %s.%s", className, methodName);
}
if ((call->gtFlags & GTF_CALL_UNMANAGED) && (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH))
{
printf(" (FramesRoot last use)");
}
if (((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0) && (call->gtInlineCandidateInfo != nullptr) &&
(call->gtInlineCandidateInfo->exactContextHnd != nullptr))
{
printf(" (exactContextHnd=0x%p)", dspPtr(call->gtInlineCandidateInfo->exactContextHnd));
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
char buf[64];
char* bufp;
bufp = &buf[0];
if ((call->gtCallThisArg != nullptr) && !call->gtCallThisArg->GetNode()->OperIs(GT_NOP, GT_ARGPLACE))
{
if (call->gtCallThisArg->GetNode()->OperIs(GT_ASG))
{
sprintf_s(bufp, sizeof(buf), "this SETUP%c", 0);
}
else
{
sprintf_s(bufp, sizeof(buf), "this in %s%c", compRegVarName(REG_ARG_0), 0);
}
gtDispChild(call->gtCallThisArg->GetNode(), indentStack,
(call->gtCallThisArg->GetNode() == lastChild) ? IIArcBottom : IIArc, bufp, topOnly);
}
if (call->gtCallArgs)
{
gtDispArgList(call, lastChild, indentStack);
}
if (call->gtCallType == CT_INDIRECT)
{
gtDispChild(call->gtCallAddr, indentStack, (call->gtCallAddr == lastChild) ? IIArcBottom : IIArc,
"calli tgt", topOnly);
}
if (call->gtControlExpr != nullptr)
{
gtDispChild(call->gtControlExpr, indentStack,
(call->gtControlExpr == lastChild) ? IIArcBottom : IIArc, "control expr", topOnly);
}
int lateArgIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
IndentInfo arcType = (use.GetNext() == nullptr) ? IIArcBottom : IIArc;
gtGetLateArgMsg(call, use.GetNode(), lateArgIndex, bufp, sizeof(buf));
gtDispChild(use.GetNode(), indentStack, arcType, bufp, topOnly);
lateArgIndex++;
}
}
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD)
if (tree->OperIs(GT_SIMD))
{
printf(" %s %s", varTypeName(tree->AsSIMD()->GetSimdBaseType()),
simdIntrinsicNames[tree->AsSIMD()->GetSIMDIntrinsicId()]);
}
#endif // defined(FEATURE_SIMD)
#if defined(FEATURE_HW_INTRINSICS)
if (tree->OperIs(GT_HWINTRINSIC))
{
printf(" %s %s", tree->AsHWIntrinsic()->GetSimdBaseType() == TYP_UNKNOWN
? ""
: varTypeName(tree->AsHWIntrinsic()->GetSimdBaseType()),
HWIntrinsicInfo::lookupName(tree->AsHWIntrinsic()->GetHWIntrinsicId()));
}
#endif // defined(FEATURE_HW_INTRINSICS)
gtDispCommonEndLine(tree);
if (!topOnly)
{
size_t index = 0;
size_t count = tree->AsMultiOp()->GetOperandCount();
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
gtDispChild(operand, indentStack, ++index < count ? IIArc : IIArcBottom, nullptr, topOnly);
}
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrElem()->gtArrObj, indentStack, IIArc, nullptr, topOnly);
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
IndentInfo arcType = ((dim + 1) == tree->AsArrElem()->gtArrRank) ? IIArcBottom : IIArc;
gtDispChild(tree->AsArrElem()->gtArrInds[dim], indentStack, arcType, nullptr, topOnly);
}
}
break;
case GT_ARR_OFFSET:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrOffs()->gtOffset, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtIndex, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtArrObj, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_CMPXCHG:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsCmpXchg()->gtOpLocation, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpValue, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpComparand, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_STORE_DYN_BLK:
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsStoreDynBlk()->Addr(), indentStack, IIArc, nullptr, topOnly);
if (tree->AsStoreDynBlk()->Data() != nullptr)
{
gtDispChild(tree->AsStoreDynBlk()->Data(), indentStack, IIArc, nullptr, topOnly);
}
gtDispChild(tree->AsStoreDynBlk()->gtDynamicSize, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
default:
printf("<DON'T KNOW HOW TO DISPLAY THIS NODE> :");
printf(""); // null string means flush
break;
}
}
//------------------------------------------------------------------------
// gtGetArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// arg - The argument for which a message should be constructed
// argNum - The ordinal number of the arg in the argument list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
void Compiler::gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength)
{
if (call->gtCallLateArgs != nullptr)
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(call, argNum);
assert(curArgTabEntry);
if (arg->gtFlags & GTF_LATE_ARG)
{
sprintf_s(bufp, bufLength, "arg%d SETUP%c", argNum, 0);
}
else
{
#ifdef TARGET_ARM
if (curArgTabEntry->IsSplit())
{
regNumber firstReg = curArgTabEntry->GetRegNum();
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
#if FEATURE_FIXED_OUT_ARGS
sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->GetByteOffset(), 0);
#else
sprintf_s(bufp, bufLength, "arg%d on STK%c", argNum, 0);
#endif
}
}
else
{
sprintf_s(bufp, bufLength, "arg%d%c", argNum, 0);
}
}
//------------------------------------------------------------------------
// gtGetLateArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// argx - The argument for which a message should be constructed
// lateArgIndex - The ordinal number of the arg in the lastArg list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
void Compiler::gtGetLateArgMsg(GenTreeCall* call, GenTree* argx, int lateArgIndex, char* bufp, unsigned bufLength)
{
assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(call, lateArgIndex);
assert(curArgTabEntry);
regNumber argReg = curArgTabEntry->GetRegNum();
#if FEATURE_FIXED_OUT_ARGS
if (argReg == REG_STK)
{
sprintf_s(bufp, bufLength, "arg%d in out+%02x%c", curArgTabEntry->argNum, curArgTabEntry->GetByteOffset(), 0);
}
else
#endif
{
if (curArgTabEntry->use == call->gtCallThisArg)
{
sprintf_s(bufp, bufLength, "this in %s%c", compRegVarName(argReg), 0);
}
#ifdef TARGET_ARM
else if (curArgTabEntry->IsSplit())
{
regNumber firstReg = curArgTabEntry->GetRegNum();
unsigned argNum = curArgTabEntry->argNum;
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
else
{
#if FEATURE_MULTIREG_ARGS
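// A multi-register argument prints the first and last register of its range.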
if (curArgTabEntry->numRegs >= 2)
{
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
sprintf_s(bufp, bufLength, "arg%d %s%c%s%c", curArgTabEntry->argNum, compRegVarName(argReg), separator,
compRegVarName(curArgTabEntry->GetRegNum(curArgTabEntry->numRegs - 1)), 0);
}
else
#endif
{
sprintf_s(bufp, bufLength, "arg%d in %s%c", curArgTabEntry->argNum, compRegVarName(argReg), 0);
}
}
}
}
//------------------------------------------------------------------------
// gtDispArgList: Dump the tree for a call arg list
//
// Arguments:
// call - the call to dump arguments for
// lastCallOperand - the call's last operand (to determine the arc types)
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
void Compiler::gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack)
{
unsigned argnum = 0;
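// The 'this' argument (if any) is displayed by the caller; account for it in the arg numbering.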
if (call->gtCallThisArg != nullptr)
{
argnum++;
}
for (GenTreeCall::Use& use : call->Args())
{
GenTree* argNode = use.GetNode();
if (!argNode->IsNothingNode() && !argNode->IsArgPlaceHolderNode())
{
char buf[256];
gtGetArgMsg(call, argNode, argnum, buf, sizeof(buf));
gtDispChild(argNode, indentStack, (argNode == lastCallOperand) ? IIArcBottom : IIArc, buf, false);
}
argnum++;
}
}
//------------------------------------------------------------------------
// gtDispStmt: Print a statement to jitstdout.
//
// Arguments:
// stmt - the statement to be printed;
// msg - an additional message to print before the statement.
//
void Compiler::gtDispStmt(Statement* stmt, const char* msg /* = nullptr */)
{
if (opts.compDbgInfo)
{
if (msg != nullptr)
{
printf("%s ", msg);
}
printStmtID(stmt);
printf(" ( ");
const DebugInfo& di = stmt->GetDebugInfo();
// For statements in the root we display just the location without the
// inline context info.
if (di.GetInlineContext() == nullptr || di.GetInlineContext()->IsRoot())
{
di.GetLocation().Dump();
}
else
{
stmt->GetDebugInfo().Dump(false);
}
printf(" ... ");
IL_OFFSET lastILOffs = stmt->GetLastILOffset();
if (lastILOffs == BAD_IL_OFFSET)
{
printf("???");
}
else
{
printf("0x%03X", lastILOffs);
}
printf(" )");
DebugInfo par;
if (stmt->GetDebugInfo().GetParent(&par))
{
printf(" <- ");
par.Dump(true);
}
printf("\n");
}
gtDispTree(stmt->GetRootNode());
}
//------------------------------------------------------------------------
// gtDispBlockStmts: dumps all statements inside `block`.
//
// Arguments:
// block - the block to display statements for.
//
void Compiler::gtDispBlockStmts(BasicBlock* block)
{
for (Statement* const stmt : block->Statements())
{
gtDispStmt(stmt);
printf("\n");
}
}
//------------------------------------------------------------------------
// Compiler::gtDispRange: dumps a range of LIR.
//
// Arguments:
// range - the range of LIR to display.
//
void Compiler::gtDispRange(LIR::ReadOnlyRange const& range)
{
for (GenTree* node : range)
{
gtDispLIRNode(node);
}
}
//------------------------------------------------------------------------
// Compiler::gtDispTreeRange: dumps the LIR range that contains all of the
// nodes in the dataflow tree rooted at a given
// node.
//
// Arguments:
// containingRange - the LIR range that contains the root node.
// tree - the root of the dataflow tree.
//
void Compiler::gtDispTreeRange(LIR::Range& containingRange, GenTree* tree)
{
bool unused;
gtDispRange(containingRange.GetTreeRange(tree, &unused));
}
//------------------------------------------------------------------------
// Compiler::gtDispLIRNode: dumps a single LIR node.
//
// Arguments:
// node - the LIR node to dump.
// prefixMsg - an optional prefix for each line of output.
//
void Compiler::gtDispLIRNode(GenTree* node, const char* prefixMsg /* = nullptr */)
{
auto displayOperand = [](GenTree* operand, const char* message, IndentInfo operandArc, IndentStack& indentStack,
size_t prefixIndent) {
assert(operand != nullptr);
assert(message != nullptr);
if (prefixIndent != 0)
{
printf("%*s", (int)prefixIndent, "");
}
// 50 spaces for alignment
printf("%-50s", "");
#if FEATURE_SET_FLAGS
// additional flag enlarges the flag field by one character
printf(" ");
#endif
indentStack.Push(operandArc);
indentStack.print();
indentStack.Pop();
operandArc = IIArc;
printf(" t%-5d %-6s %s\n", operand->gtTreeID, varTypeName(operand->TypeGet()), message);
};
IndentStack indentStack(this);
size_t prefixIndent = 0;
if (prefixMsg != nullptr)
{
prefixIndent = strlen(prefixMsg);
}
const int bufLength = 256;
char buf[bufLength];
const bool nodeIsCall = node->IsCall();
// Visit operands
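// The first operand is drawn with the top arc; subsequent operands use plain arcs.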
IndentInfo operandArc = IIArcTop;
for (GenTree* operand : node->Operands())
{
if (operand->IsArgPlaceHolderNode() || !operand->IsValue())
{
// Either of these situations may happen with calls.
continue;
}
if (nodeIsCall)
{
GenTreeCall* call = node->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
sprintf_s(buf, sizeof(buf), "this in %s", compRegVarName(REG_ARG_0));
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallAddr)
{
displayOperand(operand, "calli tgt", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtControlExpr)
{
displayOperand(operand, "control expr", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallCookie)
{
displayOperand(operand, "cookie", operandArc, indentStack, prefixIndent);
}
else
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByNode(call, operand);
assert(curArgTabEntry);
if (!curArgTabEntry->isLateArg())
{
gtGetArgMsg(call, operand, curArgTabEntry->argNum, buf, sizeof(buf));
}
else
{
gtGetLateArgMsg(call, operand, curArgTabEntry->GetLateArgInx(), buf, sizeof(buf));
}
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_STORE_DYN_BLK))
{
if (operand == node->AsBlk()->Addr())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else if (operand == node->AsBlk()->Data())
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
else
{
assert(operand == node->AsStoreDynBlk()->gtDynamicSize);
displayOperand(operand, "size", operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_ASG))
{
if (operand == node->gtGetOp1())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
}
else
{
displayOperand(operand, "", operandArc, indentStack, prefixIndent);
}
operandArc = IIArc;
}
// Visit the operator
if (prefixMsg != nullptr)
{
printf("%s", prefixMsg);
}
const bool topOnly = true;
const bool isLIR = true;
gtDispTree(node, &indentStack, nullptr, topOnly, isLIR);
}
/*****************************************************************************/
#endif // DEBUG
/*****************************************************************************
*
* Check if the given node can be folded,
* and call the methods to perform the folding
*/
GenTree* Compiler::gtFoldExpr(GenTree* tree)
{
unsigned kind = tree->OperKind();
/* We must have a simple operation to fold */
// If we're in CSE, it's not safe to perform tree
// folding, since it could potentially change the
// set of considered CSE candidates.
if (optValnumCSE_phase)
{
return tree;
}
if (!(kind & GTK_SMPOP))
{
return tree;
}
GenTree* op1 = tree->AsOp()->gtOp1;
/* Filter out non-foldable trees that can have constant children */
assert(kind & (GTK_UNOP | GTK_BINOP));
switch (tree->gtOper)
{
case GT_RETFILT:
case GT_RETURN:
case GT_IND:
return tree;
default:
break;
}
/* try to fold the current node */
if ((kind & GTK_UNOP) && op1)
{
if (op1->OperIsConst())
{
return gtFoldExprConst(tree);
}
}
else if ((kind & GTK_BINOP) && op1 && tree->AsOp()->gtOp2 &&
// Don't take out conditionals for debugging
(opts.OptimizationEnabled() || !tree->OperIsCompare()))
{
GenTree* op2 = tree->AsOp()->gtOp2;
// The atomic operations are exempted here because they are never computable statically;
// one of their arguments is an address.
if (op1->OperIsConst() && op2->OperIsConst() && !tree->OperIsAtomicOp())
{
/* both nodes are constants - fold the expression */
return gtFoldExprConst(tree);
}
else if (op1->OperIsConst() || op2->OperIsConst())
{
/* at least one is a constant - see if we have a
* special operator that can use only one constant
* to fold - e.g. booleans */
return gtFoldExprSpecial(tree);
}
else if (tree->OperIsCompare())
{
/* comparisons of two local variables can sometimes be folded */
return gtFoldExprCompare(tree);
}
}
/* Return the original node (folded/bashed or not) */
return tree;
}
//------------------------------------------------------------------------
// gtFoldExprCall: see if a call is foldable
//
// Arguments:
// call - call to examine
//
// Returns:
// The original call if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
// Checks for calls to Type.op_Equality, Type.op_Inequality, and
// Enum.HasFlag, and if the call is to one of these,
// attempts to optimize.
GenTree* Compiler::gtFoldExprCall(GenTreeCall* call)
{
// Can only fold calls to special intrinsics.
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0)
{
return call;
}
// Defer folding if not optimizing.
if (opts.OptimizationDisabled())
{
return call;
}
// Check for a new-style jit intrinsic.
const NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
switch (ni)
{
case NI_System_Enum_HasFlag:
{
GenTree* thisOp = call->gtCallThisArg->GetNode();
GenTree* flagOp = call->gtCallArgs->GetNode();
GenTree* result = gtOptimizeEnumHasFlag(thisOp, flagOp);
if (result != nullptr)
{
return result;
}
break;
}
case NI_System_Type_op_Equality:
case NI_System_Type_op_Inequality:
{
noway_assert(call->TypeGet() == TYP_INT);
GenTree* op1 = call->gtCallArgs->GetNode();
GenTree* op2 = call->gtCallArgs->GetNext()->GetNode();
// If either operand is known to be a RuntimeType, this can be folded
GenTree* result = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2);
if (result != nullptr)
{
return result;
}
break;
}
default:
break;
}
return call;
}
//------------------------------------------------------------------------
// gtFoldTypeEqualityCall: see if a (potential) type equality call is foldable
//
// Arguments:
// isEq -- is it == or != operator
// op1 -- first argument to call
// op2 -- second argument to call
//
// Returns:
//    nullptr if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
//    If either operand is known to be a RuntimeType, then the type
// equality methods will simply check object identity and so we can
// fold the call into a simple compare of the call's operands.
GenTree* Compiler::gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2)
{
if ((gtGetTypeProducerKind(op1) == TPK_Unknown) && (gtGetTypeProducerKind(op2) == TPK_Unknown))
{
return nullptr;
}
const genTreeOps simpleOp = isEq ? GT_EQ : GT_NE;
JITDUMP("\nFolding call to Type:op_%s to a simple compare via %s\n", isEq ? "Equality" : "Inequality",
GenTree::OpName(simpleOp));
GenTree* compare = gtNewOperNode(simpleOp, TYP_INT, op1, op2);
return compare;
}
/*****************************************************************************
*
* Some comparisons can be folded:
*
* locA == locA
* classVarA == classVarA
* locA + locB == locB + locA
*
*/
GenTree* Compiler::gtFoldExprCompare(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
assert(tree->OperIsCompare());
/* Filter out cases that cannot be folded here */
/* Do not fold floats or doubles (e.g. NaN != Nan) */
if (varTypeIsFloating(op1->TypeGet()))
{
return tree;
}
// Currently we can only fold when the two subtrees exactly match
// and everything is side effect free.
//
if (((tree->gtFlags & GTF_SIDE_EFFECT) != 0) || !GenTree::Compare(op1, op2, true))
{
// No folding.
//
return tree;
}
// GTF_ORDER_SIDEEFF here may indicate volatile subtrees.
// Or it may indicate a non-null assertion prop into an indir subtree.
//
// Check the operands.
//
if ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0)
{
// If op1 is "volatile" and op2 is not, we can still fold.
//
const bool op1MayBeVolatile = (op1->gtFlags & GTF_ORDER_SIDEEFF) != 0;
const bool op2MayBeVolatile = (op2->gtFlags & GTF_ORDER_SIDEEFF) != 0;
if (!op1MayBeVolatile || op2MayBeVolatile)
{
// No folding.
//
return tree;
}
}
GenTree* cons;
switch (tree->gtOper)
{
case GT_EQ:
case GT_LE:
case GT_GE:
cons = gtNewIconNode(true); /* Folds to GT_CNS_INT(true) */
break;
case GT_NE:
case GT_LT:
case GT_GT:
cons = gtNewIconNode(false); /* Folds to GT_CNS_INT(false) */
break;
default:
assert(!"Unexpected relOp");
return tree;
}
/* The node has been folded into 'cons' */
JITDUMP("\nFolding comparison with identical operands:\n");
DISPTREE(tree);
if (fgGlobalMorph)
{
fgMorphTreeDone(cons);
}
else
{
cons->gtNext = tree->gtNext;
cons->gtPrev = tree->gtPrev;
}
JITDUMP("Bashed to %s:\n", cons->AsIntConCommon()->IconValue() ? "true" : "false");
DISPTREE(cons);
return cons;
}
//------------------------------------------------------------------------
// gtCreateHandleCompare: generate a type handle comparison
//
// Arguments:
// oper -- comparison operation (equal/not equal)
// op1 -- first operand
// op2 -- second operand
// typeCheckInliningResult -- indicates how the comparison should happen
//
// Returns:
// Type comparison tree
//
GenTree* Compiler::gtCreateHandleCompare(genTreeOps oper,
GenTree* op1,
GenTree* op2,
CorInfoInlineTypeCheck typeCheckInliningResult)
{
// If we can compare pointers directly, just emit the binary operation
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_PASS)
{
return gtNewOperNode(oper, TYP_INT, op1, op2);
}
assert(typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_USE_HELPER);
// Emit a call to a runtime helper
GenTreeCall::Use* helperArgs = gtNewCallArgs(op1, op2);
GenTree* ret = gtNewHelperCallNode(CORINFO_HELP_ARE_TYPES_EQUIVALENT, TYP_INT, helperArgs);
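// The helper returns a nonzero value when the types are equivalent; compare its
// result against zero so the overall tree matches the requested relop.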
if (oper == GT_EQ)
{
ret = gtNewOperNode(GT_NE, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
else
{
assert(oper == GT_NE);
ret = gtNewOperNode(GT_EQ, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
return ret;
}
//------------------------------------------------------------------------
// gtFoldTypeCompare: see if a type comparison can be further simplified
//
// Arguments:
// tree -- tree possibly comparing types
//
// Returns:
// An alternative tree if folding happens.
// Original tree otherwise.
//
// Notes:
// Checks for
// typeof(...) == obj.GetType()
// typeof(...) == typeof(...)
// obj1.GetType() == obj2.GetType()
//
// And potentially optimizes away the need to obtain actual
// RuntimeType objects to do the comparison.
GenTree* Compiler::gtFoldTypeCompare(GenTree* tree)
{
// Only handle EQ and NE
// (maybe relop vs null someday)
const genTreeOps oper = tree->OperGet();
if ((oper != GT_EQ) && (oper != GT_NE))
{
return tree;
}
// Screen for the right kinds of operands
GenTree* const op1 = tree->AsOp()->gtOp1;
const TypeProducerKind op1Kind = gtGetTypeProducerKind(op1);
if (op1Kind == TPK_Unknown)
{
return tree;
}
GenTree* const op2 = tree->AsOp()->gtOp2;
const TypeProducerKind op2Kind = gtGetTypeProducerKind(op2);
if (op2Kind == TPK_Unknown)
{
return tree;
}
// If both types are created via handles, we can simply compare
// handles instead of the types that they'd create.
if ((op1Kind == TPK_Handle) && (op2Kind == TPK_Handle))
{
JITDUMP("Optimizing compare of types-from-handles to instead compare handles\n");
GenTree* op1ClassFromHandle = tree->AsOp()->gtOp1->AsCall()->gtCallArgs->GetNode();
GenTree* op2ClassFromHandle = tree->AsOp()->gtOp2->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE cls1Hnd = NO_CLASS_HANDLE;
CORINFO_CLASS_HANDLE cls2Hnd = NO_CLASS_HANDLE;
// Try and find class handles from op1 and op2
cls1Hnd = gtGetHelperArgClassHandle(op1ClassFromHandle);
cls2Hnd = gtGetHelperArgClassHandle(op2ClassFromHandle);
// If we have both class handles, try and resolve the type equality test completely.
bool resolveFailed = false;
if ((cls1Hnd != NO_CLASS_HANDLE) && (cls2Hnd != NO_CLASS_HANDLE))
{
JITDUMP("Asking runtime to compare %p (%s) and %p (%s) for equality\n", dspPtr(cls1Hnd),
info.compCompHnd->getClassName(cls1Hnd), dspPtr(cls2Hnd), info.compCompHnd->getClassName(cls2Hnd));
TypeCompareState s = info.compCompHnd->compareTypesForEquality(cls1Hnd, cls2Hnd);
if (s != TypeCompareState::May)
{
// Type comparison result is known.
const bool typesAreEqual = (s == TypeCompareState::Must);
const bool operatorIsEQ = (oper == GT_EQ);
const int compareResult = operatorIsEQ ^ typesAreEqual ? 0 : 1;
JITDUMP("Runtime reports comparison is known at jit time: %u\n", compareResult);
GenTree* result = gtNewIconNode(compareResult);
return result;
}
else
{
resolveFailed = true;
}
}
if (resolveFailed)
{
JITDUMP("Runtime reports comparison is NOT known at jit time\n");
}
else
{
JITDUMP("Could not find handle for %s%s\n", (cls1Hnd == NO_CLASS_HANDLE) ? " cls1" : "",
(cls2Hnd == NO_CLASS_HANDLE) ? " cls2" : "");
}
// We can't answer the equality comparison definitively at jit
// time, but can still simplify the comparison.
//
// Find out how we can compare the two handles.
// NOTE: We're potentially passing NO_CLASS_HANDLE, but the runtime knows what to do with it here.
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(cls1Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
// If the first type needs helper, check the other type: it might be okay with a simple compare.
if (inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER)
{
inliningKind = info.compCompHnd->canInlineTypeCheck(cls2Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
}
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, op1ClassFromHandle, op2ClassFromHandle, inliningKind);
// Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
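// Both operands call GetType(); compare the objects' method tables directly
// instead of materializing RuntimeType instances.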
if ((op1Kind == TPK_GetType) && (op2Kind == TPK_GetType))
{
GenTree* arg1;
if (op1->OperGet() == GT_INTRINSIC)
{
arg1 = op1->AsUnOp()->gtOp1;
}
else
{
arg1 = op1->AsCall()->gtCallThisArg->GetNode();
}
arg1 = gtNewMethodTableLookup(arg1);
GenTree* arg2;
if (op2->OperGet() == GT_INTRINSIC)
{
arg2 = op2->AsUnOp()->gtOp1;
}
else
{
arg2 = op2->AsCall()->gtCallThisArg->GetNode();
}
arg2 = gtNewMethodTableLookup(arg2);
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(nullptr, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, arg1, arg2, inliningKind);
// Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
// If one operand creates a type from a handle and the other operand is fetching the type from an object,
// we can sometimes optimize the type compare into a simpler
// method table comparison.
//
// TODO: if other operand is null...
if (!(((op1Kind == TPK_GetType) && (op2Kind == TPK_Handle)) ||
((op1Kind == TPK_Handle) && (op2Kind == TPK_GetType))))
{
return tree;
}
GenTree* const opHandle = (op1Kind == TPK_Handle) ? op1 : op2;
GenTree* const opOther = (op1Kind == TPK_Handle) ? op2 : op1;
// Tunnel through the handle operand to get at the class handle involved.
GenTree* const opHandleArgument = opHandle->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE clsHnd = gtGetHelperArgClassHandle(opHandleArgument);
// If we couldn't find the class handle, give up.
if (clsHnd == NO_CLASS_HANDLE)
{
return tree;
}
// Ask the VM if this type can be equality tested by a simple method
// table comparison.
CorInfoInlineTypeCheck typeCheckInliningResult =
info.compCompHnd->canInlineTypeCheck(clsHnd, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_NONE)
{
return tree;
}
// We're good to go.
JITDUMP("Optimizing compare of obj.GetType()"
" and type-from-handle to compare method table pointer\n");
// opHandleArgument is the method table we're looking for.
GenTree* const knownMT = opHandleArgument;
// Fetch object method table from the object itself.
GenTree* objOp = nullptr;
// Note we may see intrinsified or regular calls to GetType
if (opOther->OperGet() == GT_INTRINSIC)
{
objOp = opOther->AsUnOp()->gtOp1;
}
else
{
objOp = opOther->AsCall()->gtCallThisArg->GetNode();
}
bool pIsExact = false;
bool pIsNonNull = false;
CORINFO_CLASS_HANDLE objCls = gtGetClassHandle(objOp, &pIsExact, &pIsNonNull);
// if both classes are "final" (e.g. System.String[]) we can replace the comparison
// with `true/false` + null check.
if ((objCls != NO_CLASS_HANDLE) && (pIsExact || impIsClassExact(objCls)))
{
TypeCompareState tcs = info.compCompHnd->compareTypesForEquality(objCls, clsHnd);
if (tcs != TypeCompareState::May)
{
const bool operatorIsEQ = oper == GT_EQ;
const bool typesAreEqual = tcs == TypeCompareState::Must;
GenTree* compareResult = gtNewIconNode((operatorIsEQ ^ typesAreEqual) ? 0 : 1);
if (!pIsNonNull)
{
// we still have to emit a null-check
// obj.GetType == typeof() -> (nullcheck) true/false
GenTree* nullcheck = gtNewNullCheck(objOp, compCurBB);
return gtNewOperNode(GT_COMMA, tree->TypeGet(), nullcheck, compareResult);
}
else if (objOp->gtFlags & GTF_ALL_EFFECT)
{
return gtNewOperNode(GT_COMMA, tree->TypeGet(), objOp, compareResult);
}
else
{
return compareResult;
}
}
}
// Fetch the method table from the object
GenTree* const objMT = gtNewMethodTableLookup(objOp);
// Compare the two method tables
GenTree* const compare = gtCreateHandleCompare(oper, objMT, knownMT, typeCheckInliningResult);
// Drop any now irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
// And we're done
return compare;
}
//------------------------------------------------------------------------
// gtGetHelperArgClassHandle: find the compile time class handle from
// a helper call argument tree
//
// Arguments:
// tree - tree that passes the handle to the helper
//
// Returns:
// The compile time class handle if known.
//
CORINFO_CLASS_HANDLE Compiler::gtGetHelperArgClassHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE result = NO_CLASS_HANDLE;
// Walk through any wrapping nop.
if ((tree->gtOper == GT_NOP) && (tree->gtType == TYP_I_IMPL))
{
tree = tree->AsOp()->gtOp1;
}
// The handle could be a literal constant
if ((tree->OperGet() == GT_CNS_INT) && (tree->TypeGet() == TYP_I_IMPL))
{
assert(tree->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)tree->AsIntCon()->gtCompileTimeHandle;
}
// Or the result of a runtime lookup
else if (tree->OperGet() == GT_RUNTIMELOOKUP)
{
result = tree->AsRuntimeLookup()->GetClassHandle();
}
// Or something reached indirectly
else if (tree->gtOper == GT_IND)
{
// The handle indirs we are looking for will be marked as non-faulting.
// Certain others (eg from refanytype) may not be.
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
GenTree* handleTreeInternal = tree->AsOp()->gtOp1;
if ((handleTreeInternal->OperGet() == GT_CNS_INT) && (handleTreeInternal->TypeGet() == TYP_I_IMPL))
{
// These handle constants should be class handles.
assert(handleTreeInternal->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)handleTreeInternal->AsIntCon()->gtCompileTimeHandle;
}
}
}
return result;
}
//------------------------------------------------------------------------
// gtFoldExprSpecial -- optimize binary ops with one constant operand
//
// Arguments:
// tree - tree to optimize
//
// Return value:
// Tree (possibly modified at root or below), or a new tree
// Any new tree is fully morphed, if necessary.
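//
// Notes:
//    For example, 'x * 1' folds to 'x', and 'x & 0' folds to '0' when 'x' has
//    no side effects.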
//
GenTree* Compiler::gtFoldExprSpecial(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
genTreeOps oper = tree->OperGet();
GenTree* op;
GenTree* cons;
ssize_t val;
assert(tree->OperKind() & GTK_BINOP);
/* Filter out operators that cannot be folded here */
if (oper == GT_CAST)
{
return tree;
}
/* We only consider TYP_INT for folding
* Do not fold pointer arithmetic (e.g. addressing modes!) */
if (oper != GT_QMARK && !varTypeIsIntOrI(tree->gtType))
{
return tree;
}
/* Find out which is the constant node */
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
/* Get the constant value */
val = cons->AsIntConCommon()->IconValue();
// Transforms that would drop op cannot be performed if op has side effects
bool opHasSideEffects = (op->gtFlags & GTF_SIDE_EFFECT) != 0;
// Helper function that creates a new IntCon node and morphs it, if required
auto NewMorphedIntConNode = [&](int value) -> GenTreeIntCon* {
GenTreeIntCon* icon = gtNewIconNode(value);
if (fgGlobalMorph)
{
fgMorphTreeDone(icon);
}
return icon;
};
// Here `op` is the non-constant operand, `cons` is the constant operand
// and `val` is the constant value.
switch (oper)
{
case GT_LE:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 <= x) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_GE:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x >= 0) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_LT:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x < 0) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
break;
case GT_GT:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 > x) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
FALLTHROUGH;
case GT_EQ:
case GT_NE:
// Optimize boxed value classes; these are always false. This IL is
// generated when a generic value is tested against null:
// <T> ... foo(T x) { ... if ((object)x == null) ...
if ((val == 0) && op->IsBoxedValue())
{
JITDUMP("\nAttempting to optimize BOX(valueType) %s null [%06u]\n", GenTree::OpName(oper),
dspTreeID(tree));
// We don't expect GT_GT with signed compares, and we
// can't predict the result if we do see it, since the
// boxed object addr could have its high bit set.
if ((oper == GT_GT) && !tree->IsUnsigned())
{
JITDUMP(" bailing; unexpected signed compare via GT_GT\n");
}
else
{
// The tree under the box must be side effect free
// since we will drop it if we optimize.
assert(!gtTreeHasSideEffects(op->AsBox()->BoxOp(), GTF_SIDE_EFFECT));
// See if we can optimize away the box and related statements.
GenTree* boxSourceTree = gtTryRemoveBoxUpstreamEffects(op);
bool didOptimize = (boxSourceTree != nullptr);
// If optimization succeeded, remove the box.
if (didOptimize)
{
// Set up the result of the compare.
int compareResult = 0;
if (oper == GT_GT)
{
// GT_GT(null, box) == false
// GT_GT(box, null) == true
compareResult = (op1 == op);
}
else if (oper == GT_EQ)
{
// GT_EQ(box, null) == false
// GT_EQ(null, box) == false
compareResult = 0;
}
else
{
assert(oper == GT_NE);
// GT_NE(box, null) == true
// GT_NE(null, box) == true
compareResult = 1;
}
JITDUMP("\nSuccess: replacing BOX(valueType) %s null with %d\n", GenTree::OpName(oper),
compareResult);
return NewMorphedIntConNode(compareResult);
}
}
}
else
{
return gtFoldBoxNullable(tree);
}
break;
case GT_ADD:
if (val == 0)
{
goto DONE_FOLD;
}
break;
case GT_MUL:
if (val == 1)
{
goto DONE_FOLD;
}
else if (val == 0)
{
/* Multiply by zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_DIV:
case GT_UDIV:
if ((op2 == cons) && (val == 1) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_SUB:
if ((op2 == cons) && (val == 0) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_AND:
if (val == 0)
{
/* AND with zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
else
{
/* The GTF_BOOLEAN flag is set for nodes that are part
* of a boolean expression, thus all their children
* are known to evaluate to only 0 or 1 */
if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1
* AND with 1 stays the same */
assert(val == 1);
goto DONE_FOLD;
}
}
break;
case GT_OR:
if (val == 0)
{
goto DONE_FOLD;
}
else if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1 - OR with 1 is 1 */
assert(val == 1);
/* OR with one - return the 'one' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
if (val == 0)
{
if (op2 == cons)
{
goto DONE_FOLD;
}
else if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_QMARK:
{
assert(op1 == cons && op2 == op && op2->gtOper == GT_COLON);
assert(op2->AsOp()->gtOp1 && op2->AsOp()->gtOp2);
assert(val == 0 || val == 1);
if (val)
{
op = op2->AsColon()->ThenNode();
}
else
{
op = op2->AsColon()->ElseNode();
}
// Clear colon flags only if the qmark itself is not conditionally executed
if ((tree->gtFlags & GTF_COLON_COND) == 0)
{
fgWalkTreePre(&op, gtClearColonCond);
}
}
goto DONE_FOLD;
default:
break;
}
/* The node is not foldable */
return tree;
DONE_FOLD:
/* The node has been folded into 'op' */
// If there was an assignment update, we just morphed it into
// a use; update the flags appropriately
if (op->gtOper == GT_LCL_VAR)
{
assert(tree->OperIs(GT_ASG) || (op->gtFlags & (GTF_VAR_USEASG | GTF_VAR_DEF)) == 0);
op->gtFlags &= ~(GTF_VAR_USEASG | GTF_VAR_DEF);
}
JITDUMP("\nFolding binary operator with a constant operand:\n");
DISPTREE(tree);
JITDUMP("Transformed into:\n");
DISPTREE(op);
return op;
}
//------------------------------------------------------------------------
// gtFoldBoxNullable -- optimize a boxed nullable feeding a compare to zero
//
// Arguments:
// tree - binop tree to potentially optimize, must be
// GT_GT, GT_EQ, or GT_NE
//
// Return value:
// Tree (possibly modified below the root).
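//
// Notes:
//    For example, 'BOX_NULLABLE(&x) == null' is rewritten so the comparison
//    reads x's 'hasValue' field instead of keeping the box.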
//
GenTree* Compiler::gtFoldBoxNullable(GenTree* tree)
{
assert(tree->OperKind() & GTK_BINOP);
assert(tree->OperIs(GT_GT, GT_EQ, GT_NE));
genTreeOps const oper = tree->OperGet();
if ((oper == GT_GT) && !tree->IsUnsigned())
{
return tree;
}
GenTree* const op1 = tree->AsOp()->gtOp1;
GenTree* const op2 = tree->AsOp()->gtOp2;
GenTree* op;
GenTree* cons;
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
ssize_t const val = cons->AsIntConCommon()->IconValue();
if (val != 0)
{
return tree;
}
if (!op->IsCall())
{
return tree;
}
GenTreeCall* const call = op->AsCall();
if (!call->IsHelperCall(this, CORINFO_HELP_BOX_NULLABLE))
{
return tree;
}
JITDUMP("\nAttempting to optimize BOX_NULLABLE(&x) %s null [%06u]\n", GenTree::OpName(oper), dspTreeID(tree));
// Get the address of the struct being boxed
GenTree* const arg = call->gtCallArgs->GetNext()->GetNode();
if (arg->OperIs(GT_ADDR) && ((arg->gtFlags & GTF_LATE_ARG) == 0))
{
CORINFO_CLASS_HANDLE nullableHnd = gtGetStructHandle(arg->AsOp()->gtOp1);
CORINFO_FIELD_HANDLE fieldHnd = info.compCompHnd->getFieldInClass(nullableHnd, 0);
// Replace the box with an access of the nullable 'hasValue' field.
JITDUMP("\nSuccess: replacing BOX_NULLABLE(&x) [%06u] with x.hasValue\n", dspTreeID(op));
GenTree* newOp = gtNewFieldRef(TYP_BOOL, fieldHnd, arg, 0);
if (op == op1)
{
tree->AsOp()->gtOp1 = newOp;
}
else
{
tree->AsOp()->gtOp2 = newOp;
}
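// The comparand is now the nullable's 'hasValue' field, so give the constant
// an int type to match.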
cons->gtType = TYP_INT;
}
return tree;
}
//------------------------------------------------------------------------
// gtTryRemoveBoxUpstreamEffects: given an unused value type box,
// try and remove the upstream allocation and unnecessary parts of
// the copy.
//
// Arguments:
// op - the box node to optimize
// options - controls whether and how trees are modified
// (see notes)
//
// Return Value:
// A tree representing the original value to box, if removal
// is successful/possible (but see note). nullptr if removal fails.
//
// Notes:
// Value typed box gets special treatment because it has associated
// side effects that can be removed if the box result is not used.
//
// By default (options == BR_REMOVE_AND_NARROW) this method will
//    try and remove unnecessary trees and will try and reduce remaining
// operations to the minimal set, possibly narrowing the width of
// loads from the box source if it is a struct.
//
// To perform a trial removal, pass BR_DONT_REMOVE. This can be
// useful to determine if this optimization should only be
// performed if some other conditions hold true.
//
// To remove but not alter the access to the box source, pass
// BR_REMOVE_BUT_NOT_NARROW.
//
// To remove and return the tree for the type handle used for
// the boxed newobj, pass BR_REMOVE_BUT_NOT_NARROW_WANT_TYPE_HANDLE.
// This can be useful when the only part of the box that is "live"
// is its type.
//
//    If removal fails, it is possible that a subsequent pass may be
// able to optimize. Blocking side effects may now be minimized
// (null or bounds checks might have been removed) or might be
// better known (inline return placeholder updated with the actual
// return expression). So the box is perhaps best left as is to
// help trigger this re-examination.
GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions options)
{
assert(op->IsBoxedValue());
// grab related parts for the optimization
GenTreeBox* box = op->AsBox();
Statement* asgStmt = box->gtAsgStmtWhenInlinedBoxValue;
Statement* copyStmt = box->gtCopyStmtWhenInlinedBoxValue;
JITDUMP("gtTryRemoveBoxUpstreamEffects: %s to %s of BOX (valuetype)"
" [%06u] (assign/newobj " FMT_STMT " copy " FMT_STMT ")\n",
(options == BR_DONT_REMOVE) ? "checking if it is possible" : "attempting",
(options == BR_MAKE_LOCAL_COPY) ? "make local unboxed version" : "remove side effects", dspTreeID(op),
asgStmt->GetID(), copyStmt->GetID());
// If we don't recognize the form of the assign, bail.
GenTree* asg = asgStmt->GetRootNode();
if (asg->gtOper != GT_ASG)
{
JITDUMP(" bailing; unexpected assignment op %s\n", GenTree::OpName(asg->gtOper));
return nullptr;
}
// If we're eventually going to return the type handle, remember it now.
GenTree* boxTypeHandle = nullptr;
if ((options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE) || (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE))
{
GenTree* asgSrc = asg->AsOp()->gtOp2;
genTreeOps asgSrcOper = asgSrc->OperGet();
// Allocation may be via AllocObj or via helper call, depending
// on when this is invoked and whether the jit is using AllocObj
// for R2R allocations.
if (asgSrcOper == GT_ALLOCOBJ)
{
GenTreeAllocObj* allocObj = asgSrc->AsAllocObj();
boxTypeHandle = allocObj->AsOp()->gtOp1;
}
else if (asgSrcOper == GT_CALL)
{
GenTreeCall* newobjCall = asgSrc->AsCall();
GenTreeCall::Use* newobjArgs = newobjCall->gtCallArgs;
// In R2R expansions the handle may not be an explicit operand to the helper,
// so we can't remove the box.
if (newobjArgs == nullptr)
{
assert(newobjCall->IsHelperCall(this, CORINFO_HELP_READYTORUN_NEW));
JITDUMP(" bailing; newobj via R2R helper\n");
return nullptr;
}
boxTypeHandle = newobjArgs->GetNode();
}
else
{
unreached();
}
assert(boxTypeHandle != nullptr);
}
// If we don't recognize the form of the copy, bail.
GenTree* copy = copyStmt->GetRootNode();
if (copy->gtOper != GT_ASG)
{
// GT_RET_EXPR is a tolerable temporary failure.
// The jit will revisit this optimization after
// inlining is done.
if (copy->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy %s\n", GenTree::OpName(copy->gtOper));
}
else
{
// Anything else is a missed case we should
// figure out how to handle. One known case
// is GT_COMMAs enclosing the GT_ASG we are
// looking for.
JITDUMP(" bailing; unexpected copy op %s\n", GenTree::OpName(copy->gtOper));
}
return nullptr;
}
// Handle case where we are optimizing the box into a local copy
if (options == BR_MAKE_LOCAL_COPY)
{
// Drill into the box to get at the box temp local and the box type
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
assert(lvaTable[boxTempLcl].lvType == TYP_REF);
CORINFO_CLASS_HANDLE boxClass = lvaTable[boxTempLcl].lvClassHnd;
assert(boxClass != nullptr);
// Verify that the copyDst has the expected shape
// (blk|obj|ind (add (boxTempLcl, ptr-size)))
//
// The shape here is constrained to the patterns we produce
// over in impImportAndPushBox for the inlined box case.
GenTree* copyDst = copy->AsOp()->gtOp1;
if (!copyDst->OperIs(GT_BLK, GT_IND, GT_OBJ))
{
JITDUMP("Unexpected copy dest operator %s\n", GenTree::OpName(copyDst->gtOper));
return nullptr;
}
GenTree* copyDstAddr = copyDst->AsOp()->gtOp1;
if (copyDstAddr->OperGet() != GT_ADD)
{
JITDUMP("Unexpected copy dest address tree\n");
return nullptr;
}
GenTree* copyDstAddrOp1 = copyDstAddr->AsOp()->gtOp1;
if ((copyDstAddrOp1->OperGet() != GT_LCL_VAR) || (copyDstAddrOp1->AsLclVarCommon()->GetLclNum() != boxTempLcl))
{
JITDUMP("Unexpected copy dest address 1st addend\n");
return nullptr;
}
GenTree* copyDstAddrOp2 = copyDstAddr->AsOp()->gtOp2;
if (!copyDstAddrOp2->IsIntegralConst(TARGET_POINTER_SIZE))
{
JITDUMP("Unexpected copy dest address 2nd addend\n");
return nullptr;
}
// Screening checks have all passed. Do the transformation.
//
// Retype the box temp to be a struct
JITDUMP("Retyping box temp V%02u to struct %s\n", boxTempLcl, eeGetClassName(boxClass));
lvaTable[boxTempLcl].lvType = TYP_UNDEF;
const bool isUnsafeValueClass = false;
lvaSetStruct(boxTempLcl, boxClass, isUnsafeValueClass);
var_types boxTempType = lvaTable[boxTempLcl].lvType;
// Remove the newobj and assignment to box temp
JITDUMP("Bashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Update the copy from the value to be boxed to the box temp
GenTree* newDst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
copyDst->AsOp()->gtOp1 = newDst;
// Return the address of the now-struct typed box temp
GenTree* retValue = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
return retValue;
}
// If the copy is a struct copy, make sure we know how to isolate
// any source side effects.
GenTree* copySrc = copy->AsOp()->gtOp2;
// If the copy source is from a pending inline, wait for it to resolve.
if (copySrc->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy source %s\n", GenTree::OpName(copySrc->gtOper));
return nullptr;
}
bool hasSrcSideEffect = false;
bool isStructCopy = false;
if (gtTreeHasSideEffects(copySrc, GTF_SIDE_EFFECT))
{
hasSrcSideEffect = true;
if (varTypeIsStruct(copySrc->gtType))
{
isStructCopy = true;
if ((copySrc->gtOper != GT_OBJ) && (copySrc->gtOper != GT_IND) && (copySrc->gtOper != GT_FIELD))
{
// We don't know how to handle other cases, yet.
JITDUMP(" bailing; unexpected copy source struct op with side effect %s\n",
GenTree::OpName(copySrc->gtOper));
return nullptr;
}
}
}
// If this was a trial removal, we're done.
if (options == BR_DONT_REMOVE)
{
return copySrc;
}
if (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
// Otherwise, proceed with the optimization.
//
// Change the assignment expression to a NOP.
JITDUMP("\nBashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Change the copy expression so it preserves key
// source side effects.
JITDUMP("\nBashing COPY [%06u]", dspTreeID(copy));
if (!hasSrcSideEffect)
{
// If there were no copy source side effects just bash
// the copy to a NOP.
copy->gtBashToNOP();
JITDUMP(" to NOP; no source side effects.\n");
}
else if (!isStructCopy)
{
// For scalar types, go ahead and produce the
// value as the copy is fairly cheap and likely
// the optimizer can trim things down to just the
// minimal side effect parts.
copyStmt->SetRootNode(copySrc);
JITDUMP(" to scalar read via [%06u]\n", dspTreeID(copySrc));
}
else
{
// For struct types read the first byte of the
// source struct; there's no need to read the
// entire thing, and no place to put it.
assert(copySrc->OperIs(GT_OBJ, GT_IND, GT_FIELD));
copyStmt->SetRootNode(copySrc);
if (options == BR_REMOVE_AND_NARROW || options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
JITDUMP(" to read first byte of struct via modified [%06u]\n", dspTreeID(copySrc));
gtChangeOperToNullCheck(copySrc, compCurBB);
}
else
{
JITDUMP(" to read entire struct via modified [%06u]\n", dspTreeID(copySrc));
}
}
if (fgStmtListThreaded)
{
fgSetStmtSeq(asgStmt);
fgSetStmtSeq(copyStmt);
}
// Box effects were successfully optimized.
if (options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
else
{
return copySrc;
}
}
//------------------------------------------------------------------------
// gtOptimizeEnumHasFlag: given the operands for a call to Enum.HasFlag,
// try and optimize the call to a simple and/compare tree.
//
// Arguments:
// thisOp - first argument to the call
// flagOp - second argument to the call
//
// Return Value:
//    A new cmp/and tree if successful. nullptr on failure.
//
// Notes:
// If successful, may allocate new temps and modify connected
// statements.
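//
//    For example, 'e.HasFlag(f)' with both operands boxed becomes
//    '(e & f) == f' on the unboxed values.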
GenTree* Compiler::gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp)
{
JITDUMP("Considering optimizing call to Enum.HasFlag....\n");
// Operands must be boxes
if (!thisOp->IsBoxedValue() || !flagOp->IsBoxedValue())
{
JITDUMP("bailing, need both inputs to be BOXes\n");
return nullptr;
}
// Operands must have same type
bool isExactThis = false;
bool isNonNullThis = false;
CORINFO_CLASS_HANDLE thisHnd = gtGetClassHandle(thisOp, &isExactThis, &isNonNullThis);
if (thisHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'this' operand\n");
return nullptr;
}
// A boxed thisOp should have exact type and non-null instance
assert(isExactThis);
assert(isNonNullThis);
bool isExactFlag = false;
bool isNonNullFlag = false;
CORINFO_CLASS_HANDLE flagHnd = gtGetClassHandle(flagOp, &isExactFlag, &isNonNullFlag);
if (flagHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'flag' operand\n");
return nullptr;
}
// A boxed flagOp should have exact type and non-null instance
assert(isExactFlag);
assert(isNonNullFlag);
if (flagHnd != thisHnd)
{
JITDUMP("bailing, operand types differ\n");
return nullptr;
}
// If we have a shared type instance we can't safely check type
// equality, so bail.
DWORD classAttribs = info.compCompHnd->getClassAttribs(thisHnd);
if (classAttribs & CORINFO_FLG_SHAREDINST)
{
JITDUMP("bailing, have shared instance type\n");
return nullptr;
}
// Simulate removing the box for thisOP. We need to know that it can
// be safely removed before we can optimize.
GenTree* thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_DONT_REMOVE);
if (thisVal == nullptr)
{
// Note we may fail here if the this operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'this' operand\n");
return nullptr;
}
// Do likewise with flagOp.
GenTree* flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_DONT_REMOVE);
if (flagVal == nullptr)
{
// Note we may fail here if the flag operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'flag' operand\n");
return nullptr;
}
// Only proceed when both box sources have the same actual type.
// (this rules out long/int mismatches)
if (genActualType(thisVal->TypeGet()) != genActualType(flagVal->TypeGet()))
{
JITDUMP("bailing, pre-boxed values have different types\n");
return nullptr;
}
// Yes, both boxes can be cleaned up. Optimize.
JITDUMP("Optimizing call to Enum.HasFlag\n");
// Undo the boxing of the Ops and prepare to operate directly
// on the pre-boxed values.
thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_REMOVE_BUT_NOT_NARROW);
flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_REMOVE_BUT_NOT_NARROW);
// Our trial removals above should guarantee successful removals here.
assert(thisVal != nullptr);
assert(flagVal != nullptr);
assert(genActualType(thisVal->TypeGet()) == genActualType(flagVal->TypeGet()));
// Type to use for optimized check
var_types type = genActualType(thisVal->TypeGet());
// The thisVal and flagVal trees come from earlier statements.
//
// Unless they are invariant values, we need to evaluate them both
// to temps at those points to safely transmit the values here.
//
// Also we need to use the flag twice, so we need two trees for it.
GenTree* thisValOpt = nullptr;
GenTree* flagValOpt = nullptr;
GenTree* flagValOptCopy = nullptr;
if (thisVal->IsIntegralConst())
{
thisValOpt = gtClone(thisVal);
assert(thisValOpt != nullptr);
}
else
{
const unsigned thisTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag this temp"));
GenTree* thisAsg = gtNewTempAssign(thisTmp, thisVal);
Statement* thisAsgStmt = thisOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
thisAsgStmt->SetRootNode(thisAsg);
thisValOpt = gtNewLclvNode(thisTmp, type);
}
if (flagVal->IsIntegralConst())
{
flagValOpt = gtClone(flagVal);
assert(flagValOpt != nullptr);
flagValOptCopy = gtClone(flagVal);
assert(flagValOptCopy != nullptr);
}
else
{
const unsigned flagTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag flag temp"));
GenTree* flagAsg = gtNewTempAssign(flagTmp, flagVal);
Statement* flagAsgStmt = flagOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
flagAsgStmt->SetRootNode(flagAsg);
flagValOpt = gtNewLclvNode(flagTmp, type);
flagValOptCopy = gtNewLclvNode(flagTmp, type);
}
// Turn the call into (thisValTmp & flagTmp) == flagTmp.
GenTree* andTree = gtNewOperNode(GT_AND, type, thisValOpt, flagValOpt);
GenTree* cmpTree = gtNewOperNode(GT_EQ, TYP_INT, andTree, flagValOptCopy);
JITDUMP("Optimized call to Enum.HasFlag\n");
return cmpTree;
}
/*****************************************************************************
*
* Fold the given constant tree.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTree* Compiler::gtFoldExprConst(GenTree* tree)
{
SSIZE_T i1, i2, itemp;
INT64 lval1, lval2, ltemp;
float f1, f2;
double d1, d2;
var_types switchType;
FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); // default unless we override it when folding
assert(tree->OperIsUnary() || tree->OperIsBinary());
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2IfPresent();
if (!opts.OptEnabled(CLFLG_CONSTANTFOLD))
{
return tree;
}
if (tree->OperIs(GT_NOP, GT_ALLOCOBJ, GT_RUNTIMELOOKUP))
{
return tree;
}
// This condition exists to preserve previous behavior.
// TODO-CQ: enable folding for bounds check nodes.
if (tree->OperIs(GT_BOUNDS_CHECK))
{
return tree;
}
#ifdef FEATURE_SIMD
if (tree->OperIs(GT_SIMD))
{
return tree;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (tree->OperIs(GT_HWINTRINSIC))
{
return tree;
}
#endif
if (tree->OperIsUnary())
{
assert(op1->OperIsConst());
switch (op1->TypeGet())
{
case TYP_INT:
// Fold constant INT unary operator.
if (!op1->AsIntCon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = (INT32)op1->AsIntCon()->IconValue();
// If we fold a unary oper, then the folded constant
// is considered a ConstantIndexField if op1 was one.
if ((op1->AsIntCon()->gtFieldSeq != nullptr) && op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
switch (tree->OperGet())
{
case GT_NOT:
i1 = ~i1;
break;
case GT_NEG:
i1 = -i1;
break;
case GT_BSWAP:
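// Full 32-bit byte reversal, e.g. 0x11223344 becomes 0x44332211.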
i1 = ((i1 >> 24) & 0xFF) | ((i1 >> 8) & 0xFF00) | ((i1 << 8) & 0xFF0000) |
((i1 << 24) & 0xFF000000);
break;
case GT_BSWAP16:
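// Swap only the low two bytes, e.g. 0x1122 becomes 0x2211; bits above 15 are cleared.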
i1 = ((i1 >> 8) & 0xFF) | ((i1 << 8) & 0xFF00);
break;
case GT_CAST:
// assert (genActualType(tree->CastToType()) == tree->TypeGet());
if (tree->gtOverflow() &&
CheckedOps::CastFromIntOverflows((INT32)i1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(i1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(i1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(i1));
goto CNS_INT;
case TYP_BOOL:
case TYP_UBYTE:
i1 = INT32(UINT8(i1));
goto CNS_INT;
case TYP_UINT:
case TYP_INT:
goto CNS_INT;
case TYP_ULONG:
if (tree->IsUnsigned())
{
lval1 = UINT64(UINT32(i1));
}
else
{
lval1 = UINT64(INT32(i1));
}
goto CNS_LONG;
case TYP_LONG:
if (tree->IsUnsigned())
{
lval1 = INT64(UINT32(i1));
}
else
{
lval1 = INT64(INT32(i1));
}
goto CNS_LONG;
case TYP_FLOAT:
if (tree->IsUnsigned())
{
f1 = forceCastToFloat(UINT32(i1));
}
else
{
f1 = forceCastToFloat(INT32(i1));
}
d1 = f1;
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (tree->IsUnsigned())
{
d1 = (double)UINT32(i1);
}
else
{
d1 = (double)INT32(i1);
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from int");
return tree;
}
default:
return tree;
}
goto CNS_INT;
case TYP_LONG:
// Fold constant LONG unary operator.
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
switch (tree->OperGet())
{
case GT_NOT:
lval1 = ~lval1;
break;
case GT_NEG:
lval1 = -lval1;
break;
case GT_BSWAP:
lval1 = ((lval1 >> 56) & 0xFF) | ((lval1 >> 40) & 0xFF00) | ((lval1 >> 24) & 0xFF0000) |
((lval1 >> 8) & 0xFF000000) | ((lval1 << 8) & 0xFF00000000) |
((lval1 << 24) & 0xFF0000000000) | ((lval1 << 40) & 0xFF000000000000) |
((lval1 << 56) & 0xFF00000000000000);
break;
case GT_CAST:
assert(tree->TypeIs(genActualType(tree->CastToType())));
if (tree->gtOverflow() &&
CheckedOps::CastFromLongOverflows(lval1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(lval1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(lval1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(lval1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(lval1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(lval1);
goto CNS_INT;
case TYP_UINT:
i1 = UINT32(lval1);
goto CNS_INT;
case TYP_ULONG:
case TYP_LONG:
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->IsUnsigned() && (lval1 < 0))
{
d1 = FloatingPointUtils::convertUInt64ToDouble((unsigned __int64)lval1);
}
else
{
d1 = (double)lval1;
}
if (tree->CastToType() == TYP_FLOAT)
{
f1 = forceCastToFloat(d1); // truncate precision
d1 = f1;
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from long");
return tree;
}
default:
return tree;
}
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
assert(op1->OperIs(GT_CNS_DBL));
// Fold constant DOUBLE unary operator.
d1 = op1->AsDblCon()->gtDconVal;
switch (tree->OperGet())
{
case GT_NEG:
d1 = -d1;
break;
case GT_CAST:
f1 = forceCastToFloat(d1);
if ((op1->TypeIs(TYP_DOUBLE) && CheckedOps::CastFromDoubleOverflows(d1, tree->CastToType())) ||
(op1->TypeIs(TYP_FLOAT) && CheckedOps::CastFromFloatOverflows(f1, tree->CastToType())))
{
// The conversion overflows. The ECMA spec says, in III 3.27, that
// "...if overflow occurs converting a floating point type to an integer, ...,
// the value returned is unspecified." However, it would at least be
// desirable to have the same value returned for casting an overflowing
// constant to an int as would be obtained by passing that constant as
// a parameter and then casting that parameter to an int type.
// Don't fold overflowing conversions, as the value returned by
// JIT's codegen doesn't always match the C compiler's cast result.
// We want the behavior to be the same with or without folding.
return tree;
}
assert(tree->TypeIs(genActualType(tree->CastToType())));
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(d1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(d1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(d1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(d1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(d1);
goto CNS_INT;
case TYP_UINT:
i1 = forceCastToUInt32(d1);
goto CNS_INT;
case TYP_LONG:
lval1 = INT64(d1);
goto CNS_LONG;
case TYP_ULONG:
lval1 = FloatingPointUtils::convertDoubleToUInt64(d1);
goto CNS_LONG;
case TYP_FLOAT:
d1 = forceCastToFloat(d1);
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (op1->TypeIs(TYP_FLOAT))
{
d1 = forceCastToFloat(d1); // Truncate precision.
}
goto CNS_DOUBLE; // Redundant cast.
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from double/float");
break;
}
return tree;
default:
return tree;
}
goto CNS_DOUBLE;
default:
// Not a foldable type - e.g. RET const.
return tree;
}
}
// We have a binary operator.
assert(tree->OperIsBinary());
assert(op2 != nullptr);
assert(op1->OperIsConst());
assert(op2->OperIsConst());
if (tree->OperIs(GT_COMMA))
{
return op2;
}
switchType = op1->TypeGet();
// Normally we will just switch on op1's type, but for the case where
// only op2 is a GC type and op1 is not a GC type, we use op2's type.
// This makes us handle the folding as a GC-type case.
if (varTypeIsGC(op2->gtType) && !varTypeIsGC(op1->gtType))
{
switchType = op2->TypeGet();
}
switch (switchType)
{
// Fold constant REF of BYREF binary operator.
// These can only be comparisons or null pointers.
case TYP_REF:
// String nodes are an RVA at this point.
if (op1->OperIs(GT_CNS_STR) || op2->OperIs(GT_CNS_STR))
{
// Fold "ldstr" ==/!= null.
if (op2->IsIntegralConst(0))
{
if (tree->OperIs(GT_EQ))
{
i1 = 0;
goto FOLD_COND;
}
if (tree->OperIs(GT_NE) || (tree->OperIs(GT_GT) && tree->IsUnsigned()))
{
i1 = 1;
goto FOLD_COND;
}
}
return tree;
}
FALLTHROUGH;
case TYP_BYREF:
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (i1 == i2);
goto FOLD_COND;
case GT_NE:
i1 = (i1 != i2);
goto FOLD_COND;
case GT_ADD:
noway_assert(!tree->TypeIs(TYP_REF));
// We only fold a GT_ADD that involves a null reference.
if ((op1->TypeIs(TYP_REF) && (i1 == 0)) || (op2->TypeIs(TYP_REF) && (i2 == 0)))
{
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Fold into GT_IND of null byref.
tree->BashToConst(0, TYP_BYREF);
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("\nFolded to null byref:\n");
DISPTREE(tree);
goto DONE;
}
break;
default:
break;
}
return tree;
// Fold constant INT binary operator.
case TYP_INT:
assert(tree->TypeIs(TYP_INT) || varTypeIsGC(tree) || tree->OperIs(GT_MKREFANY));
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (INT32(i1) == INT32(i2));
break;
case GT_NE:
i1 = (INT32(i1) != INT32(i2));
break;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) < UINT32(i2));
}
else
{
i1 = (INT32(i1) < INT32(i2));
}
break;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) <= UINT32(i2));
}
else
{
i1 = (INT32(i1) <= INT32(i2));
}
break;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) >= UINT32(i2));
}
else
{
i1 = (INT32(i1) >= INT32(i2));
}
break;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) > UINT32(i2));
}
else
{
i1 = (INT32(i1) > INT32(i2));
}
break;
case GT_ADD:
itemp = i1 + i2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
break;
case GT_SUB:
itemp = i1 - i2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
break;
case GT_MUL:
itemp = i1 * i2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
// For the very particular case of the "constant array index" pseudo-field, we
// assume that multiplication is by the field width, and preserves that field.
// This could obviously be made more robust by a more complicated set of annotations...
if ((op1->AsIntCon()->gtFieldSeq != nullptr) &&
op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
else if ((op2->AsIntCon()->gtFieldSeq != nullptr) &&
op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op1->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op2->AsIntCon()->gtFieldSeq;
}
i1 = itemp;
break;
case GT_OR:
i1 |= i2;
break;
case GT_XOR:
i1 ^= i2;
break;
case GT_AND:
i1 &= i2;
break;
case GT_LSH:
i1 <<= (i2 & 0x1f);
break;
case GT_RSH:
i1 >>= (i2 & 0x1f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
i1 = UINT32(i1) >> (i2 & 0x1f);
break;
case GT_ROL:
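// 32-bit rotate left, e.g. rotating 0x80000001 left by 1 folds to 0x00000003.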
i1 = (i1 << (i2 & 0x1f)) | (UINT32(i1) >> ((32 - i2) & 0x1f));
break;
case GT_ROR:
i1 = (i1 << ((32 - i2) & 0x1f)) | (UINT32(i1) >> (i2 & 0x1f));
break;
// DIV and MOD can throw an exception: if the division is by 0,
// or if there is overflow when dividing INT32_MIN by -1.
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (INT32(i2) == 0)
{
// Division by zero.
// We have to evaluate this expression and throw an exception.
return tree;
}
else if ((INT32(i2) == -1) && (UINT32(i1) == 0x80000000))
{
// Overflow Division.
// We have to evaluate this expression and throw an exception.
return tree;
}
if (tree->OperIs(GT_DIV))
{
i1 = INT32(i1) / INT32(i2);
}
else if (tree->OperIs(GT_MOD))
{
i1 = INT32(i1) % INT32(i2);
}
else if (tree->OperIs(GT_UDIV))
{
i1 = UINT32(i1) / UINT32(i2);
}
else
{
assert(tree->OperIs(GT_UMOD));
i1 = UINT32(i1) % UINT32(i2);
}
break;
default:
return tree;
}
// We get here after folding to a GT_CNS_INT type:
// change the node to the new type/value and make sure the node sizes are OK.
CNS_INT:
FOLD_COND:
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Also all conditional folding jumps here since the node hanging from
// GT_JTRUE has to be a GT_CNS_INT - value 0 or 1.
// Some operations are performed as 64 bit instead of 32 bit so the upper 32 bits
// need to be discarded. Since constant values are stored as ssize_t and the node
// has TYP_INT the result needs to be sign extended rather than zero extended.
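// For example, a folded 32-bit value of 0xFFFFFFFF is stored as the ssize_t value -1.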
tree->BashToConst(static_cast<int>(i1));
tree->AsIntCon()->gtFieldSeq = fieldSeq;
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to int constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant LONG binary operator.
case TYP_LONG:
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
// op1 is known to be a TYP_LONG, op2 is normally a TYP_LONG, unless we have a shift operator in which case
// it is a TYP_INT.
assert(op2->TypeIs(TYP_LONG, TYP_INT));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
// For the shift operators we can have an op2 that is a TYP_INT.
// Thus we cannot just use LngValue(), as it will assert on 32 bit if op2 is not GT_CNS_LNG.
lval2 = op2->AsIntConCommon()->IntegralValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (lval1 == lval2);
goto FOLD_COND;
case GT_NE:
i1 = (lval1 != lval2);
goto FOLD_COND;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) < UINT64(lval2));
}
else
{
i1 = (lval1 < lval2);
}
goto FOLD_COND;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) <= UINT64(lval2));
}
else
{
i1 = (lval1 <= lval2);
}
goto FOLD_COND;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) >= UINT64(lval2));
}
else
{
i1 = (lval1 >= lval2);
}
goto FOLD_COND;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) > UINT64(lval2));
}
else
{
i1 = (lval1 > lval2);
}
goto FOLD_COND;
case GT_ADD:
ltemp = lval1 + lval2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
#ifdef TARGET_64BIT
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
#endif
break;
case GT_SUB:
ltemp = lval1 - lval2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_MUL:
ltemp = lval1 * lval2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_OR:
lval1 |= lval2;
break;
case GT_XOR:
lval1 ^= lval2;
break;
case GT_AND:
lval1 &= lval2;
break;
case GT_LSH:
lval1 <<= (lval2 & 0x3f);
break;
case GT_RSH:
lval1 >>= (lval2 & 0x3f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
lval1 = UINT64(lval1) >> (lval2 & 0x3f);
break;
case GT_ROL:
lval1 = (lval1 << (lval2 & 0x3f)) | (UINT64(lval1) >> ((64 - lval2) & 0x3f));
break;
case GT_ROR:
lval1 = (lval1 << ((64 - lval2) & 0x3f)) | (UINT64(lval1) >> (lval2 & 0x3f));
break;
// Both DIV and IDIV on x86 raise an exception for min_int (and min_long) / -1. So we preserve
// that behavior here.
case GT_DIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 /= lval2;
break;
case GT_MOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 %= lval2;
break;
case GT_UDIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) / UINT64(lval2);
break;
case GT_UMOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) % UINT64(lval2);
break;
default:
return tree;
}
CNS_LONG:
#if !defined(TARGET_64BIT)
if (fieldSeq != FieldSeqStore::NotAField())
{
assert(!"Field sequences on CNS_LNG nodes!?");
return tree;
}
#endif // !defined(TARGET_64BIT)
JITDUMP("\nFolding long operator with constant nodes into a constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_NATIVELONG] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(lval1);
#ifdef TARGET_64BIT
tree->AsIntCon()->gtFieldSeq = fieldSeq;
#endif
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to long constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant FLOAT or DOUBLE binary operator
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->gtOverflowEx())
{
return tree;
}
assert(op1->OperIs(GT_CNS_DBL));
d1 = op1->AsDblCon()->gtDconVal;
assert(varTypeIsFloating(op2->TypeGet()));
assert(op2->OperIs(GT_CNS_DBL));
d2 = op2->AsDblCon()->gtDconVal;
// Special case - check if we have NaN operands.
// For comparisons, if it is not an unordered operation, always return 0.
// For unordered operations (i.e. the GTF_RELOP_NAN_UN flag is set)
// the result is always true - return 1.
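// For example, an ordered (NaN == NaN) folds to 0, while an unordered (NaN != NaN) folds to 1.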
if (_isnan(d1) || _isnan(d2))
{
JITDUMP("Double operator(s) is NaN\n");
if (tree->OperIsCompare())
{
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
// Unordered comparison with NaN always succeeds.
i1 = 1;
goto FOLD_COND;
}
else
{
// Normal comparison with NaN always fails.
i1 = 0;
goto FOLD_COND;
}
}
}
switch (tree->OperGet())
{
case GT_EQ:
i1 = (d1 == d2);
goto FOLD_COND;
case GT_NE:
i1 = (d1 != d2);
goto FOLD_COND;
case GT_LT:
i1 = (d1 < d2);
goto FOLD_COND;
case GT_LE:
i1 = (d1 <= d2);
goto FOLD_COND;
case GT_GE:
i1 = (d1 >= d2);
goto FOLD_COND;
case GT_GT:
i1 = (d1 > d2);
goto FOLD_COND;
// Floating point arithmetic should be done in the declared
// precision while doing constant folding. Even though TYP_FLOAT
// constants are stored as double constants, double constants should
// be converted to float before performing float arithmetic. Here is
// an example case where performing the arithmetic in double precision
// would lead to incorrect results.
//
// Example:
// float a = float.MaxValue;
// float b = a*a; This will produce +inf in single precision and 1.1579207543382391e+077 in double
// precision.
// float c = b/b; This will produce NaN in single precision and 1 in double precision.
case GT_ADD:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 + f2);
}
else
{
d1 += d2;
}
break;
case GT_SUB:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 - f2);
}
else
{
d1 -= d2;
}
break;
case GT_MUL:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 * f2);
}
else
{
d1 *= d2;
}
break;
case GT_DIV:
// We do not fold division by zero, even for floating point.
// This is because the result will be platform-dependent for an expression like 0d / 0d.
if (d2 == 0)
{
return tree;
}
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 / f2);
}
else
{
d1 /= d2;
}
break;
default:
return tree;
}
CNS_DOUBLE:
JITDUMP("\nFolding fp operator with constant nodes into a fp constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_DBL] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(d1, tree->TypeGet());
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to fp constant:\n");
DISPTREE(tree);
goto DONE;
default:
// Not a foldable type.
return tree;
}
DONE:
// Make sure no side effect flags are set on this constant node.
tree->gtFlags &= ~GTF_ALL_EFFECT;
return tree;
INTEGRAL_OVF:
// This operation is going to cause an overflow exception. Morph into
// an overflow helper. Put a dummy constant value for code generation.
//
// We could remove all subsequent trees in the current basic block,
// unless this node is a child of GT_COLON
//
// NOTE: Since the folded value is not constant we should not change the
// "tree" node - otherwise we confuse the logic that checks if the folding
// was successful - instead use one of the operands, e.g. op1.
// Don't fold overflow operations if not global morph phase.
// The reason for this is that this optimization is replacing a gentree node
// with another new gentree node. Say a GT_CALL(arglist) has one 'arg'
// involving overflow arithmetic. During assertion prop, it is possible
// that the 'arg' could be constant folded and the result could lead to an
// overflow. In such a case 'arg' will get replaced with GT_COMMA node
// but fgMorphArgs() - see the logic around "if(lateArgsComputed)" - doesn't
// update the args table. For this reason, this optimization is enabled only
// for global morphing phase.
//
// TODO-CQ: Once fgMorphArgs() is fixed this restriction could be removed.
if (!fgGlobalMorph)
{
assert(tree->gtOverflow());
return tree;
}
var_types type = genActualType(tree->TypeGet());
op1 = type == TYP_LONG ? gtNewLconNode(0) : gtNewIconNode(0);
if (vnStore != nullptr)
{
op1->gtVNPair.SetBoth(vnStore->VNZeroForType(type));
}
JITDUMP("\nFolding binary operator with constant nodes into a comma throw:\n");
DISPTREE(tree);
// We will change the cast to a GT_COMMA and attach the exception helper as AsOp()->gtOp1.
// The constant expression zero becomes op2.
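// That is, the final shape is COMMA(CALL CORINFO_HELP_OVERFLOW, zero constant of the tree's type).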
assert(tree->gtOverflow());
assert(tree->OperIs(GT_ADD, GT_SUB, GT_CAST, GT_MUL));
assert(op1 != nullptr);
op2 = op1;
op1 = gtNewHelperCallNode(CORINFO_HELP_OVERFLOW, TYP_VOID, gtNewCallArgs(gtNewIconNode(compCurBB->bbTryIndex)));
// op1 is a call to the JIT helper that throws an Overflow exception.
// Attach the ExcSet for VNF_OverflowExc(Void) to this call.
if (vnStore != nullptr)
{
op1->gtVNPair = vnStore->VNPWithExc(ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()),
vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc,
vnStore->VNPForVoid())));
}
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), op1, op2);
return tree;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//------------------------------------------------------------------------
// gtNewTempAssign: Create an assignment of the given value to a temp.
//
// Arguments:
// tmp - local number for a compiler temp
// val - value to assign to the temp
// pAfterStmt - statement to insert any additional statements after
// di - debug info for new statements
// block - block to insert any additional statements in
//
// Return Value:
// Normally a new assignment node.
// However may return a nop node if val is simply a reference to the temp.
//
// Notes:
// Self-assignments may be represented via NOPs.
//
// May update the type of the temp, if it was previously unknown.
//
// May set compFloatingPointUsed.
GenTree* Compiler::gtNewTempAssign(
unsigned tmp, GenTree* val, Statement** pAfterStmt, const DebugInfo& di, BasicBlock* block)
{
// Self-assignment is a nop.
if (val->OperGet() == GT_LCL_VAR && val->AsLclVarCommon()->GetLclNum() == tmp)
{
return gtNewNothingNode();
}
LclVarDsc* varDsc = lvaGetDesc(tmp);
if (varDsc->TypeGet() == TYP_I_IMPL && val->TypeGet() == TYP_BYREF)
{
impBashVarAddrsToI(val);
}
var_types valTyp = val->TypeGet();
if (val->OperGet() == GT_LCL_VAR && lvaTable[val->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
valTyp = lvaGetRealType(val->AsLclVar()->GetLclNum());
val->gtType = valTyp;
}
var_types dstTyp = varDsc->TypeGet();
/* If the variable's lvType is not yet set then set it here */
if (dstTyp == TYP_UNDEF)
{
varDsc->lvType = dstTyp = genActualType(valTyp);
#if FEATURE_SIMD
if (varTypeIsSIMD(dstTyp))
{
varDsc->lvSIMDType = 1;
}
#endif
}
#ifdef DEBUG
// Make sure the actual types match.
if (genActualType(valTyp) != genActualType(dstTyp))
{
// Plus some other exceptions that are apparently legal:
// 1) TYP_REF or BYREF = TYP_I_IMPL
bool ok = false;
if (varTypeIsGC(dstTyp) && (valTyp == TYP_I_IMPL))
{
ok = true;
}
// 2) TYP_DOUBLE = TYP_FLOAT or TYP_FLOAT = TYP_DOUBLE
else if (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))
{
ok = true;
}
// 3) TYP_BYREF = TYP_REF when object stack allocation is enabled
else if (JitConfig.JitObjectStackAllocation() && (dstTyp == TYP_BYREF) && (valTyp == TYP_REF))
{
ok = true;
}
else if (!varTypeIsGC(dstTyp) && (genTypeSize(valTyp) == genTypeSize(dstTyp)))
{
// We can have assignments that require a change of register file, e.g. for arguments
// and call returns. Lowering and Codegen will handle these.
ok = true;
}
else if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_INT))
{
// It could come from `ASG(struct, 0)` that was propagated to `RETURN struct(0)`,
// and now it is merging to a struct again.
assert(tmp == genReturnLocal);
ok = true;
}
else if (varTypeIsSIMD(dstTyp) && (valTyp == TYP_STRUCT))
{
assert(val->IsCall());
ok = true;
}
if (!ok)
{
gtDispTree(val);
assert(!"Incompatible types for gtNewTempAssign");
}
}
#endif
// Added this noway_assert for runtime issue 44895, to protect against silent bad codegen
//
if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_REF))
{
noway_assert(!"Incompatible types for gtNewTempAssign");
}
// Floating Point assignments can be created during inlining
// see "Zero init inlinee locals:" in fgInlinePrependStatements
// thus we may need to set compFloatingPointUsed to true here.
//
if (varTypeUsesFloatReg(dstTyp) && (compFloatingPointUsed == false))
{
compFloatingPointUsed = true;
}
/* Create the assignment node */
GenTree* asg;
GenTree* dest = gtNewLclvNode(tmp, dstTyp);
dest->gtFlags |= GTF_VAR_DEF;
// With first-class structs, we should be propagating the class handle on all non-primitive
// struct types. We don't have a convenient way to do that for all SIMD temps, since some
// internal trees use SIMD types that are not used by the input IL. In this case, we allow
// a null type handle and derive the necessary information about the type from its varType.
CORINFO_CLASS_HANDLE valStructHnd = gtGetStructHandleIfPresent(val);
if (varTypeIsStruct(varDsc) && (valStructHnd == NO_CLASS_HANDLE) && !varTypeIsSIMD(valTyp))
{
// There are 2 special cases:
// 1. we have lost the classHandle from a FIELD node because the parent struct has overlapping fields,
//    and the field was transformed into a GT_IND or GT_LCL_FLD;
// 2. we are propagating `ASG(struct V01, 0)` to `RETURN(struct V01)`; a `CNS_INT` doesn't have a `structHnd`;
// in these cases, we can use the type of the merged return for the assignment.
assert(val->gtEffectiveVal(true)->OperIs(GT_IND, GT_LCL_FLD, GT_CNS_INT));
assert(tmp == genReturnLocal);
valStructHnd = lvaGetStruct(genReturnLocal);
assert(valStructHnd != NO_CLASS_HANDLE);
}
if ((valStructHnd != NO_CLASS_HANDLE) && val->IsConstInitVal())
{
asg = gtNewAssignNode(dest, val);
}
else if (varTypeIsStruct(varDsc) && ((valStructHnd != NO_CLASS_HANDLE) || varTypeIsSIMD(valTyp)))
{
// The struct value may be a child of a GT_COMMA due to explicit null checks of indirs/fields.
GenTree* valx = val->gtEffectiveVal(/*commaOnly*/ true);
if (valStructHnd != NO_CLASS_HANDLE)
{
lvaSetStruct(tmp, valStructHnd, false);
}
else
{
assert(valx->gtOper != GT_OBJ);
}
dest->gtFlags |= GTF_DONT_CSE;
valx->gtFlags |= GTF_DONT_CSE;
asg = impAssignStruct(dest, val, valStructHnd, (unsigned)CHECK_SPILL_NONE, pAfterStmt, di, block);
}
else
{
// We may have a scalar type variable assigned a struct value, e.g. a 'genReturnLocal'
// when the ABI calls for returning a struct as a primitive type.
// TODO-1stClassStructs: When we stop "lying" about the types for ABI purposes, the
// 'genReturnLocal' should be the original struct type.
assert(!varTypeIsStruct(valTyp) || ((valStructHnd != NO_CLASS_HANDLE) &&
(typGetObjLayout(valStructHnd)->GetSize() == genTypeSize(varDsc))));
asg = gtNewAssignNode(dest, val);
}
if (compRationalIRForm)
{
Rationalizer::RewriteAssignmentIntoStoreLcl(asg->AsOp());
}
return asg;
}
/*****************************************************************************
*
* Create a helper call to access a COM field (iff 'assg' is non-zero this is
* an assignment and 'assg' is the new value).
*/
GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp,
CORINFO_CLASS_HANDLE structType,
GenTree* assg)
{
assert(pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_ADDR_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDR_HELPER);
/* If we can't access it directly, we need to call a helper function */
GenTreeCall::Use* args = nullptr;
var_types helperType = TYP_BYREF;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_SET)
{
assert(assg != nullptr);
// helper needs pointer to struct, not struct itself
if (pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(structType != nullptr);
assg = impGetStructAddr(assg, structType, (unsigned)CHECK_SPILL_ALL, true);
}
else if (lclTyp == TYP_DOUBLE && assg->TypeGet() == TYP_FLOAT)
{
assg = gtNewCastNode(TYP_DOUBLE, assg, false, TYP_DOUBLE);
}
else if (lclTyp == TYP_FLOAT && assg->TypeGet() == TYP_DOUBLE)
{
assg = gtNewCastNode(TYP_FLOAT, assg, false, TYP_FLOAT);
}
args = gtNewCallArgs(assg);
helperType = TYP_VOID;
}
else if (access & CORINFO_ACCESS_GET)
{
helperType = lclTyp;
// The calling convention for the helper does not take into
// account optimization of primitive structs.
if ((pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT) && !varTypeIsStruct(lclTyp))
{
helperType = TYP_STRUCT;
}
}
}
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT || pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(pFieldInfo->structType != nullptr);
args = gtPrependNewCallArg(gtNewIconEmbClsHndNode(pFieldInfo->structType), args);
}
GenTree* fieldHnd = impTokenToHandle(pResolvedToken);
if (fieldHnd == nullptr)
{ // compDonotInline()
return nullptr;
}
args = gtPrependNewCallArg(fieldHnd, args);
// If it's a static field, we shouldn't have an object node
// If it's an instance field, we have an object node
assert((pFieldInfo->fieldAccessor != CORINFO_FIELD_STATIC_ADDR_HELPER) ^ (objPtr == nullptr));
if (objPtr != nullptr)
{
args = gtPrependNewCallArg(objPtr, args);
}
GenTreeCall* call = gtNewHelperCallNode(pFieldInfo->helper, genActualType(helperType), args);
#if FEATURE_MULTIREG_RET
if (varTypeIsStruct(call))
{
call->InitializeStructReturnType(this, structType, call->GetUnmanagedCallConv());
}
#endif // FEATURE_MULTIREG_RET
GenTree* result = call;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_GET)
{
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT)
{
if (!varTypeIsStruct(lclTyp))
{
// get the result as primitive type
result = impGetStructAddr(result, structType, (unsigned)CHECK_SPILL_ALL, true);
result = gtNewOperNode(GT_IND, lclTyp, result);
}
}
else if (varTypeIsIntegral(lclTyp) && genTypeSize(lclTyp) < genTypeSize(TYP_INT))
{
// The helper does not extend the small return types.
result = gtNewCastNode(genActualType(lclTyp), result, false, lclTyp);
}
}
}
else
{
// OK, now do the indirection
if (access & CORINFO_ACCESS_GET)
{
if (varTypeIsStruct(lclTyp))
{
result = gtNewObjNode(structType, result);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
}
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF);
}
else if (access & CORINFO_ACCESS_SET)
{
if (varTypeIsStruct(lclTyp))
{
result = impAssignStructPtr(result, assg, structType, (unsigned)CHECK_SPILL_ALL);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
result = gtNewAssignNode(result, assg);
}
}
}
return result;
}
/*****************************************************************************
*
 * Return true if the given node (excluding child trees) contains side effects.
* Note that it does not recurse, and children need to be handled separately.
* It may return false even if the node has GTF_SIDE_EFFECT (because of its children).
*
* Similar to OperMayThrow() (but handles GT_CALLs specially), but considers
* assignments too.
*/
bool Compiler::gtNodeHasSideEffects(GenTree* tree, GenTreeFlags flags)
{
if (flags & GTF_ASG)
{
// TODO-Bug: This only checks for GT_ASG/GT_STORE_DYN_BLK but according to OperRequiresAsgFlag
// there are many more opers that are considered to have an assignment side effect: atomic ops
// (GT_CMPXCHG & co.), GT_MEMORYBARRIER (not classified as an atomic op) and HW intrinsic
// memory stores. Atomic ops have special handling in gtExtractSideEffList but the others
// will simply be dropped if they are ever subject to an "extract side effects" operation.
// It is possible that the reason no bugs have yet been observed in this area is that the
// other nodes are likely to always be tree roots.
if (tree->OperIs(GT_ASG, GT_STORE_DYN_BLK))
{
return true;
}
}
// Are there only GTF_CALL side effects remaining? (and no other side effect kinds)
if (flags & GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
GenTreeCall* const call = tree->AsCall();
const bool ignoreExceptions = (flags & GTF_EXCEPT) == 0;
const bool ignoreCctors = (flags & GTF_IS_IN_CSE) != 0; // We can CSE helpers that run cctors.
if (!call->HasSideEffects(this, ignoreExceptions, ignoreCctors))
{
// If this call is otherwise side effect free, check its arguments.
for (GenTreeCall::Use& use : call->Args())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// I'm a little worried that args that assign to temps that are late args will look like
// side effects...but better to be conservative for now.
for (GenTreeCall::Use& use : call->LateArgs())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// Otherwise:
return false;
}
// Otherwise the GT_CALL is considered to have side-effects.
return true;
}
}
if (flags & GTF_EXCEPT)
{
if (tree->OperMayThrow(this))
{
return true;
}
}
// Expressions declared as CSE by (e.g.) hoisting code are considered to have relevant side
// effects (if we care about GTF_MAKE_CSE).
if ((flags & GTF_MAKE_CSE) && (tree->gtFlags & GTF_MAKE_CSE))
{
return true;
}
return false;
}
/*****************************************************************************
* Returns true if the expr tree has any side effects.
*/
bool Compiler::gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags /* = GTF_SIDE_EFFECT*/)
{
// These are the side effect flags that we care about for this tree
GenTreeFlags sideEffectFlags = tree->gtFlags & flags;
// Does this tree have any Side-effect flags set that we care about?
if (sideEffectFlags == 0)
{
// no it doesn't..
return false;
}
if (sideEffectFlags == GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
// Generally all trees that contain GT_CALL nodes are considered to have side-effects.
//
if (tree->AsCall()->gtCallType == CT_HELPER)
{
// If this node is a helper call we may not care about the side-effects.
// Note that gtNodeHasSideEffects checks the side effects of the helper itself
// as well as the side effects of its arguments.
return gtNodeHasSideEffects(tree, flags);
}
}
else if (tree->OperGet() == GT_INTRINSIC)
{
if (gtNodeHasSideEffects(tree, flags))
{
return true;
}
if (gtNodeHasSideEffects(tree->AsOp()->gtOp1, flags))
{
return true;
}
if ((tree->AsOp()->gtOp2 != nullptr) && gtNodeHasSideEffects(tree->AsOp()->gtOp2, flags))
{
return true;
}
return false;
}
}
return true;
}
GenTree* Compiler::gtBuildCommaList(GenTree* list, GenTree* expr)
{
// 'list' starts off as null,
// and when it is null we haven't started the list yet.
//
if (list != nullptr)
{
// Create a GT_COMMA that appends 'expr' in front of the remaining set of expressions in (*list)
GenTree* result = gtNewOperNode(GT_COMMA, TYP_VOID, expr, list);
// Set the flags in the comma node
result->gtFlags |= (list->gtFlags & GTF_ALL_EFFECT);
result->gtFlags |= (expr->gtFlags & GTF_ALL_EFFECT);
DBEXEC(fgGlobalMorph, result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
// 'list' and 'expr' should have valuenumbers defined for both or for neither one (unless we are remorphing,
// in which case a prior transform involving either node may have discarded or otherwise invalidated the value
// numbers).
assert((list->gtVNPair.BothDefined() == expr->gtVNPair.BothDefined()) || !fgGlobalMorph);
// Set the ValueNumber 'gtVNPair' for the new GT_COMMA node
//
if (list->gtVNPair.BothDefined() && expr->gtVNPair.BothDefined())
{
// The result of a GT_COMMA node is op2; the normal value number is op2vnp.
// But we also need to include the union of side effects from op1 and op2,
// which we compute into exceptions_vnp.
ValueNumPair op1vnp;
ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
ValueNumPair op2vnp;
ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(expr->gtVNPair, &op1vnp, &op1Xvnp);
vnStore->VNPUnpackExc(list->gtVNPair, &op2vnp, &op2Xvnp);
ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet();
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op1Xvnp);
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp);
result->gtVNPair = vnStore->VNPWithExc(op2vnp, exceptions_vnp);
}
return result;
}
else
{
// The 'expr' will start the list of expressions
return expr;
}
}
//------------------------------------------------------------------------
// gtExtractSideEffList: Extracts side effects from the given expression.
//
// Arguments:
// expr - the expression tree to extract side effects from
// pList - pointer to a (possibly null) GT_COMMA list that
// will contain the extracted side effects
// flags - side effect flags to be considered
// ignoreRoot - ignore side effects on the expression root node
//
// Notes:
// Side effects are prepended to the GT_COMMA list such that op1 of
// each comma node holds the side effect tree and op2 points to the
// next comma node. The original side effect execution order is preserved.
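// For example, extracting side effects SE1 and SE2 (in that execution order) into an
//    initially empty list yields *pList == COMMA(SE1, SE2), preserving execution order.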
//
void Compiler::gtExtractSideEffList(GenTree* expr,
GenTree** pList,
GenTreeFlags flags /* = GTF_SIDE_EFFECT*/,
bool ignoreRoot /* = false */)
{
class SideEffectExtractor final : public GenTreeVisitor<SideEffectExtractor>
{
public:
const GenTreeFlags m_flags;
ArrayStack<GenTree*> m_sideEffects;
enum
{
DoPreOrder = true,
UseExecutionOrder = true
};
SideEffectExtractor(Compiler* compiler, GenTreeFlags flags)
: GenTreeVisitor(compiler), m_flags(flags), m_sideEffects(compiler->getAllocator(CMK_SideEffects))
{
}
fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
GenTree* node = *use;
bool treeHasSideEffects = m_compiler->gtTreeHasSideEffects(node, m_flags);
if (treeHasSideEffects)
{
if (m_compiler->gtNodeHasSideEffects(node, m_flags))
{
PushSideEffects(node);
if (node->OperIsBlk() && !node->OperIsStoreBlk())
{
JITDUMP("Replace an unused OBJ/BLK node [%06d] with a NULLCHECK\n", dspTreeID(node));
m_compiler->gtChangeOperToNullCheck(node, m_compiler->compCurBB);
}
return Compiler::WALK_SKIP_SUBTREES;
}
// TODO-Cleanup: These have GTF_ASG set but for some reason gtNodeHasSideEffects ignores
// them. See the related gtNodeHasSideEffects comment as well.
// Also, these nodes must always be preserved, no matter what side effect flags are passed
// in. But then it should never be the case that gtExtractSideEffList gets called without
// specifying GTF_ASG so there doesn't seem to be any reason to be inconsistent with
// gtNodeHasSideEffects and make this check unconditionally.
if (node->OperIsAtomicOp())
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
if ((m_flags & GTF_EXCEPT) != 0)
{
// Special case - GT_ADDR of GT_IND nodes of TYP_STRUCT have to be kept together.
if (node->OperIs(GT_ADDR) && node->gtGetOp1()->OperIsIndir() &&
(node->gtGetOp1()->TypeGet() == TYP_STRUCT))
{
JITDUMP("Keep the GT_ADDR and GT_IND together:\n");
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
}
// Generally all GT_CALL nodes are considered to have side-effects.
// So if we get here it must be a helper call that we decided it does
// not have side effects that we needed to keep.
assert(!node->OperIs(GT_CALL) || (node->AsCall()->gtCallType == CT_HELPER));
}
if ((m_flags & GTF_IS_IN_CSE) != 0)
{
// If we're doing CSE then we also need to unmark CSE nodes. This will fail for CSE defs,
// those need to be extracted as if they're side effects.
if (!UnmarkCSE(node))
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
// The existence of CSE defs and uses is not propagated up the tree like side
// effects are. We need to continue visiting the tree as if it has side effects.
treeHasSideEffects = true;
}
return treeHasSideEffects ? Compiler::WALK_CONTINUE : Compiler::WALK_SKIP_SUBTREES;
}
private:
bool UnmarkCSE(GenTree* node)
{
assert(m_compiler->optValnumCSE_phase);
if (m_compiler->optUnmarkCSE(node))
{
// The call to optUnmarkCSE(node) should have cleared any CSE info.
assert(!IS_CSE_INDEX(node->gtCSEnum));
return true;
}
else
{
assert(IS_CSE_DEF(node->gtCSEnum));
#ifdef DEBUG
if (m_compiler->verbose)
{
printf("Preserving the CSE def #%02d at ", GET_CSE_INDEX(node->gtCSEnum));
m_compiler->printTreeID(node);
}
#endif
return false;
}
}
void PushSideEffects(GenTree* node)
{
// The extracted side effect will no longer be an argument, so unmark it.
// This is safe to do because the side effects will be visited in pre-order,
// aborting as soon as any tree is extracted. Thus if an argument for a call
// is being extracted, it is guaranteed that the call itself will not be.
node->gtFlags &= ~GTF_LATE_ARG;
m_sideEffects.Push(node);
}
};
SideEffectExtractor extractor(this, flags);
if (ignoreRoot)
{
for (GenTree* op : expr->Operands())
{
extractor.WalkTree(&op, nullptr);
}
}
else
{
extractor.WalkTree(&expr, nullptr);
}
GenTree* list = *pList;
// The extractor returns side effects in execution order but gtBuildCommaList prepends
// to the comma-based side effect list so we have to build the list in reverse order.
// This is also why the list cannot be built while traversing the tree.
// The number of side effects is usually small (<= 4), less than the ArrayStack's
// built-in size, so memory allocation is avoided.
while (!extractor.m_sideEffects.Empty())
{
list = gtBuildCommaList(list, extractor.m_sideEffects.Pop());
}
*pList = list;
}
/*****************************************************************************
*
* For debugging only - displays a tree node list and makes sure all the
* links are correctly set.
*/
#ifdef DEBUG
void dispNodeList(GenTree* list, bool verbose)
{
GenTree* last = nullptr;
GenTree* next;
if (!list)
{
return;
}
for (;;)
{
next = list->gtNext;
if (verbose)
{
printf("%08X -> %08X -> %08X\n", last, list, next);
}
assert(!last || last->gtNext == list);
assert(next == nullptr || next->gtPrev == list);
if (!next)
{
break;
}
last = list;
list = next;
}
printf(""); // null string means flush
}
#endif
/*****************************************************************************
* Callback to mark the nodes of a qmark-colon subtree that are conditionally
* executed.
*/
/* static */
Compiler::fgWalkResult Compiler::gtMarkColonCond(GenTree** pTree, fgWalkData* data)
{
assert(data->pCallbackData == nullptr);
(*pTree)->gtFlags |= GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
 * Callback to clear the conditionally executed flags of nodes that will no longer
 *  be conditionally executed. Note that when we find another colon we must
 *  stop, as the nodes below this one WILL be conditionally executed. This callback
 *  is called when folding a qmark condition (i.e. the condition is constant).
*/
/* static */
Compiler::fgWalkResult Compiler::gtClearColonCond(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
assert(data->pCallbackData == nullptr);
if (tree->OperGet() == GT_COLON)
{
// Nodes below this will be conditionally executed.
return WALK_SKIP_SUBTREES;
}
tree->gtFlags &= ~GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Callback used by the tree walker to implement fgFindLink()
*/
static Compiler::fgWalkResult gtFindLinkCB(GenTree** pTree, Compiler::fgWalkData* cbData)
{
Compiler::FindLinkData* data = (Compiler::FindLinkData*)cbData->pCallbackData;
if (*pTree == data->nodeToFind)
{
data->result = pTree;
data->parent = cbData->parent;
return Compiler::WALK_ABORT;
}
return Compiler::WALK_CONTINUE;
}
Compiler::FindLinkData Compiler::gtFindLink(Statement* stmt, GenTree* node)
{
FindLinkData data = {node, nullptr, nullptr};
fgWalkResult result = fgWalkTreePre(stmt->GetRootNodePointer(), gtFindLinkCB, &data);
if (result == WALK_ABORT)
{
assert(data.nodeToFind == *data.result);
return data;
}
else
{
return {node, nullptr, nullptr};
}
}
/*****************************************************************************
*
* Callback that checks if a tree node has oper type GT_CATCH_ARG
*/
static Compiler::fgWalkResult gtFindCatchArg(GenTree** pTree, Compiler::fgWalkData* /* data */)
{
return ((*pTree)->OperGet() == GT_CATCH_ARG) ? Compiler::WALK_ABORT : Compiler::WALK_CONTINUE;
}
/*****************************************************************************/
bool Compiler::gtHasCatchArg(GenTree* tree)
{
if (((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0) && (fgWalkTreePre(&tree, gtFindCatchArg) == WALK_ABORT))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// gtHasCallOnStack:
//
// Arguments:
// parentStack: a context (stack of parent nodes)
//
// Return Value:
// returns true if any of the parent nodes are a GT_CALL
//
// Assumptions:
// We have a stack of parent nodes. This generally requires that
// we are performing a recursive tree walk using struct fgWalkData
//
//------------------------------------------------------------------------
/* static */ bool Compiler::gtHasCallOnStack(GenTreeStack* parentStack)
{
for (int i = 0; i < parentStack->Height(); i++)
{
GenTree* node = parentStack->Top(i);
if (node->OperGet() == GT_CALL)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------
// gtGetTypeProducerKind: determine if a tree produces a runtime type, and
// if so, how.
//
// Arguments:
// tree - tree to examine
//
// Return Value:
// TypeProducerKind for the tree.
//
// Notes:
// Checks to see if this tree returns a RuntimeType value, and if so,
// how that value is determined.
//
// Currently handles these cases
// 1) The result of Object::GetType
// 2) The result of typeof(...)
// 3) A null reference
// 4) Tree is otherwise known to have type RuntimeType
//
// The null reference case is surprisingly common because operator
// overloading turns the otherwise innocuous
//
// Type t = ....;
// if (t == null)
//
// into a method call.
Compiler::TypeProducerKind Compiler::gtGetTypeProducerKind(GenTree* tree)
{
if (tree->gtOper == GT_CALL)
{
if (tree->AsCall()->gtCallType == CT_HELPER)
{
if (gtIsTypeHandleToRuntimeTypeHelper(tree->AsCall()))
{
return TPK_Handle;
}
}
else if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
if (lookupNamedIntrinsic(tree->AsCall()->gtCallMethHnd) == NI_System_Object_GetType)
{
return TPK_GetType;
}
}
}
else if ((tree->gtOper == GT_INTRINSIC) && (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType))
{
return TPK_GetType;
}
else if ((tree->gtOper == GT_CNS_INT) && (tree->AsIntCon()->gtIconVal == 0))
{
return TPK_Null;
}
else
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(tree, &isExact, &isNonNull);
if (clsHnd != NO_CLASS_HANDLE && clsHnd == info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE))
{
return TPK_Other;
}
}
return TPK_Unknown;
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHelper -- see if a call is constructing
//    a RuntimeType from a handle
//
// Arguments:
//    call - call to examine
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call)
{
return call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) ||
call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL);
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHandleHelper -- see if a call is constructing
//    a RuntimeTypeHandle from a handle
//
// Arguments:
//    call - call to examine
// pHelper - optional pointer to a variable that receives the type of the helper
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper)
{
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
}
else if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL;
}
if (pHelper != nullptr)
{
*pHelper = helper;
}
return helper != CORINFO_HELP_UNDEF;
}
bool Compiler::gtIsActiveCSE_Candidate(GenTree* tree)
{
return (optValnumCSE_phase && IS_CSE_INDEX(tree->gtCSEnum));
}
/*****************************************************************************/
struct ComplexityStruct
{
unsigned m_numNodes;
unsigned m_nodeLimit;
ComplexityStruct(unsigned nodeLimit) : m_numNodes(0), m_nodeLimit(nodeLimit)
{
}
};
static Compiler::fgWalkResult ComplexityExceedsWalker(GenTree** pTree, Compiler::fgWalkData* data)
{
ComplexityStruct* pComplexity = (ComplexityStruct*)data->pCallbackData;
if (++pComplexity->m_numNodes > pComplexity->m_nodeLimit)
{
return Compiler::WALK_ABORT;
}
else
{
return Compiler::WALK_CONTINUE;
}
}
bool Compiler::gtComplexityExceeds(GenTree** tree, unsigned limit)
{
ComplexityStruct complexity(limit);
if (fgWalkTreePre(tree, &ComplexityExceedsWalker, &complexity) == WALK_ABORT)
{
return true;
}
else
{
return false;
}
}
bool GenTree::IsPhiNode()
{
return (OperGet() == GT_PHI_ARG) || (OperGet() == GT_PHI) || IsPhiDefn();
}
bool GenTree::IsPhiDefn()
{
bool res = ((OperGet() == GT_ASG) && (AsOp()->gtOp2 != nullptr) && (AsOp()->gtOp2->OperGet() == GT_PHI)) ||
((OperGet() == GT_STORE_LCL_VAR) && (AsOp()->gtOp1 != nullptr) && (AsOp()->gtOp1->OperGet() == GT_PHI));
assert(!res || OperGet() == GT_STORE_LCL_VAR || AsOp()->gtOp1->OperGet() == GT_LCL_VAR);
return res;
}
//------------------------------------------------------------------------
// IsPartialLclFld: Check for a GT_LCL_FLD whose type is a different size than the lclVar.
//
// Arguments:
// comp - the Compiler object.
//
// Return Value:
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR
bool GenTree::IsPartialLclFld(Compiler* comp)
{
return ((gtOper == GT_LCL_FLD) &&
(comp->lvaTable[this->AsLclVarCommon()->GetLclNum()].lvExactSize != genTypeSize(gtType)));
}
bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
GenTreeBlk* blkNode = nullptr;
if (OperIs(GT_ASG))
{
if (AsOp()->gtOp1->IsLocal())
{
GenTreeLclVarCommon* lclVarTree = AsOp()->gtOp1->AsLclVarCommon();
*pLclVarTree = lclVarTree;
if (pIsEntire != nullptr)
{
if (lclVarTree->IsPartialLclFld(comp))
{
*pIsEntire = false;
}
else
{
*pIsEntire = true;
}
}
return true;
}
else if (AsOp()->gtOp1->OperGet() == GT_IND)
{
GenTree* indArg = AsOp()->gtOp1->AsOp()->gtOp1;
return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire);
}
else if (AsOp()->gtOp1->OperIsBlk())
{
blkNode = AsOp()->gtOp1->AsBlk();
}
}
else if (OperIsBlk())
{
blkNode = this->AsBlk();
}
if (blkNode != nullptr)
{
GenTree* destAddr = blkNode->Addr();
unsigned width = blkNode->Size();
// Do we care about whether this assigns the entire variable?
if (pIsEntire != nullptr && blkNode->OperIs(GT_STORE_DYN_BLK))
{
GenTree* blockWidth = blkNode->AsStoreDynBlk()->gtDynamicSize;
if (blockWidth->IsCnsIntOrI())
{
assert(blockWidth->AsIntConCommon()->FitsInI32());
width = static_cast<unsigned>(blockWidth->AsIntConCommon()->IconValue());
if (width == 0)
{
return false;
}
}
}
return destAddr->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
// Otherwise...
return false;
}
// Returns true if this GenTree defines a result which is based on the address of a local.
bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
if (OperGet() == GT_ADDR || OperGet() == GT_LCL_VAR_ADDR)
{
GenTree* addrArg = this;
if (OperGet() == GT_ADDR)
{
addrArg = AsOp()->gtOp1;
}
if (addrArg->IsLocal() || addrArg->OperIsLocalAddr())
{
GenTreeLclVarCommon* addrArgLcl = addrArg->AsLclVarCommon();
*pLclVarTree = addrArgLcl;
if (pIsEntire != nullptr)
{
unsigned lclOffset = addrArgLcl->GetLclOffs();
if (lclOffset != 0)
{
// We aren't updating the bytes at [0..lclOffset-1] so *pIsEntire should be set to false
*pIsEntire = false;
}
else
{
unsigned lclNum = addrArgLcl->GetLclNum();
unsigned varWidth = comp->lvaLclExactSize(lclNum);
if (comp->lvaTable[lclNum].lvNormalizeOnStore())
{
// It's normalize on store, so use the full storage width -- writing to low bytes won't
// necessarily yield a normalized value.
varWidth = genTypeStSz(var_types(comp->lvaTable[lclNum].lvType)) * sizeof(int);
}
*pIsEntire = (varWidth == width);
}
}
return true;
}
else if (addrArg->OperGet() == GT_IND)
{
// A GT_ADDR of a GT_IND can both be optimized away, recurse using the child of the GT_IND
return addrArg->AsOp()->gtOp1->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->IsCnsIntOrI())
{
// If we are just adding a zero then we allow an IsEntire match against width;
// otherwise we change width to zero to disallow an IsEntire match.
return AsOp()->gtOp2->DefinesLocalAddr(comp, AsOp()->gtOp1->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
// If we are just adding a zero then we allow an IsEntire match against width;
// otherwise we change width to zero to disallow an IsEntire match.
return AsOp()->gtOp1->DefinesLocalAddr(comp, AsOp()->gtOp2->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
}
// Post rationalization we could have GT_IND(GT_LEA(..)) trees.
else if (OperGet() == GT_LEA)
{
// This method gets invoked during liveness computation, and therefore it is critical
// that we don't miss a 'use' of any local. The logic below assumes that for
// LEA(base, index, offset) only the base can be a GT_LCL_VAR_ADDR, never the index.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
GenTree* index = AsOp()->gtOp2;
if (index != nullptr)
{
assert(!index->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire));
}
#endif // DEBUG
// base
GenTree* base = AsOp()->gtOp1;
if (base != nullptr)
{
// Lea could have an Indir as its base.
if (base->OperGet() == GT_IND)
{
base = base->AsOp()->gtOp1->gtEffectiveVal(/*commas only*/ true);
}
return base->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsLocalExpr: Determine if this is a LclVarCommon node and return some
// additional info about it in the two out parameters.
//
// Arguments:
// comp - The Compiler instance
// pLclVarTree - An "out" argument that returns the local tree as a
// LclVarCommon, if it is indeed local.
// pFldSeq - An "out" argument that returns the value numbering field
// sequence for the node, if any.
//
// Return Value:
// Returns true, and sets the out arguments accordingly, if this is
// a LclVarCommon node.
bool GenTree::IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq)
{
if (IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = AsLclVarCommon();
if (OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
// If this tree evaluates some sum of a local address and some constants,
// return the node for the local being addressed
GenTreeLclVarCommon* GenTree::IsLocalAddrExpr()
{
if (OperGet() == GT_ADDR)
{
return AsOp()->gtOp1->IsLocal() ? AsOp()->gtOp1->AsLclVarCommon() : nullptr;
}
else if (OperIsLocalAddr())
{
return this->AsLclVarCommon();
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp2->IsLocalAddrExpr();
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp1->IsLocalAddrExpr();
}
}
// Otherwise...
return nullptr;
}
//------------------------------------------------------------------------
// IsLocalAddrExpr: finds if "this" is an address of a local var/fld.
//
// Arguments:
// comp - a compiler instance;
// pLclVarTree - [out] sets to the node indicating the local variable if found;
// pFldSeq - [out] sets to the field sequence representing the field, else null;
// pOffset - [out](optional) sets to the sum offset of the lcl/fld if found,
// note it does not include pLclVarTree->GetLclOffs().
//
// Returns:
// Returns true if "this" represents the address of a local, or a field of a local.
//
// Notes:
// It is mostly used for optimizations but assertion propagation depends on it for correctness.
// So if this function does not recognize a def of a LCL_VAR we can have an incorrect optimization.
//
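// For illustration, a sketch of the shapes this method recognizes (schematic IR, local numbers and
// offsets are made up): ADDR(LCL_VAR V01), LCL_FLD_ADDR V01, and ADD(CNS_INT 8 [FldSeq], ADDR(LCL_FLD V01));
// in the ADD form the constant operand must carry a field sequence, otherwise the match is rejected.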
bool GenTree::IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset /* = nullptr */)
{
if (OperGet() == GT_ADDR)
{
assert(!comp->compRationalIRForm);
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = addrArg->AsLclVarCommon();
if (addrArg->OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(addrArg->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
else if (OperIsLocalAddr())
{
*pLclVarTree = this->AsLclVarCommon();
if (this->OperGet() == GT_LCL_FLD_ADDR)
{
*pFldSeq = comp->GetFieldSeqStore()->Append(this->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp1->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp2->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp2->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp1->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsImplicitByrefParameterValue: determine if this tree is the entire
// value of a local implicit byref parameter
//
// Arguments:
// compiler -- compiler instance
//
// Return Value:
// GenTreeLclVar node for the local, or nullptr.
//
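// For illustration (schematic IR, local numbers are made up), the shapes recognized below are
// LCL_VAR V01, OBJ(LCL_VAR V01) and OBJ(ADDR(LCL_VAR V01)), where V01 must additionally satisfy
// lvaIsImplicitByRefLocal on the targets that use implicit byref parameters.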
GenTreeLclVar* GenTree::IsImplicitByrefParameterValue(Compiler* compiler)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
GenTreeLclVar* lcl = nullptr;
if (OperIs(GT_LCL_VAR))
{
lcl = AsLclVar();
}
else if (OperIs(GT_OBJ))
{
GenTree* addr = AsIndir()->Addr();
if (addr->OperIs(GT_LCL_VAR))
{
lcl = addr->AsLclVar();
}
else if (addr->OperIs(GT_ADDR))
{
GenTree* base = addr->AsOp()->gtOp1;
if (base->OperIs(GT_LCL_VAR))
{
lcl = base->AsLclVar();
}
}
}
if ((lcl != nullptr) && compiler->lvaIsImplicitByRefLocal(lcl->GetLclNum()))
{
return lcl;
}
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
return nullptr;
}
//------------------------------------------------------------------------
// IsLclVarUpdateTree: Determine whether this is an assignment tree of the
// form Vn = Vn 'oper' 'otherTree' where Vn is a lclVar
//
// Arguments:
// pOtherTree - An "out" argument in which 'otherTree' will be returned.
// pOper - An "out" argument in which 'oper' will be returned.
//
// Return Value:
// If the tree is of the above form, the lclNum of the variable being
// updated is returned, and 'pOtherTree' and 'pOper' are set.
// Otherwise, returns BAD_VAR_NUM.
//
// Notes:
// 'otherTree' can have any shape.
// We avoid worrying about whether the op is commutative by only considering the
// first operand of the rhs. It is expected that most trees of this form will
// already have the lclVar on the lhs.
// TODO-CQ: Evaluate whether there are missed opportunities due to this, or
// whether gtSetEvalOrder will already have put the lclVar on the lhs in
// the cases of interest.
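// For example (schematic IR, numbers are made up): ASG(LCL_VAR V02, ADD(LCL_VAR V02, CNS_INT 1))
// returns 2 with *pOper set to GT_ADD and *pOtherTree set to the CNS_INT node; the same tree with
// the ADD operands swapped is not recognized, per the commutativity note above.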
unsigned GenTree::IsLclVarUpdateTree(GenTree** pOtherTree, genTreeOps* pOper)
{
unsigned lclNum = BAD_VAR_NUM;
if (OperIs(GT_ASG))
{
GenTree* lhs = AsOp()->gtOp1;
GenTree* rhs = AsOp()->gtOp2;
if ((lhs->OperGet() == GT_LCL_VAR) && rhs->OperIsBinary())
{
unsigned lhsLclNum = lhs->AsLclVarCommon()->GetLclNum();
GenTree* rhsOp1 = rhs->AsOp()->gtOp1;
GenTree* rhsOp2 = rhs->AsOp()->gtOp2;
// Some operators, such as HWINTRINSIC, are currently declared as binary but
// may not have two operands. We must check that both operands actually exist.
if ((rhsOp1 != nullptr) && (rhsOp2 != nullptr) && (rhsOp1->OperGet() == GT_LCL_VAR) &&
(rhsOp1->AsLclVarCommon()->GetLclNum() == lhsLclNum))
{
lclNum = lhsLclNum;
*pOtherTree = rhsOp2;
*pOper = rhs->OperGet();
}
}
}
return lclNum;
}
#ifdef DEBUG
//------------------------------------------------------------------------
// canBeContained: check whether this tree node may be a subcomponent of its parent for purposes
// of code generation.
//
// Return Value:
// True if it is possible to contain this node and false otherwise.
//
bool GenTree::canBeContained() const
{
assert(OperIsLIR());
if (IsMultiRegLclVar())
{
return false;
}
if (gtHasReg(nullptr))
{
return false;
}
// It is not possible for nodes that do not produce values or that are not containable values to be contained.
if (!IsValue() || ((DebugOperKind() & DBK_NOCONTAIN) != 0) || (OperIsHWIntrinsic() && !isContainableHWIntrinsic()))
{
return false;
}
return true;
}
#endif // DEBUG
//------------------------------------------------------------------------
// isContained: check whether this tree node is a subcomponent of its parent for codegen purposes
//
// Return Value:
// Returns true if there is no code generated explicitly for this node.
// Essentially, it will be rolled into the code generation for the parent.
//
// Assumptions:
// This method relies upon the value of the GTF_CONTAINED flag.
// Therefore this method is only valid after Lowering.
// Also note that register allocation or other subsequent phases may cause
// nodes to become contained (or not) and therefore this property may change.
//
bool GenTree::isContained() const
{
assert(OperIsLIR());
const bool isMarkedContained = ((gtFlags & GTF_CONTAINED) != 0);
#ifdef DEBUG
if (!canBeContained())
{
assert(!isMarkedContained);
}
// these actually produce a register (the flags reg, we just don't model it)
// and are a separate instruction from the branch that consumes the result.
// They can only produce a result if the child is a SIMD equality comparison.
else if (OperIsCompare())
{
assert(isMarkedContained == false);
}
// if it's contained it can't be unused.
if (isMarkedContained)
{
assert(!IsUnusedValue());
}
#endif // DEBUG
return isMarkedContained;
}
// return true if node is contained and an indir
bool GenTree::isContainedIndir() const
{
return OperIsIndir() && isContained();
}
bool GenTree::isIndirAddrMode()
{
return OperIsIndir() && AsIndir()->Addr()->OperIsAddrMode() && AsIndir()->Addr()->isContained();
}
bool GenTree::isIndir() const
{
return OperGet() == GT_IND || OperGet() == GT_STOREIND;
}
bool GenTreeIndir::HasBase()
{
return Base() != nullptr;
}
bool GenTreeIndir::HasIndex()
{
return Index() != nullptr;
}
GenTree* GenTreeIndir::Base()
{
GenTree* addr = Addr();
if (isIndirAddrMode())
{
GenTree* result = addr->AsAddrMode()->Base();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return addr; // TODO: why do we return 'addr' here, but we return 'nullptr' in the equivalent Index() case?
}
}
GenTree* GenTreeIndir::Index()
{
if (isIndirAddrMode())
{
GenTree* result = Addr()->AsAddrMode()->Index();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return nullptr;
}
}
unsigned GenTreeIndir::Scale()
{
if (HasIndex())
{
return Addr()->AsAddrMode()->gtScale;
}
else
{
return 1;
}
}
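// Offset: return the constant displacement of this indirection's address, if any: the address mode
// offset for a contained LEA, the class variable handle reinterpreted as an offset for a
// GT_CLS_VAR_ADDR address, the constant value for a contained integer-constant address, and zero otherwise.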
ssize_t GenTreeIndir::Offset()
{
if (isIndirAddrMode())
{
return Addr()->AsAddrMode()->Offset();
}
else if (Addr()->gtOper == GT_CLS_VAR_ADDR)
{
return static_cast<ssize_t>(reinterpret_cast<intptr_t>(Addr()->AsClsVar()->gtClsVarHnd));
}
else if (Addr()->IsCnsIntOrI() && Addr()->isContained())
{
return Addr()->AsIntConCommon()->IconValue();
}
else
{
return 0;
}
}
//------------------------------------------------------------------------
// GenTreeIntConCommon::ImmedValNeedsReloc: does this immediate value needs recording a relocation with the VM?
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if this immediate value requires us to record a relocation for it; false otherwise.
bool GenTreeIntConCommon::ImmedValNeedsReloc(Compiler* comp)
{
return comp->opts.compReloc && (gtOper == GT_CNS_INT) && IsIconHandle();
}
//------------------------------------------------------------------------
// ImmedValCanBeFolded: can this immediate value be folded for op?
//
// Arguments:
// comp - Compiler instance
// op - Tree operator
//
// Return Value:
// True if this immediate value can be folded for op; false otherwise.
bool GenTreeIntConCommon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op)
{
// In general, immediate values that need relocations can't be folded.
// There are cases where we do want to allow folding of handle comparisons
// (e.g., typeof(T) == typeof(int)).
return !ImmedValNeedsReloc(comp) || (op == GT_EQ) || (op == GT_NE);
}
#ifdef TARGET_AMD64
// Returns true if this absolute address fits within the base of an addr mode.
// On Amd64 this effectively means whether an absolute indirect address can
// be encoded as a 32-bit offset relative to IP or zero.
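// As a rough sketch of the two encodings being chosen between (x64 details simplified): an address
// that fits in 32 bits can be used directly as a disp32 with no base register, while a RIP-relative
// disp32 is used when the VM's reloc hint says the target is reachable; the RIP-relative form is one
// byte smaller (no SIB byte), hence the preference described in the function body below.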
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
if (comp->opts.compReloc)
{
        // During NGen the JIT is always asked to generate relocatable code.
        // Hence the JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
// During Jitting, we are allowed to generate non-relocatable code.
// On Amd64 we can encode an absolute indirect addr as an offset relative to zero or RIP.
        // An absolute indir addr that can fit within 32 bits can be encoded as an offset relative
        // to zero. All other absolute indir addrs may be attempted to be encoded as RIP-relative,
        // based on the reloc hint provided by the VM. RIP-relative encoding is preferred over relative
// to zero, because the former is one byte smaller than the latter. For this reason
// we check for reloc hint first and then whether addr fits in 32-bits next.
//
// VM starts off with an initial state to allow both data and code address to be encoded as
// pc-relative offsets. Hence JIT will attempt to encode all absolute addresses as pc-relative
// offsets. It is possible while jitting a method, an address could not be encoded as a
// pc-relative offset. In that case VM will note the overflow and will trigger re-jitting
// of the method with reloc hints turned off for all future methods. Second time around
// jitting will succeed since JIT will not attempt to encode data addresses as pc-relative
        // offsets. Note that the JIT will always attempt to relocate code addresses (e.g. call addresses).
// After an overflow, VM will assume any relocation recorded is for a code address and will
// emit jump thunk if it cannot be encoded as pc-relative offset.
return (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue())) || FitsInI32();
}
}
// Returns true if this icon value, when encoded as an address, needs a relocation recorded with the VM.
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
if (comp->opts.compReloc)
{
        // During NGen the JIT is always asked to generate relocatable code.
        // Hence the JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
return IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue());
}
}
#elif defined(TARGET_X86)
// Returns true if this absolute address fits within the base of an addr mode.
// On x86 all addresses are 4 bytes and can be directly encoded in an addr mode.
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
return IsCnsIntOrI();
}
// Returns true if this icon value, when encoded as an address, needs a relocation recorded with the VM.
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
    // If generating relocatable code, icons should be reported for recording relocations.
return comp->opts.compReloc && IsIconHandle();
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// IsFieldAddr: Is "this" a static or class field address?
//
// Recognizes the following patterns:
// this: ADD(baseAddr, CONST [FldSeq])
// this: ADD(CONST [FldSeq], baseAddr)
// this: CONST [FldSeq]
// this: Zero [FldSeq]
//
// Arguments:
// comp - the Compiler object
// pBaseAddr - [out] parameter for "the base address"
// pFldSeq - [out] parameter for the field sequence
//
// Return Value:
//    If "this" matches the patterns denoted above, and the FldSeq found is "full",
//    i.e. starts with a class field or a static field, and includes all the
//    struct fields that this tree represents the address of, this method will
//    return "true" and set "pBaseAddr" to some value, which must be used by the
//    caller as the key into the "first field map" to obtain the actual value
//    for the field. For instance fields, the "base address" will be the object
//    reference; for statics it is the address to which the field offset with the
//    field sequence is added, see "impImportStaticFieldAccess" and "fgMorphField".
//
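// For illustration (schematic IR, offsets are made up): a shared static field access might look like
// ADD(CALL help.GETSHARED_GCSTATIC_BASE, CNS_INT 0x20 [FldSeq: Class.StaticFld]), and an instance
// field access like ADD(LCL_VAR V00 (TYP_REF), CNS_INT 8 [FldSeq: Class.InstanceFld]); in both cases
// "baseAddr" is the non-constant operand.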
bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq)
{
assert(TypeIs(TYP_I_IMPL, TYP_BYREF, TYP_REF));
*pBaseAddr = nullptr;
*pFldSeq = FieldSeqStore::NotAField();
GenTree* baseAddr = nullptr;
FieldSeqNode* fldSeq = FieldSeqStore::NotAField();
if (OperIs(GT_ADD))
{
// If one operand has a field sequence, the other operand must not have one
// as the order of fields in that case would not be well-defined.
if (AsOp()->gtOp1->IsCnsIntOrI() && AsOp()->gtOp1->IsIconHandle())
{
assert(!AsOp()->gtOp2->IsCnsIntOrI() || !AsOp()->gtOp2->IsIconHandle());
fldSeq = AsOp()->gtOp1->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp1->IsCnsIntOrI() || !AsOp()->gtOp1->IsIconHandle());
fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp1;
}
if (baseAddr != nullptr)
{
assert(!baseAddr->TypeIs(TYP_REF) || !comp->GetZeroOffsetFieldMap()->Lookup(baseAddr));
}
}
else if (IsCnsIntOrI() && IsIconHandle(GTF_ICON_STATIC_HDL))
{
assert(!comp->GetZeroOffsetFieldMap()->Lookup(this) && (AsIntCon()->gtFieldSeq != nullptr));
fldSeq = AsIntCon()->gtFieldSeq;
baseAddr = nullptr;
}
else if (comp->GetZeroOffsetFieldMap()->Lookup(this, &fldSeq))
{
baseAddr = this;
}
else
{
return false;
}
assert(fldSeq != nullptr);
if ((fldSeq == FieldSeqStore::NotAField()) || fldSeq->IsPseudoField())
{
return false;
}
// The above screens out obviously invalid cases, but we have more checks to perform. The
// sequence returned from this method *must* start with either a class (NOT struct) field
// or a static field. To avoid the expense of calling "getFieldClass" here, we will instead
// rely on the invariant that TYP_REF base addresses can never appear for struct fields - we
// will effectively treat such cases ("possible" in unsafe code) as undefined behavior.
if (comp->eeIsFieldStatic(fldSeq->GetFieldHandle()))
{
// TODO-VNTypes: this code is out of sync w.r.t. boxed statics that are numbered with
// VNF_PtrToStatic and treated as "simple" while here we treat them as "complex".
// TODO-VNTypes: we will always return the "baseAddr" here for now, but strictly speaking,
// we only need to do that if we have a shared field, to encode the logical "instantiation"
// argument. In all other cases, this serves no purpose and just leads to redundant maps.
*pBaseAddr = baseAddr;
*pFldSeq = fldSeq;
return true;
}
if (baseAddr->TypeIs(TYP_REF))
{
assert(!comp->eeIsValueClass(comp->info.compCompHnd->getFieldClass(fldSeq->GetFieldHandle())));
*pBaseAddr = baseAddr;
*pFldSeq = fldSeq;
return true;
}
// This case is reached, for example, if we have a chain of struct fields that are based on
// some pointer. We do not model such cases because we do not model maps for ByrefExposed
// memory, as it does not have the non-aliasing property of GcHeap and reference types.
return false;
}
bool Compiler::gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd)
{
if (fieldNodeType != TYP_REF)
{
return false;
}
noway_assert(fldHnd != nullptr);
CorInfoType cit = info.compCompHnd->getFieldType(fldHnd);
var_types fieldTyp = JITtype2varType(cit);
return fieldTyp != TYP_REF;
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// gtGetSIMDZero: Get a zero value of the appropriate SIMD type.
//
// Arguments:
// var_types - The simdType
// simdBaseJitType - The SIMD base JIT type we need
// simdHandle - The handle for the SIMD type
//
// Return Value:
// A node generating the appropriate Zero, if we are able to discern it,
// otherwise null (note that this shouldn't happen, but callers should
// be tolerant of this case).
GenTree* Compiler::gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle)
{
bool found = false;
bool isHWSIMD = true;
noway_assert(m_simdHandleCache != nullptr);
// First, determine whether this is Vector<T>.
if (simdType == getSIMDVectorType())
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
found = (simdHandle == m_simdHandleCache->SIMDFloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
found = (simdHandle == m_simdHandleCache->SIMDDoubleHandle);
break;
case CORINFO_TYPE_INT:
found = (simdHandle == m_simdHandleCache->SIMDIntHandle);
break;
case CORINFO_TYPE_USHORT:
found = (simdHandle == m_simdHandleCache->SIMDUShortHandle);
break;
case CORINFO_TYPE_UBYTE:
found = (simdHandle == m_simdHandleCache->SIMDUByteHandle);
break;
case CORINFO_TYPE_SHORT:
found = (simdHandle == m_simdHandleCache->SIMDShortHandle);
break;
case CORINFO_TYPE_BYTE:
found = (simdHandle == m_simdHandleCache->SIMDByteHandle);
break;
case CORINFO_TYPE_LONG:
found = (simdHandle == m_simdHandleCache->SIMDLongHandle);
break;
case CORINFO_TYPE_UINT:
found = (simdHandle == m_simdHandleCache->SIMDUIntHandle);
break;
case CORINFO_TYPE_ULONG:
found = (simdHandle == m_simdHandleCache->SIMDULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
found = (simdHandle == m_simdHandleCache->SIMDNIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
found = (simdHandle == m_simdHandleCache->SIMDNUIntHandle);
break;
default:
break;
}
if (found)
{
isHWSIMD = false;
}
}
if (!found)
{
// We must still have isHWSIMD set to true, and the only non-HW types left are the fixed types.
switch (simdType)
{
case TYP_SIMD8:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector2Handle)
{
isHWSIMD = false;
}
#if defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector64FloatHandle);
}
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector64IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector64UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector64UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector64ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector64ByteHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector64UIntHandle);
#endif // defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
break;
default:
break;
}
break;
case TYP_SIMD12:
assert((simdBaseJitType == CORINFO_TYPE_FLOAT) && (simdHandle == m_simdHandleCache->SIMDVector3Handle));
isHWSIMD = false;
break;
case TYP_SIMD16:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector4Handle)
{
isHWSIMD = false;
}
#if defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector128FloatHandle);
}
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector128DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector128IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector128UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector128UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector128ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector128ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector128LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector128UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector128ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector128NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector128NUIntHandle);
break;
#endif // defined(FEATURE_HW_INTRINSICS)
default:
break;
}
break;
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
case TYP_SIMD32:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
assert(simdHandle == m_simdHandleCache->Vector256FloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector256DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector256IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector256UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector256UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector256ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector256ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector256LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector256UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector256ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector256NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector256NUIntHandle);
break;
default:
break;
}
break;
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
default:
break;
}
}
unsigned size = genTypeSize(simdType);
if (isHWSIMD)
{
#if defined(FEATURE_HW_INTRINSICS)
return gtNewSimdZeroNode(simdType, simdBaseJitType, size, /* isSimdAsHWIntrinsic */ false);
#else
        JITDUMP("Couldn't find the matching SIMD type for %s<%s> in gtGetSIMDZero\n", varTypeName(simdType),
varTypeName(JitType2PreciseVarType(simdBaseJitType)));
return nullptr;
#endif // FEATURE_HW_INTRINSICS
}
else
{
return gtNewSIMDVectorZero(simdType, simdBaseJitType, size);
}
}
#endif // FEATURE_SIMD
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE;
tree = tree->gtEffectiveVal();
if (varTypeIsStruct(tree->gtType))
{
switch (tree->gtOper)
{
default:
break;
case GT_MKREFANY:
structHnd = impGetRefAnyClass();
break;
case GT_OBJ:
structHnd = tree->AsObj()->GetLayout()->GetClassHandle();
break;
case GT_BLK:
structHnd = tree->AsBlk()->GetLayout()->GetClassHandle();
break;
case GT_CALL:
structHnd = tree->AsCall()->gtRetClsHnd;
break;
case GT_RET_EXPR:
structHnd = tree->AsRetExpr()->gtRetClsHnd;
break;
case GT_ARGPLACE:
structHnd = tree->AsArgPlace()->gtArgPlaceClsHnd;
break;
case GT_INDEX:
structHnd = tree->AsIndex()->gtStructElemClass;
break;
case GT_FIELD:
info.compCompHnd->getFieldType(tree->AsField()->gtFldHnd, &structHnd);
break;
case GT_ASG:
structHnd = gtGetStructHandleIfPresent(tree->gtGetOp1());
break;
case GT_LCL_FLD:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
#endif
break;
case GT_LCL_VAR:
{
unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
structHnd = lvaGetStruct(lclNum);
break;
}
case GT_RETURN:
structHnd = gtGetStructHandleIfPresent(tree->AsOp()->gtOp1);
break;
case GT_IND:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
else
#endif
{
// Attempt to find a handle for this expression.
// We can do this for an array element indirection, or for a field indirection.
ArrayInfo arrInfo;
if (TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
structHnd = arrInfo.m_elemStructType;
}
else
{
GenTree* addr = tree->AsIndir()->Addr();
FieldSeqNode* fieldSeq = nullptr;
if ((addr->OperGet() == GT_ADD) && addr->gtGetOp2()->OperIs(GT_CNS_INT))
{
fieldSeq = addr->gtGetOp2()->AsIntCon()->gtFieldSeq;
}
else
{
GetZeroOffsetFieldMap()->Lookup(addr, &fieldSeq);
}
if (fieldSeq != nullptr)
{
while (fieldSeq->m_next != nullptr)
{
fieldSeq = fieldSeq->m_next;
}
if (fieldSeq != FieldSeqStore::NotAField() && !fieldSeq->IsPseudoField())
{
CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->m_fieldHnd;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &structHnd);
// With unsafe code and type casts
// this can return a primitive type and have nullptr for structHnd
// see runtime/issues/38541
}
}
}
}
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsSIMD()->GetSimdBaseJitType());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
if ((tree->gtFlags & GTF_SIMDASHW_OP) != 0)
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
else
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
break;
#endif
break;
}
// TODO-1stClassStructs: add a check that `structHnd != NO_CLASS_HANDLE`,
// nowadays it won't work because the right part of an ASG could have struct type without a handle
// (check `fgMorphBlockOperand(isBlkReqd`) and a few other cases.
}
return structHnd;
}
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = gtGetStructHandleIfPresent(tree);
assert(structHnd != NO_CLASS_HANDLE);
return structHnd;
}
//------------------------------------------------------------------------
// gtGetClassHandle: find class handle for a ref type
//
// Arguments:
// tree -- tree to find handle for
// pIsExact [out] -- whether handle is exact type
// pIsNonNull [out] -- whether tree value is known not to be null
//
// Return Value:
// nullptr if class handle is unknown,
// otherwise the class handle.
// *pIsExact set true if tree type is known to be exactly the handle type,
// otherwise actual type may be a subtype.
// *pIsNonNull set true if tree value is known not to be null,
// otherwise a null value is possible.
CORINFO_CLASS_HANDLE Compiler::gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull)
{
// Set default values for our out params.
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
// Bail out if we're just importing and not generating code, since
// the jit uses TYP_REF for CORINFO_TYPE_VAR locals and args, but
// these may not be ref types.
if (compIsForImportOnly())
{
return objClass;
}
// Bail out if the tree is not a ref type.
var_types treeType = tree->TypeGet();
if (treeType != TYP_REF)
{
return objClass;
}
// Tunnel through commas.
GenTree* obj = tree->gtEffectiveVal(false);
const genTreeOps objOp = obj->OperGet();
switch (objOp)
{
case GT_COMMA:
{
// gtEffectiveVal above means we shouldn't see commas here.
assert(!"unexpected GT_COMMA");
break;
}
case GT_LCL_VAR:
{
// For locals, pick up type info from the local table.
const unsigned objLcl = obj->AsLclVar()->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
break;
}
case GT_FIELD:
{
// For fields, get the type from the field handle.
CORINFO_FIELD_HANDLE fieldHnd = obj->AsField()->gtFldHnd;
if (fieldHnd != nullptr)
{
objClass = gtGetFieldClassHandle(fieldHnd, pIsExact, pIsNonNull);
}
break;
}
case GT_RET_EXPR:
{
// If we see a RET_EXPR, recurse through to examine the
// return value expression.
GenTree* retExpr = tree->AsRetExpr()->gtInlineCandidate;
objClass = gtGetClassHandle(retExpr, pIsExact, pIsNonNull);
break;
}
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
if ((ni == NI_System_Array_Clone) || (ni == NI_System_Object_MemberwiseClone))
{
objClass = gtGetClassHandle(call->gtCallThisArg->GetNode(), pIsExact, pIsNonNull);
break;
}
CORINFO_CLASS_HANDLE specialObjClass = impGetSpecialIntrinsicExactReturnType(call->gtCallMethHnd);
if (specialObjClass != nullptr)
{
objClass = specialObjClass;
*pIsExact = true;
*pIsNonNull = true;
break;
}
}
if (call->IsInlineCandidate())
{
// For inline candidates, we've already cached the return
// type class handle in the inline info.
InlineCandidateInfo* inlInfo = call->gtInlineCandidateInfo;
assert(inlInfo != nullptr);
// Grab it as our first cut at a return type.
assert(inlInfo->methInfo.args.retType == CORINFO_TYPE_CLASS);
objClass = inlInfo->methInfo.args.retTypeClass;
// If the method is shared, the above may not capture
// the most precise return type information (that is,
// it may represent a shared return type and as such,
// have instances of __Canon). See if we can use the
// context to get at something more definite.
//
// For now, we do this here on demand rather than when
// processing the call, but we could/should apply
// similar sharpening to the argument and local types
// of the inlinee.
const unsigned retClassFlags = info.compCompHnd->getClassAttribs(objClass);
if (retClassFlags & CORINFO_FLG_SHAREDINST)
{
CORINFO_CONTEXT_HANDLE context = inlInfo->exactContextHnd;
if (context != nullptr)
{
CORINFO_CLASS_HANDLE exactClass = eeGetClassFromContext(context);
// Grab the signature in this context.
CORINFO_SIG_INFO sig;
eeGetMethodSig(call->gtCallMethHnd, &sig, exactClass);
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
}
else if (call->gtCallType == CT_USER_FUNC)
{
// For user calls, we can fetch the approximate return
// type info from the method handle. Unfortunately
// we've lost the exact context, so this is the best
// we can do for now.
CORINFO_METHOD_HANDLE method = call->gtCallMethHnd;
CORINFO_CLASS_HANDLE exactClass = nullptr;
CORINFO_SIG_INFO sig;
eeGetMethodSig(method, &sig, exactClass);
if (sig.retType == CORINFO_TYPE_VOID)
{
// This is a constructor call.
const unsigned methodFlags = info.compCompHnd->getMethodAttribs(method);
assert((methodFlags & CORINFO_FLG_CONSTRUCTOR) != 0);
objClass = info.compCompHnd->getMethodClass(method);
*pIsExact = true;
*pIsNonNull = true;
}
else
{
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
else if (call->gtCallType == CT_HELPER)
{
objClass = gtGetHelperCallClassHandle(call, pIsExact, pIsNonNull);
}
break;
}
case GT_INTRINSIC:
{
GenTreeIntrinsic* intrinsic = obj->AsIntrinsic();
if (intrinsic->gtIntrinsicName == NI_System_Object_GetType)
{
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsExact = false;
*pIsNonNull = true;
}
break;
}
case GT_CNS_STR:
{
// For literal strings, we know the class and that the
// value is not null.
objClass = impGetStringClass();
*pIsExact = true;
*pIsNonNull = true;
break;
}
case GT_IND:
{
GenTreeIndir* indir = obj->AsIndir();
if (indir->HasBase() && !indir->HasIndex())
{
// indir(addr(lcl)) --> lcl
//
// This comes up during constrained callvirt on ref types.
GenTree* base = indir->Base();
GenTreeLclVarCommon* lcl = base->IsLocalAddrExpr();
if ((lcl != nullptr) && (base->OperGet() != GT_ADD))
{
const unsigned objLcl = lcl->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
}
else if (base->OperGet() == GT_ARR_ELEM)
{
// indir(arr_elem(...)) -> array element type
GenTree* array = base->AsArrElem()->gtArrObj;
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
}
else if (base->OperGet() == GT_ADD)
{
// This could be a static field access.
//
// See if op1 is a static field base helper call
// and if so, op2 will have the field info.
GenTree* op1 = base->AsOp()->gtOp1;
GenTree* op2 = base->AsOp()->gtOp2;
const bool op1IsStaticFieldBase = gtIsStaticGCBaseHelperCall(op1);
if (op1IsStaticFieldBase && (op2->OperGet() == GT_CNS_INT))
{
FieldSeqNode* fieldSeq = op2->AsIntCon()->gtFieldSeq;
if (fieldSeq != nullptr)
{
while (fieldSeq->m_next != nullptr)
{
fieldSeq = fieldSeq->m_next;
}
assert(!fieldSeq->IsPseudoField());
// No benefit to calling gtGetFieldClassHandle here, as
// the exact field being accessed can vary.
CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->m_fieldHnd;
CORINFO_CLASS_HANDLE fieldClass = nullptr;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &fieldClass);
assert(fieldCorType == CORINFO_TYPE_CLASS);
objClass = fieldClass;
}
}
}
}
break;
}
case GT_BOX:
{
// Box should just wrap a local var reference which has
// the type we're looking for. Also box only represents a
// non-nullable value type so result cannot be null.
GenTreeBox* box = obj->AsBox();
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
objClass = lvaTable[boxTempLcl].lvClassHnd;
*pIsExact = lvaTable[boxTempLcl].lvClassIsExact;
*pIsNonNull = true;
break;
}
case GT_INDEX:
{
GenTree* array = obj->AsIndex()->Arr();
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
break;
}
default:
{
break;
}
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetHelperCallClassHandle: find class handle for return value of a
// helper call
//
// Arguments:
// call - helper call to examine
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if return value is not null
//
// Return Value:
// nullptr if helper call result is not a ref class, or the class handle
// is unknown, otherwise the class handle.
CORINFO_CLASS_HANDLE Compiler::gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull)
{
assert(call->gtCallType == CT_HELPER);
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE:
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL:
{
// Note for some runtimes these helpers return exact types.
//
// But in those cases the types are also sealed, so there's no
// need to claim exactness here.
const bool helperResultNonNull = (helper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE);
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsNonNull = helperResultNonNull;
break;
}
case CORINFO_HELP_CHKCASTCLASS:
case CORINFO_HELP_CHKCASTANY:
case CORINFO_HELP_CHKCASTARRAY:
case CORINFO_HELP_CHKCASTINTERFACE:
case CORINFO_HELP_CHKCASTCLASS_SPECIAL:
case CORINFO_HELP_ISINSTANCEOFINTERFACE:
case CORINFO_HELP_ISINSTANCEOFARRAY:
case CORINFO_HELP_ISINSTANCEOFCLASS:
case CORINFO_HELP_ISINSTANCEOFANY:
{
// Fetch the class handle from the helper call arglist
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* typeArg = args->GetNode();
CORINFO_CLASS_HANDLE castHnd = gtGetHelperArgClassHandle(typeArg);
// We generally assume the type being cast to is the best type
// for the result, unless it is an interface type.
//
// TODO-CQ: when we have default interface methods then
// this might not be the best assumption. We could also
// explore calling something like mergeClasses to identify
// the more specific class. A similar issue arises when
// typing the temp in impCastClassOrIsInstToTree, when we
// expand the cast inline.
if (castHnd != nullptr)
{
DWORD attrs = info.compCompHnd->getClassAttribs(castHnd);
if ((attrs & CORINFO_FLG_INTERFACE) != 0)
{
castHnd = nullptr;
}
}
// If we don't have a good estimate for the type we can use the
// type from the value being cast instead.
if (castHnd == nullptr)
{
GenTree* valueArg = args->GetNext()->GetNode();
castHnd = gtGetClassHandle(valueArg, pIsExact, pIsNonNull);
}
// We don't know at jit time if the cast will succeed or fail, but if it
// fails at runtime then an exception is thrown for cast helpers, or the
// result is set null for instance helpers.
//
            // So it is safe to claim the result has the cast type.
// Note we don't know for sure that it is exactly this type.
if (castHnd != nullptr)
{
objClass = castHnd;
}
break;
}
case CORINFO_HELP_NEWARR_1_DIRECT:
case CORINFO_HELP_NEWARR_1_OBJ:
case CORINFO_HELP_NEWARR_1_VC:
case CORINFO_HELP_NEWARR_1_ALIGN8:
case CORINFO_HELP_READYTORUN_NEWARR_1:
{
CORINFO_CLASS_HANDLE arrayHnd = (CORINFO_CLASS_HANDLE)call->compileTimeHelperArgumentHandle;
if (arrayHnd != NO_CLASS_HANDLE)
{
objClass = arrayHnd;
*pIsExact = true;
*pIsNonNull = true;
}
break;
}
default:
break;
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetArrayElementClassHandle: find class handle for elements of an array
// of ref types
//
// Arguments:
// array -- array to find handle for
//
// Return Value:
// nullptr if element class handle is unknown, otherwise the class handle.
CORINFO_CLASS_HANDLE Compiler::gtGetArrayElementClassHandle(GenTree* array)
{
bool isArrayExact = false;
bool isArrayNonNull = false;
CORINFO_CLASS_HANDLE arrayClassHnd = gtGetClassHandle(array, &isArrayExact, &isArrayNonNull);
if (arrayClassHnd != nullptr)
{
// We know the class of the reference
DWORD attribs = info.compCompHnd->getClassAttribs(arrayClassHnd);
if ((attribs & CORINFO_FLG_ARRAY) != 0)
{
// We know for sure it is an array
CORINFO_CLASS_HANDLE elemClassHnd = nullptr;
CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayClassHnd, &elemClassHnd);
if (arrayElemType == CORINFO_TYPE_CLASS)
{
// We know it is an array of ref types
return elemClassHnd;
}
}
}
return nullptr;
}
//------------------------------------------------------------------------
// gtGetFieldClassHandle: find class handle for a field
//
// Arguments:
// fieldHnd - field handle for field in question
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if field value is not null
//
// Return Value:
// nullptr if helper call result is not a ref class, or the class handle
// is unknown, otherwise the class handle.
//
// May examine runtime state of static field instances.
CORINFO_CLASS_HANDLE Compiler::gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull)
{
CORINFO_CLASS_HANDLE fieldClass = nullptr;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &fieldClass);
if (fieldCorType == CORINFO_TYPE_CLASS)
{
// Optionally, look at the actual type of the field's value
bool queryForCurrentClass = true;
INDEBUG(queryForCurrentClass = (JitConfig.JitQueryCurrentStaticFieldClass() > 0););
if (queryForCurrentClass)
{
#if DEBUG
const char* fieldClassName = nullptr;
const char* fieldName = eeGetFieldName(fieldHnd, &fieldClassName);
JITDUMP("Querying runtime about current class of field %s.%s (declared as %s)\n", fieldClassName, fieldName,
eeGetClassName(fieldClass));
#endif // DEBUG
// Is this a fully initialized init-only static field?
//
// Note we're not asking for speculative results here, yet.
CORINFO_CLASS_HANDLE currentClass = info.compCompHnd->getStaticFieldCurrentClass(fieldHnd);
if (currentClass != NO_CLASS_HANDLE)
{
// Yes! We know the class exactly and can rely on this to always be true.
fieldClass = currentClass;
*pIsExact = true;
*pIsNonNull = true;
JITDUMP("Runtime reports field is init-only and initialized and has class %s\n",
eeGetClassName(fieldClass));
}
else
{
JITDUMP("Field's current class not available\n");
}
}
}
return fieldClass;
}
//------------------------------------------------------------------------
// gtIsStaticGCBaseHelperCall: true if tree is fetching the gc static base
// for a subsequent static field access
//
// Arguments:
// tree - tree to consider
//
// Return Value:
// true if the tree is a suitable helper call
//
// Notes:
// Excludes R2R helpers as they specify the target field in a way
// that is opaque to the jit.
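// For illustration (schematic IR, offset is made up): in
// IND(ADD(CALL help.GETSHARED_GCSTATIC_BASE, CNS_INT 0x10 [FldSeq])), the CALL node is what this
// predicate matches; the caller (see gtGetClassHandle's GT_IND case) then reads the field info
// off the constant operand.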
bool Compiler::gtIsStaticGCBaseHelperCall(GenTree* tree)
{
if (tree->OperGet() != GT_CALL)
{
return false;
}
GenTreeCall* call = tree->AsCall();
if (call->gtCallType != CT_HELPER)
{
return false;
}
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
        // We are looking for a REF type, so we only need to check for the GC base helpers
case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
return true;
default:
break;
}
return false;
}
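//------------------------------------------------------------------------
// ParseArrayAddress: Decompose a tree representing an array element address into the array object,
// the value number of the index expression, and any struct field sequence applied within the element.
//
// Arguments:
//    comp      - the Compiler object
//    arrayInfo - info about the array element type, size and first-element offset
//    pArr      - [out] the array object reference, or nullptr if none was found
//    pInxVN    - [out] value number of the (recovered) index expression
//    pFldSeq   - [out] field sequence for struct fields accessed within the element
//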
void GenTree::ParseArrayAddress(
Compiler* comp, ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq)
{
*pArr = nullptr;
ValueNum inxVN = ValueNumStore::NoVN;
target_ssize_t offset = 0;
FieldSeqNode* fldSeq = nullptr;
ParseArrayAddressWork(comp, 1, pArr, &inxVN, &offset, &fldSeq);
// If we didn't find an array reference (perhaps it is the constant null?) we will give up.
if (*pArr == nullptr)
{
return;
}
    // OK, now we have to figure out if any part of the "offset" is a constant contribution to the index.
// First, sum the offsets of any fields in fldSeq.
unsigned fieldOffsets = 0;
FieldSeqNode* fldSeqIter = fldSeq;
// Also, find the first non-pseudo field...
assert(*pFldSeq == nullptr);
while (fldSeqIter != nullptr)
{
if (fldSeqIter == FieldSeqStore::NotAField())
{
// TODO-Review: A NotAField here indicates a failure to properly maintain the field sequence
// See test case self_host_tests_x86\jit\regression\CLR-x86-JIT\v1-m12-beta2\ b70992\ b70992.exe
// Safest thing to do here is to drop back to MinOpts
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (comp->opts.optRepeat)
{
// We don't guarantee preserving these annotations through the entire optimizer, so
// just conservatively return null if under optRepeat.
*pArr = nullptr;
return;
}
#endif // DEBUG
noway_assert(!"fldSeqIter is NotAField() in ParseArrayAddress");
}
if (!FieldSeqStore::IsPseudoField(fldSeqIter->m_fieldHnd))
{
if (*pFldSeq == nullptr)
{
*pFldSeq = fldSeqIter;
}
CORINFO_CLASS_HANDLE fldCls = nullptr;
noway_assert(fldSeqIter->m_fieldHnd != nullptr);
CorInfoType cit = comp->info.compCompHnd->getFieldType(fldSeqIter->m_fieldHnd, &fldCls);
fieldOffsets += comp->compGetTypeSize(cit, fldCls);
}
fldSeqIter = fldSeqIter->m_next;
}
// Is there some portion of the "offset" beyond the first-elem offset and the struct field suffix we just computed?
if (!FitsIn<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset) ||
!FitsIn<target_ssize_t>(arrayInfo->m_elemSize))
{
// This seems unlikely, but no harm in being safe...
*pInxVN = comp->GetValueNumStore()->VNForExpr(nullptr, TYP_INT);
return;
}
// Otherwise...
target_ssize_t offsetAccountedFor = static_cast<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset);
target_ssize_t elemSize = static_cast<target_ssize_t>(arrayInfo->m_elemSize);
target_ssize_t constIndOffset = offset - offsetAccountedFor;
// This should be divisible by the element size...
assert((constIndOffset % elemSize) == 0);
target_ssize_t constInd = constIndOffset / elemSize;
ValueNumStore* vnStore = comp->GetValueNumStore();
if (inxVN == ValueNumStore::NoVN)
{
// Must be a constant index.
*pInxVN = vnStore->VNForPtrSizeIntCon(constInd);
}
else
{
//
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
//
// The value associated with the index value number (inxVN) is the offset into the array,
// which has been scaled by element size. We need to recover the array index from that offset
if (vnStore->IsVNConstant(inxVN))
{
target_ssize_t index = vnStore->CoercedConstantValue<target_ssize_t>(inxVN);
noway_assert(elemSize > 0 && ((index % elemSize) == 0));
*pInxVN = vnStore->VNForPtrSizeIntCon((index / elemSize) + constInd);
}
else
{
bool canFoldDiv = false;
// If the index VN is a MUL by elemSize, see if we can eliminate it instead of adding
// the division by elemSize.
VNFuncApp funcApp;
if (vnStore->GetVNFunc(inxVN, &funcApp) && funcApp.m_func == (VNFunc)GT_MUL)
{
ValueNum vnForElemSize = vnStore->VNForLongCon(elemSize);
                // One of the multiply operands is elemSize, so the resulting
// index VN should simply be the other operand.
if (funcApp.m_args[1] == vnForElemSize)
{
*pInxVN = funcApp.m_args[0];
canFoldDiv = true;
}
else if (funcApp.m_args[0] == vnForElemSize)
{
*pInxVN = funcApp.m_args[1];
canFoldDiv = true;
}
}
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
if (!canFoldDiv)
{
ValueNum vnForElemSize = vnStore->VNForPtrSizeIntCon(elemSize);
ValueNum vnForScaledInx = vnStore->VNForFunc(TYP_I_IMPL, VNFunc(GT_DIV), inxVN, vnForElemSize);
*pInxVN = vnForScaledInx;
}
if (constInd != 0)
{
ValueNum vnForConstInd = comp->GetValueNumStore()->VNForPtrSizeIntCon(constInd);
VNFunc vnFunc = VNFunc(GT_ADD);
*pInxVN = comp->GetValueNumStore()->VNForFunc(TYP_I_IMPL, vnFunc, *pInxVN, vnForConstInd);
}
}
}
}
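// ParseArrayAddressWork: recursive helper for ParseArrayAddress. Walks ADD/SUB/MUL/LSH (and benign
// COMMA) nodes, accumulating the constant portion of the address into *pOffset (scaled by inputMul),
// recording the TYP_REF array object into *pArr, and folding everything else into the index value
// number *pInxVN.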
void GenTree::ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq)
{
if (TypeGet() == TYP_REF)
{
// This must be the array pointer.
*pArr = this;
assert(inputMul == 1); // Can't multiply the array pointer by anything.
}
else
{
switch (OperGet())
{
case GT_CNS_INT:
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, AsIntCon()->gtFieldSeq);
assert(!AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
*pOffset += (inputMul * (target_ssize_t)(AsIntCon()->gtIconVal));
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
if (OperGet() == GT_SUB)
{
inputMul = -inputMul;
}
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
case GT_MUL:
{
// If one op is a constant, continue parsing down.
target_ssize_t subMul = 0;
GenTree* nonConst = nullptr;
if (AsOp()->gtOp1->IsCnsIntOrI())
{
// If the other arg is an int constant, and is a "not-a-field", choose
// that as the multiplier, thus preserving constant index offsets...
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
AsOp()->gtOp2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
else
{
assert(!AsOp()->gtOp1->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp1->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp2;
}
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
if (nonConst != nullptr)
{
nonConst->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
}
break;
case GT_LSH:
// If one op is a constant, continue parsing down.
if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
target_ssize_t shiftVal = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
target_ssize_t subMul = target_ssize_t{1} << shiftVal;
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
break;
case GT_COMMA:
// We don't care about exceptions for this purpose.
if (AsOp()->gtOp1->OperIs(GT_BOUNDS_CHECK) || AsOp()->gtOp1->IsNothingNode())
{
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
break;
default:
break;
}
// If we didn't return above, must be a contribution to the non-constant part of the index VN.
ValueNum vn = comp->GetValueNumStore()->VNLiberalNormalValue(gtVNPair);
if (inputMul != 1)
{
ValueNum mulVN = comp->GetValueNumStore()->VNForLongCon(inputMul);
vn = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_MUL), mulVN, vn);
}
if (*pInxVN == ValueNumStore::NoVN)
{
*pInxVN = vn;
}
else
{
*pInxVN = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_ADD), *pInxVN, vn);
}
}
}
bool GenTree::ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
if (OperIsIndir())
{
if (gtFlags & GTF_IND_ARR_INDEX)
{
bool b = comp->GetArrayInfoMap()->Lookup(this, arrayInfo);
assert(b);
return true;
}
// Otherwise...
GenTree* addr = AsIndir()->Addr();
return addr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
else
{
return false;
}
}
bool GenTree::ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_ADD:
{
GenTree* arrAddr = nullptr;
GenTree* offset = nullptr;
if (AsOp()->gtOp1->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp1;
offset = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp2;
offset = AsOp()->gtOp1;
}
else
{
return false;
}
if (!offset->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return arrAddr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
case GT_ADDR:
{
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->OperGet() != GT_IND)
{
return false;
}
else
{
// The "Addr" node might be annotated with a zero-offset field sequence.
FieldSeqNode* zeroOffsetFldSeq = nullptr;
if (comp->GetZeroOffsetFieldMap()->Lookup(this, &zeroOffsetFldSeq))
{
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, zeroOffsetFldSeq);
}
return addrArg->ParseArrayElemForm(comp, arrayInfo, pFldSeq);
}
}
default:
return false;
}
}
bool GenTree::ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_CNS_INT:
{
GenTreeIntCon* icon = AsIntCon();
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, icon->gtFieldSeq);
return true;
}
case GT_ADD:
if (!AsOp()->gtOp1->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return AsOp()->gtOp2->ParseOffsetForm(comp, pFldSeq);
default:
return false;
}
}
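// LabelIndex: walk an array index expression and annotate the nodes that contribute to the index:
// integer constants get a ConstantIndex pseudo-field sequence (when "isConst" is true), GT_LCL_VAR
// nodes get GTF_VAR_ARR_INDEX, and GT_ARR_LENGTH nodes get GTF_ARRLEN_ARR_IDX.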
void GenTree::LabelIndex(Compiler* comp, bool isConst)
{
switch (OperGet())
{
case GT_CNS_INT:
// If we got here, this is a contribution to the constant part of the index.
if (isConst)
{
AsIntCon()->gtFieldSeq =
comp->GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
}
return;
case GT_LCL_VAR:
gtFlags |= GTF_VAR_ARR_INDEX;
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->LabelIndex(comp, isConst);
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
case GT_CAST:
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
case GT_ARR_LENGTH:
gtFlags |= GTF_ARRLEN_ARR_IDX;
return;
default:
// For all other operators, peel off one constant; and then label the other if it's also a constant.
if (OperIsArithmetic() || OperIsCompare())
{
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
}
else if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
}
// Otherwise continue downward on both, labeling vars.
AsOp()->gtOp1->LabelIndex(comp, false);
AsOp()->gtOp2->LabelIndex(comp, false);
}
break;
}
}
// Note that the value of the below field doesn't matter; it exists only to provide a distinguished address.
//
// static
FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr);
// FieldSeqStore methods.
FieldSeqStore::FieldSeqStore(CompAllocator alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
{
}
FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd)
{
FieldSeqNode fsn(fieldHnd, nullptr);
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
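// Append: concatenate two field sequences. A nullptr sequence acts as the identity, NotAField() is
// absorbing (the result is NotAField()), and two adjacent ConstantIndex pseudo-fields collapse into
// one; results are canonicalized through m_canonMap so that equal sequences share the same node.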
FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b)
{
if (a == nullptr)
{
return b;
}
else if (a == NotAField())
{
return NotAField();
}
else if (b == nullptr)
{
return a;
}
else if (b == NotAField())
{
return NotAField();
        // Extremely special case for ConstantIndex pseudo-fields -- appending two consecutive
        // ones collapses them into one.
}
else if (a->m_next == nullptr && a->m_fieldHnd == ConstantIndexPseudoField &&
b->m_fieldHnd == ConstantIndexPseudoField)
{
return b;
}
else
{
// We should never add a duplicate FieldSeqNode
assert(a != b);
FieldSeqNode* tmp = Append(a->m_next, b);
FieldSeqNode fsn(a->m_fieldHnd, tmp);
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
}
// Static vars.
int FieldSeqStore::FirstElemPseudoFieldStruct;
int FieldSeqStore::ConstantIndexPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::FirstElemPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::FirstElemPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::ConstantIndexPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::ConstantIndexPseudoFieldStruct;
bool FieldSeqNode::IsFirstElemFieldSeq()
{
return m_fieldHnd == FieldSeqStore::FirstElemPseudoField;
}
bool FieldSeqNode::IsConstantIndexFieldSeq()
{
return m_fieldHnd == FieldSeqStore::ConstantIndexPseudoField;
}
bool FieldSeqNode::IsPseudoField() const
{
return m_fieldHnd == FieldSeqStore::FirstElemPseudoField || m_fieldHnd == FieldSeqStore::ConstantIndexPseudoField;
}
#ifdef FEATURE_SIMD
GenTreeSIMD* Compiler::gtNewSIMDNode(
var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
GenTreeSIMD* Compiler::gtNewSIMDNode(var_types type,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, op2, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
//-------------------------------------------------------------------
// SetOpLclRelatedToSIMDIntrinsic: Determine if the tree has a local var that needs to be set
// as used by a SIMD intrinsic, and if so, set that local var appropriately.
//
// Arguments:
// op - The tree, to be an operand of a new GT_SIMD node, to check.
//
void Compiler::SetOpLclRelatedToSIMDIntrinsic(GenTree* op)
{
if (op == nullptr)
{
return;
}
if (op->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(op);
}
else if (op->OperIs(GT_OBJ))
{
GenTree* addr = op->AsIndir()->Addr();
if (addr->OperIs(GT_ADDR))
{
GenTree* addrOp1 = addr->AsOp()->gtGetOp1();
if (addrOp1->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(addrOp1);
}
}
}
}
bool GenTree::isCommutativeSIMDIntrinsic()
{
assert(gtOper == GT_SIMD);
switch (AsSIMD()->GetSIMDIntrinsicId())
{
case SIMDIntrinsicBitwiseAnd:
case SIMDIntrinsicBitwiseOr:
case SIMDIntrinsicEqual:
return true;
default:
return false;
}
}
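//-------------------------------------------------------------------
// ResetOperandArray: Reset the operand array to a new size, clearing all operands (in DEBUG builds).
//
// Arguments:
//    newOperandCount    - The number of operands the node will have after the reset
//    compiler           - The compiler, used to allocate a new dynamic array if one is needed
//    inlineOperands     - The node's inline operand storage
//    inlineOperandCount - The number of operands that fit in the inline storage
//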
void GenTreeMultiOp::ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount)
{
size_t oldOperandCount = GetOperandCount();
GenTree** oldOperands = GetOperandArray();
if (newOperandCount > oldOperandCount)
{
if (newOperandCount <= inlineOperandCount)
{
assert(oldOperandCount <= inlineOperandCount);
assert(oldOperands == inlineOperands);
}
else
{
// The most difficult case: we need to recreate the dynamic array.
assert(compiler != nullptr);
m_operands = compiler->getAllocator(CMK_ASTNode).allocate<GenTree*>(newOperandCount);
}
}
else
{
        // We are shrinking the array and may in the process switch to an inline representation.
        // We choose to do so for simplicity ("if a node has <= InlineOperandCount operands,
        // then it stores them inline"), but it may actually be more profitable not to: skipping
        // the switch would save us a copy and a potential cache miss (though the latter seems unlikely).
if ((newOperandCount <= inlineOperandCount) && (oldOperands != inlineOperands))
{
m_operands = inlineOperands;
}
}
#ifdef DEBUG
for (size_t i = 0; i < newOperandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
SetOperandCount(newOperandCount);
}
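//-------------------------------------------------------------------
// OperandsAreEqual: Check whether two multi-operand nodes have pairwise-equal operands.
//
// Arguments:
//    op1 - The first node
//    op2 - The second node
//
// Return Value:
//    "true" if both nodes have the same operand count and every pair of corresponding
//    operands compares equal via Compare, "false" otherwise.
//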
/* static */ bool GenTreeMultiOp::OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2)
{
if (op1->GetOperandCount() != op2->GetOperandCount())
{
return false;
}
for (size_t i = 1; i <= op1->GetOperandCount(); i++)
{
if (!Compare(op1->Op(i), op2->Op(i)))
{
return false;
}
}
return true;
}
void GenTreeMultiOp::InitializeOperands(GenTree** operands, size_t operandCount)
{
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = operands[i];
gtFlags |= (operands[i]->gtFlags & GTF_ALL_EFFECT);
}
SetOperandCount(operandCount);
}
var_types GenTreeJitIntrinsic::GetAuxiliaryType() const
{
CorInfoType auxiliaryJitType = GetAuxiliaryJitType();
if (auxiliaryJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(auxiliaryJitType);
}
var_types GenTreeJitIntrinsic::GetSimdBaseType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(simdBaseJitType);
}
// Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeSIMD::OperIsMemoryLoad() const
{
if (GetSIMDIntrinsicId() == SIMDIntrinsicInitArray)
{
return true;
}
return false;
}
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeSIMD::Equals(GenTreeSIMD* op1, GenTreeSIMD* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetSIMDIntrinsicId() == op2->GetSIMDIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool GenTree::isCommutativeHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
return HWIntrinsicInfo::IsCommutative(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif // TARGET_XARCH
}
bool GenTree::isContainableHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_SSE_LoadAlignedVector128:
case NI_SSE_LoadScalarVector128:
case NI_SSE_LoadVector128:
case NI_SSE2_LoadAlignedVector128:
case NI_SSE2_LoadScalarVector128:
case NI_SSE2_LoadVector128:
case NI_AVX_LoadAlignedVector256:
case NI_AVX_LoadVector256:
case NI_AVX_ExtractVector128:
case NI_AVX2_ExtractVector128:
{
return true;
}
default:
{
return false;
}
}
#elif TARGET_ARM64
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_Vector64_get_Zero:
case NI_Vector128_get_Zero:
{
return true;
}
default:
{
return false;
}
}
#else
return false;
#endif // TARGET_XARCH
}
bool GenTree::isRMWHWIntrinsic(Compiler* comp)
{
assert(gtOper == GT_HWINTRINSIC);
assert(comp != nullptr);
#if defined(TARGET_XARCH)
if (!comp->canUseVexEncoding())
{
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
}
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
// TODO-XArch-Cleanup: Move this switch block to be table driven.
case NI_SSE42_Crc32:
case NI_SSE42_X64_Crc32:
case NI_FMA_MultiplyAdd:
case NI_FMA_MultiplyAddNegated:
case NI_FMA_MultiplyAddNegatedScalar:
case NI_FMA_MultiplyAddScalar:
case NI_FMA_MultiplyAddSubtract:
case NI_FMA_MultiplySubtract:
case NI_FMA_MultiplySubtractAdd:
case NI_FMA_MultiplySubtractNegated:
case NI_FMA_MultiplySubtractNegatedScalar:
case NI_FMA_MultiplySubtractScalar:
{
return true;
}
default:
{
return false;
}
}
#elif defined(TARGET_ARM64)
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2, op3);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
SetOpLclRelatedToSIMDIntrinsic(op4);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic, op1, op2, op3, op4);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree** operands,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), operandCount);
for (size_t i = 0; i < operandCount; i++)
{
nodeBuilder.AddOperand(i, operands[i]);
SetOpLclRelatedToSIMDIntrinsic(operands[i]);
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
for (size_t i = 0; i < nodeBuilder.GetOperandCount(); i++)
{
SetOpLclRelatedToSIMDIntrinsic(nodeBuilder.GetOperand(i));
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
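//-------------------------------------------------------------------
// gtNewSimdAbsNode: Create a tree computing the element-wise absolute value of a SIMD vector.
//
// Arguments:
//    type                - The SIMD type of the node
//    op1                 - The vector operand
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - Whether the node is a SimdAsHWIntrinsic
//
// Return Value:
//    The created node, or op1 unchanged when the base type is unsigned.
//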
GenTree* Compiler::gtNewSimdAbsNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeGet() == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
if (varTypeIsUnsigned(simdBaseType))
{
return op1;
}
#if defined(TARGET_XARCH)
if (varTypeIsFloating(simdBaseType))
{
// Abs(v) = v & ~new vector<T>(-0.0);
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
GenTree* bitMask = gtNewDconNode(-0.0, simdBaseType);
bitMask = gtNewSimdCreateBroadcastNode(type, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND_NOT, type, op1, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
if ((simdBaseType != TYP_LONG) && ((simdSize == 32) || compOpportunisticallyDependsOn(InstructionSet_SSSE3)))
{
NamedIntrinsic intrinsic = (simdSize == 32) ? NI_AVX2_Abs : NI_SSSE3_Abs;
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
GenTree* tmp;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
// op1 = op1 < Zero
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, tmp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// tmp = Zero - op1Dup1
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdBinOpNode(GT_SUB, type, tmp, op1Dup1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, tmp, op1Dup2)
return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
NamedIntrinsic intrinsic = NI_AdvSimd_Abs;
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
else if (varTypeIsLong(simdBaseType))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif
}
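//-------------------------------------------------------------------
// gtNewSimdBinOpNode: Create a tree computing an element-wise binary operation on two SIMD operands.
//
// Arguments:
//    op                  - The binary operation (GT_ADD, GT_AND, GT_MUL, GT_LSH, ...)
//    type                - The SIMD type of the node
//    op1                 - The first operand
//    op2                 - The second operand (for shifts, the scalar shift count)
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - Whether the node is a SimdAsHWIntrinsic
//
// Return Value:
//    The created node, expanded to the appropriate hardware intrinsic(s) for the target.
//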
GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op1 != nullptr);
assert(op1->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
assert(op2 != nullptr);
if ((op == GT_LSH) || (op == GT_RSH) || (op == GT_RSZ))
{
assert(op2->TypeIs(TYP_INT));
}
else
{
assert(op2->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
}
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_ADD:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Add;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Add;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Add;
}
else
{
intrinsic = NI_SSE2_Add;
}
break;
}
case GT_AND:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_And;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_And;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_And;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_And;
}
else
{
intrinsic = NI_SSE2_And;
}
break;
}
case GT_AND_NOT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_AndNot;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_AndNot;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_AndNot;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_AndNot;
}
else
{
intrinsic = NI_SSE2_AndNot;
}
// GT_AND_NOT expects `op1 & ~op2`, but xarch does `~op1 & op2`
std::swap(op1, op2);
break;
}
case GT_DIV:
{
// TODO-XARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Divide;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Divide;
}
else
{
intrinsic = NI_SSE2_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsByte(simdBaseType));
assert(!varTypeIsFloating(simdBaseType));
assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
            // "Over-shifting" is platform-specific behavior. We match the C# behavior by masking
            // the shift count with (sizeof(T) * 8) - 1, which ensures the shift cannot exceed the
            // number of bits available in `T`. This is roughly equivalent to x % (sizeof(T) * 8),
            // but the modulo is "more expensive" and only equivalent for unsigned inputs, whereas
            // we have a signed input and so negative values would differ.
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_SSE2_ConvertScalarToVector128Int32, CORINFO_TYPE_INT,
16, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (op == GT_LSH)
{
intrinsic = NI_AVX2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AVX2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AVX2_ShiftRightLogical;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_SSE2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_SSE2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_SSE2_ShiftRightLogical;
}
break;
}
case GT_MUL:
{
GenTree** broadcastOp = nullptr;
if (varTypeIsArithmetic(op1))
{
broadcastOp = &op1;
}
else if (varTypeIsArithmetic(op2))
{
broadcastOp = &op2;
}
if (broadcastOp != nullptr)
{
*broadcastOp =
gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
switch (simdBaseType)
{
case TYP_SHORT:
case TYP_USHORT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else
{
intrinsic = NI_SSE2_MultiplyLow;
}
break;
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_MultiplyLow;
}
else
{
// op1Dup = op1
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector multiply"));
// op2Dup = op2
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector multiply"));
// op1 = Sse2.ShiftRightLogical128BitLane(op1, 4)
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
                        // op2 = Sse2.ShiftRightLogical128BitLane(op2, 4)
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Multiply(op2.AsUInt32(), op1.AsUInt32()).AsInt32()
op2 = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_SSE2_Multiply, CORINFO_TYPE_ULONG, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Shuffle(op2, (0, 0, 2, 0))
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Multiply(op1Dup.AsUInt32(), op2Dup.AsUInt32()).AsInt32()
op1 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_Multiply, CORINFO_TYPE_ULONG,
simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Shuffle(op1, (0, 0, 2, 0))
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = Sse2.UnpackLow(op1, op2)
intrinsic = NI_SSE2_UnpackLow;
}
break;
}
case TYP_FLOAT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE2_Multiply;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Or;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Or;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Or;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Or;
}
else
{
intrinsic = NI_SSE2_Or;
}
break;
}
case GT_SUB:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Subtract;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Subtract;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Subtract;
}
else
{
intrinsic = NI_SSE2_Subtract;
}
break;
}
case GT_XOR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Xor;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Xor;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Xor;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Xor;
}
else
{
intrinsic = NI_SSE2_Xor;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_ADD:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AddScalar : NI_AdvSimd_Arm64_Add;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_AddScalar;
}
else
{
intrinsic = NI_AdvSimd_Add;
}
break;
}
case GT_AND:
{
intrinsic = NI_AdvSimd_And;
break;
}
case GT_AND_NOT:
{
intrinsic = NI_AdvSimd_BitwiseClear;
break;
}
case GT_DIV:
{
// TODO-AARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_DivideScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsFloating(simdBaseType));
assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
            // "Over-shifting" is platform-specific behavior. We match the C# behavior by masking
            // the shift count with (sizeof(T) * 8) - 1, which ensures the shift cannot exceed the
            // number of bits available in `T`. This is roughly equivalent to x % (sizeof(T) * 8),
            // but the modulo is "more expensive" and only equivalent for unsigned inputs, whereas
            // we have a signed input and so negative values would differ.
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmeticScalar;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogicalScalar;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogical;
}
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
if (op != GT_LSH)
{
op2 = gtNewOperNode(GT_NEG, TYP_INT, op2);
}
op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmeticScalar;
}
                    else
                    {
                        assert(op == GT_RSZ);
                        intrinsic = NI_AdvSimd_ShiftLogicalScalar;
                    }
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftLogical;
}
}
break;
}
case GT_MUL:
{
assert(!varTypeIsLong(simdBaseType));
GenTree** scalarOp = nullptr;
if (varTypeIsArithmetic(op1))
{
// MultiplyByScalar requires the scalar op to be op2
std::swap(op1, op2);
scalarOp = &op2;
}
else if (varTypeIsArithmetic(op2))
{
scalarOp = &op2;
}
switch (JitType2PreciseVarType(simdBaseJitType))
{
case TYP_BYTE:
case TYP_UBYTE:
{
if (scalarOp != nullptr)
{
*scalarOp = gtNewSimdCreateBroadcastNode(type, *scalarOp, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
intrinsic = NI_AdvSimd_Multiply;
break;
}
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
case TYP_FLOAT:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_CreateScalarUnsafe,
simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_Arm64_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_Create, simdBaseJitType,
8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Arm64_Multiply;
}
if (simdSize == 8)
{
intrinsic = NI_AdvSimd_MultiplyScalar;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
intrinsic = NI_AdvSimd_Or;
break;
}
case GT_SUB:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_SubtractScalar : NI_AdvSimd_Arm64_Subtract;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_SubtractScalar;
}
else
{
intrinsic = NI_AdvSimd_Subtract;
}
break;
}
case GT_XOR:
{
intrinsic = NI_AdvSimd_Xor;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCeilNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Ceiling;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Ceiling;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_CeilingScalar : NI_AdvSimd_Arm64_Ceiling;
}
else
{
intrinsic = NI_AdvSimd_Ceiling;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
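//-------------------------------------------------------------------
// gtNewSimdCmpOpNode: Create a tree computing an element-wise comparison of two SIMD operands,
//    producing a per-element mask of AllBitsSet (true) or Zero (false).
//
// Arguments:
//    op                  - The comparison (GT_EQ, GT_GE, GT_GT, GT_LE, GT_LT)
//    type                - The SIMD type of the node
//    op1                 - The first operand
//    op2                 - The second operand
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - Whether the node is a SimdAsHWIntrinsic
//
// Return Value:
//    The created node.
//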
GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareEqual;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareEqual;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_CompareEqual;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// tmp = (op1 == op2) i.e. compare for equality as if op1 and op2 are vector of int
// op1 = tmp
// op2 = Shuffle(tmp, (2, 3, 0, 1))
// result = BitwiseAnd(op1, op2)
//
// Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of
// respective long elements.
GenTree* tmp =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
tmp = impCloneExpr(tmp, &op1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp for vector Equals"));
op2 = gtNewSimdHWIntrinsicNode(type, tmp, gtNewIconNode(SHUFFLE_ZWXY), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareEqual;
}
break;
}
case GT_GE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareGreaterThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = GreaterThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_GT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports > for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector GreaterThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// This should have been mutated by the above path
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareGreaterThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareGreaterThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
                    // Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit
                    // integers that comprise the longs op1 and op2.
//
// GreaterThan(op1, op2) can be expressed in terms of > relationship between 32-bit integers that
// comprise op1 and op2 as
// = (x1, y1) > (x2, y2)
// = (x1 > x2) || [(x1 == x2) && (y1 > y2)] - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
// t = (op1 > op2) - 32-bit signed comparison
// u = (op1Dup1 == op2Dup1) - 32-bit equality comparison
// v = (op1Dup2 > op2Dup2) - 32-bit unsigned comparison
//
// op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 > x2) in eq(1) above
// v = Shuffle(v, (2, 2, 0, 0)) - This corresponds to (y1 > y2) in eq(1) above
// u = Shuffle(u, (3, 3, 1, 1)) - This corresponds to (x1 == x2) in eq(1) above
// op2 = BitwiseAnd(v, u) - This corresponds to [(x1 == x2) && (y1 > y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector GreaterThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareLessThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = LessThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_LT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports < for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector LessThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// This should have been mutated by the above path
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareLessThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareLessThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
                    // Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit
                    // integers that comprise the longs op1 and op2.
//
                    // LessThan(op1, op2) can be expressed in terms of < relationships between the 32-bit
                    // integers that comprise op1 and op2 as
                    //                    =  (x1, y1) < (x2, y2)
                    //                    =  (x1 < x2) || [(x1 == x2) && (y1 < y2)]   - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
                    // t = (op1 < op2)                - 32-bit signed comparison
                    // u = (op1Dup1 == op2Dup1)       - 32-bit equality comparison
                    // v = (op1Dup2 < op2Dup2)        - 32-bit unsigned comparison
                    //
                    // op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 < x2) in eq(1) above
                    // v = Shuffle(v, (2, 2, 0, 0))   - This corresponds to (y1 < y2) in eq(1) above
                    // u = Shuffle(u, (3, 3, 1, 1))   - This corresponds to (x1 == x2) in eq(1) above
                    // op2 = BitwiseAnd(v, u)         - This corresponds to [(x1 == x2) && (y1 < y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector LessThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareLessThan;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareEqualScalar : NI_AdvSimd_Arm64_CompareEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareEqual;
}
break;
}
case GT_GE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareGreaterThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThanOrEqual;
}
break;
}
case GT_GT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic =
(simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanScalar : NI_AdvSimd_Arm64_CompareGreaterThan;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareLessThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThanOrEqual;
}
break;
}
case GT_LT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanScalar : NI_AdvSimd_Arm64_CompareLessThan;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThan;
}
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
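//-------------------------------------------------------------------
// gtNewSimdCmpOpAllNode: Create a tree that is "true" only if the comparison holds for all elements.
//
// Arguments:
//    op                  - The comparison (GT_EQ, GT_GE, GT_GT, GT_LE, GT_LT)
//    type                - The return type (TYP_BOOL)
//    op1                 - The first vector operand
//    op2                 - The second vector operand
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - Whether the node is a SimdAsHWIntrinsic
//
// Return Value:
//    The created node.
//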
GenTree* Compiler::gtNewSimdCmpOpAllNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
}
else
{
intrinsic = NI_Vector128_op_Equality;
}
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
getAllBitsSet = NI_Vector256_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Equality : NI_Vector128_op_Equality;
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 8)
{
intrinsic = NI_Vector64_op_Equality;
getAllBitsSet = NI_Vector64_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
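//-------------------------------------------------------------------
// gtNewSimdCmpOpAnyNode: Create a tree that is "true" if the comparison holds for any element.
//
// Arguments:
//    op                  - The comparison (GT_EQ, GT_GE, GT_GT, GT_LE, GT_LT, GT_NE)
//    type                - The return type (TYP_BOOL)
//    op1                 - The first vector operand
//    op2                 - The second vector operand
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - Whether the node is a SimdAsHWIntrinsic
//
// Return Value:
//    The created node.
//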
GenTree* Compiler::gtNewSimdCmpOpAnyNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
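//-------------------------------------------------------------------
// gtNewSimdCndSelNode: Create a tree computing a per-element conditional select,
//    (op1 & op2) | (~op1 & op3), i.e. op2 where the mask op1 is set and op3 elsewhere.
//
// Arguments:
//    type                - The SIMD type of the node
//    op1                 - The selection mask
//    op2                 - The value selected where the mask is set
//    op3                 - The value selected where the mask is clear
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - Whether the node is a SimdAsHWIntrinsic
//
// Return Value:
//    The created node.
//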
GenTree* Compiler::gtNewSimdCndSelNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
assert(op3 != nullptr);
assert(op3->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
// TODO-XARCH-CQ: It's likely beneficial to have a dedicated CndSel node so we
// can special case when the condition is the result of various compare operations.
//
// When it is, the condition is AllBitsSet or Zero on a per-element basis and we
// could change this to be a Blend operation in lowering as an optimization.
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector conditional select"));
// op2 = op2 & op1
op2 = gtNewSimdBinOpNode(GT_AND, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op3 = op3 & ~op1Dup
op3 = gtNewSimdBinOpNode(GT_AND_NOT, type, op3, op1Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op2 | op3
return gtNewSimdBinOpNode(GT_OR, type, op2, op3, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
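//-------------------------------------------------------------------
// gtNewSimdCreateBroadcastNode: Create a tree that broadcasts a scalar value to all elements
//    of a SIMD vector.
//
// Arguments:
//    type                - The SIMD type of the node
//    op1                 - The scalar value to broadcast
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - Whether the node is a SimdAsHWIntrinsic
//
// Return Value:
//    The created node.
//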
GenTree* Compiler::gtNewSimdCreateBroadcastNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_Create;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
#if defined(TARGET_XARCH)
#if defined(TARGET_X86)
if (varTypeIsLong(simdBaseType) && !op1->IsIntegralConst())
{
// TODO-XARCH-CQ: It may be beneficial to emit the movq
// instruction, which takes a 64-bit memory address and
// works on 32-bit x86 systems.
unreached();
}
#endif // TARGET_X86
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_Create;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
hwIntrinsicID = NI_Vector64_Create;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
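//-------------------------------------------------------------------
// gtNewSimdDotProdNode: Create a tree computing the dot product of two SIMD operands.
//
// Arguments:
//    type                - The scalar return type
//    op1                 - The first vector operand
//    op2                 - The second vector operand
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - Whether the node is a SimdAsHWIntrinsic
//
// Return Value:
//    The created node.
//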
GenTree* Compiler::gtNewSimdDotProdNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsArithmetic(type));
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(JITtype2varType(simdBaseJitType) == type);
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
if (simdSize == 32)
{
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_Dot;
}
else
{
assert(((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT)) ||
compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_Vector128_Dot;
}
#elif defined(TARGET_ARM64)
assert(!varTypeIsLong(simdBaseType));
intrinsic = (simdSize == 8) ? NI_Vector64_Dot : NI_Vector128_Dot;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdFloorNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
intrinsic = NI_AVX_Floor;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Floor;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_FloorScalar : NI_AdvSimd_Arm64_Floor;
}
else
{
intrinsic = NI_AdvSimd_Floor;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
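//-------------------------------------------------------------------
// gtNewSimdGetElementNode: Create a tree extracting the element at a given index from a SIMD vector,
//    adding a bounds check when the index is not a constant known to be in range.
//
// Arguments:
//    type                - The type of the element being extracted
//    op1                 - The vector operand
//    op2                 - The element index
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - Whether the node is a SimdAsHWIntrinsic
//
// Return Value:
//    The created node.
//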
GenTree* Compiler::gtNewSimdGetElementNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic intrinsicId = NI_Vector128_GetElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
intrinsicId = NI_Vector256_GetElement;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
intrinsicId = NI_Vector64_GetElement;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
int immUpperBound = getSIMDVectorLength(simdSize, simdBaseType) - 1;
bool rangeCheckNeeded = !op2->OperIsConst();
if (!rangeCheckNeeded)
{
ssize_t imm8 = op2->AsIntCon()->IconValue();
rangeCheckNeeded = (imm8 < 0) || (imm8 > immUpperBound);
}
if (rangeCheckNeeded)
{
op2 = addRangeCheckForHWIntrinsic(op2, 0, immUpperBound);
}
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
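//-------------------------------------------------------------------
// gtNewSimdMaxNode: Create a tree computing the element-wise maximum of two SIMD operands.
//
// Arguments:
//    type                - The SIMD type of the node
//    op1                 - The first operand
//    op2                 - The second operand
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - Whether the node is a SimdAsHWIntrinsic
//
// Return Value:
//    The created node. When the target has no direct support for the base type, the
//    operation is expanded into a compare followed by a conditional select.
//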
GenTree* Compiler::gtNewSimdMaxNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Max;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Max;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Max(op1, op2)
op1 = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Max;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Max;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MaxScalar : NI_AdvSimd_Arm64_Max;
}
else
{
intrinsic = NI_AdvSimd_Max;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Max"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Max"));
// op1 = op1 > op2
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
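//------------------------------------------------------------------------
// gtNewSimdMinNode: Creates a new simd Min node that computes the element-wise
//    minimum of two vectors, falling back to a compare-and-select sequence when
//    no single hardware intrinsic is available for the base type.
//
// Arguments:
//    type                - The type of the node
//    op1                 - The first operand
//    op2                 - The second operand
//    simdBaseJitType     - The base JIT type of the node
//    simdSize            - The simd size of the node
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created Min node
//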
GenTree* Compiler::gtNewSimdMinNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Min;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Min;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Min(op1, op2)
op1 = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Min;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Min;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MinScalar : NI_AdvSimd_Arm64_Min;
}
else
{
intrinsic = NI_AdvSimd_Min;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Min"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Min"));
// op1 = op1 < op2
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
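//------------------------------------------------------------------------
// gtNewSimdNarrowNode: Creates a new simd Narrow node that combines the elements of
//    two input vectors into a single vector of elements of half the original width,
//    with op1 providing the lower elements of the result and op2 the upper elements.
//
// Arguments:
//    type                - The type of the node
//    op1                 - The vector providing the lower half of the result
//    op2                 - The vector providing the upper half of the result
//    simdBaseJitType     - The base JIT type of the node
//    simdSize            - The simd size of the node
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created Narrow node
//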
GenTree* Compiler::gtNewSimdNarrowNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
GenTree* tmp1;
GenTree* tmp2;
#if defined(TARGET_XARCH)
GenTree* tmp3;
GenTree* tmp4;
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
                // This is the same in principle as the cases commented below; however, due to
                // code formatting, it's too long to reasonably display here.
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U | 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8L, 8U, 9L, 9U, AL, AU, BL, BU | CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, -- | 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, -- | CL, --, DL, --, EL, --, FL, --
// tmp4 = Elements 0L, 1L, 2L, 3L, 8L, 9L, AL, BL | 4L, 5L, 6L, 7L, CL, DL, EL, FL
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L | 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector256.Create(0x0000FFFF).AsInt16();
// var tmp2 = Avx2.And(op1.AsInt16(), tmp1);
// var tmp3 = Avx2.And(op2.AsInt16(), tmp1);
// var tmp4 = Avx2.PackUnsignedSaturate(tmp2, tmp3);
// return Avx2.Permute4x64(tmp4.AsUInt64(), SHUFFLE_WYZX).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate, CORINFO_TYPE_USHORT,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0, 1 | 2, 3; 0L, 0U, 1L, 1U | 2L, 2U, 3L, 3U
// op2 = Elements 4, 5 | 6, 7; 4L, 4U, 5L, 5U | 6L, 6U, 7L, 7U
//
// tmp1 = Elements 0L, 4L, 0U, 4U | 2L, 6L, 2U, 6U
// tmp2 = Elements 1L, 5L, 1U, 5U | 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 1L, 4L, 5L | 2L, 3L, 6L, 7L
// return Elements 0L, 1L, 2L, 3L | 4L, 5L, 6L, 7L
//
// var tmp1 = Avx2.UnpackLow(op1, op2);
// var tmp2 = Avx2.UnpackHigh(op1, op2);
// var tmp3 = Avx2.UnpackLow(tmp1, tmp2);
// return Avx2.Permute4x64(tmp3.AsUInt64(), SHUFFLE_WYZX).AsUInt32();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
opBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1 | 2, 3
// op2 = Elements 4, 5 | 6, 7
//
// tmp1 = Elements 0, 1, 2, 3 | -, -, -, -
                // tmp2 = Elements 4, 5, 6, 7
// return Elements 0, 1, 2, 3 | 4, 5, 6, 7
//
// var tmp1 = Avx.ConvertToVector128Single(op1).ToVector256Unsafe();
// var tmp2 = Avx.ConvertToVector128Single(op2);
// return Avx.InsertVector128(tmp1, tmp2, 1);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, gtNewIconNode(1), NI_AVX_InsertVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
// op1 = Elements 0, 1, 2, 3, 4, 5, 6, 7; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U, 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8, 9, A, B, C, D, E, F; 8L, 8U, 9L, 9U, AL, AU, BL, BU, CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --, 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, --, CL, --, DL, --, EL, --, FL, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector128.Create((ushort)(0x00FF)).AsSByte();
// var tmp2 = Sse2.And(op1.AsSByte(), tmp1);
// var tmp3 = Sse2.And(op2.AsSByte(), tmp1);
                // return Sse2.PackUnsignedSaturate(tmp2, tmp3).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
// op1 = Elements 0, 1, 2, 3; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U
// op2 = Elements 4, 5, 6, 7; 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
//
// ...
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
// ...
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --
// tmp3 = Elements 4L, --, 5L, --, 6L, --, 7L, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Vector128.Create(0x0000FFFF).AsInt16();
// var tmp2 = Sse2.And(op1.AsInt16(), tmp1);
// var tmp3 = Sse2.And(op2.AsInt16(), tmp1);
// return Sse2.PackUnsignedSaturate(tmp2, tmp3).As<T>();
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate,
CORINFO_TYPE_USHORT, simdSize, isSimdAsHWIntrinsic);
}
else
{
// ...
//
// tmp1 = Elements 0L, 4L, 0U, 4U, 1L, 5L, 1U, 5U
// tmp2 = Elements 2L, 6L, 2U, 6U, 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 2L, 4L, 6L, 0U, 2U, 4U, 6U
// tmp4 = Elements 1L, 3L, 5L, 7L, 1U, 3U, 5U, 7U
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt16(), op2.AsUInt16());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt16(), op2.AsUInt16());
// var tmp3 = Sse2.UnpackLow(tmp1, tmp2);
// var tmp4 = Sse2.UnpackHigh(tmp1, tmp2);
// return Sse2.UnpackLow(tmp3, tmp4).As<T>();
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
                                       nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
GenTree* tmp2Dup;
tmp2 = impCloneExpr(tmp2, &tmp2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp2 for vector narrow"));
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp1Dup, tmp2Dup, NI_SSE2_UnpackHigh, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, tmp4, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
}
case TYP_INT:
case TYP_UINT:
{
// op1 = Elements 0, 1; 0L, 0U, 1L, 1U
// op2 = Elements 2, 3; 2L, 2U, 3L, 3U
//
// tmp1 = Elements 0L, 2L, 0U, 2U
// tmp2 = Elements 1L, 3L, 1U, 3U
// return Elements 0L, 1L, 2L, 3L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt32(), op2.AsUInt32());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt32(), op2.AsUInt32());
// return Sse2.UnpackLow(tmp1, tmp2).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1
// op2 = Elements 2, 3
//
// tmp1 = Elements 0, 1, -, -
                // tmp2 = Elements 2, 3, -, -
// return Elements 0, 1, 2, 3
//
// var tmp1 = Sse2.ConvertToVector128Single(op1);
// var tmp2 = Sse2.ConvertToVector128Single(op2);
// return Sse.MoveLowToHigh(tmp1, tmp2);
CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op2, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
// var tmp1 = AdvSimd.Arm64.ConvertToSingleLower(op1);
// return AdvSimd.Arm64.ConvertToSingleUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_Arm64_ConvertToSingleUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = AdvSimd.ExtractNarrowingLower(op1);
// return AdvSimd.ExtractNarrowingUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_ExtractNarrowingUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
else if (varTypeIsFloating(simdBaseType))
{
        // var tmp1 = op1.ToVector128Unsafe();
        // var tmp2 = AdvSimd.InsertScalar(tmp1, 1, op2);
        // return AdvSimd.Arm64.ConvertToSingleLower(tmp2);
CorInfoType tmp2BaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = op1.ToVector128Unsafe();
// var tmp2 = AdvSimd.InsertScalar(tmp1.AsUInt64(), 1, op2.AsUInt64());
        // return AdvSimd.ExtractNarrowingLower(tmp2).As<T>();
CorInfoType tmp2BaseJitType = varTypeIsSigned(simdBaseType) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
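//------------------------------------------------------------------------
// gtNewSimdSqrtNode: Creates a new simd Sqrt node that computes the element-wise
//    square root of a floating-point vector.
//
// Arguments:
//    type                - The type of the node
//    op1                 - The operand
//    simdBaseJitType     - The base JIT type of the node (must be floating-point)
//    simdSize            - The simd size of the node
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created Sqrt node
//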
GenTree* Compiler::gtNewSimdSqrtNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Sqrt;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Sqrt;
}
else
{
intrinsic = NI_SSE2_Sqrt;
}
#elif defined(TARGET_ARM64)
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_SqrtScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Sqrt;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
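//------------------------------------------------------------------------
// gtNewSimdSumNode: Creates a new simd Sum node that adds all elements of the
//    input vector together and produces the scalar result.
//
// Arguments:
//    type                - The scalar type of the result
//    op1                 - The vector whose elements are summed
//    simdBaseJitType     - The base JIT type of op1
//    simdSize            - The simd size of op1, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created Sum node
//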
GenTree* Compiler::gtNewSimdSumNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp = nullptr;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType);
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
// HorizontalAdd combines pairs so we need log2(vectorLength) passes to sum all elements together.
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
if (simdSize == 32)
{
        // Minus 1 because for the last pass we split the vector into low / high halves and add them together.
haddCount -= 1;
if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_HorizontalAdd;
}
}
else if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE3));
intrinsic = NI_SSE3_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSSE3));
intrinsic = NI_SSSE3_HorizontalAdd;
}
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
intrinsic = (simdBaseType == TYP_FLOAT) ? NI_SSE_Add : NI_SSE2_Add;
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(0x01, TYP_INT), NI_AVX_ExtractVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdHWIntrinsicNode(simdType, tmp, NI_Vector256_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, tmp, intrinsic, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
{
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 8)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
if (simdSize == 8)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_DOUBLE:
case TYP_LONG:
case TYP_ULONG:
{
if (simdSize == 16)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
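//------------------------------------------------------------------------
// gtNewSimdUnOpNode: Creates a new simd unary operation node (GT_NEG or GT_NOT),
//    expanding the operation into the equivalent hardware intrinsic sequence.
//
// Arguments:
//    op                  - The operation to perform (GT_NEG or GT_NOT)
//    type                - The type of the node
//    op1                 - The operand
//    simdBaseJitType     - The base JIT type of the node
//    simdSize            - The simd size of the node
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created node
//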
GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op,
var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* op2 = nullptr;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_NEG:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
}
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// Zero - op1
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case GT_NOT:
{
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = (simdSize == 32) ? NI_Vector256_get_AllBitsSet : NI_Vector128_get_AllBitsSet;
op2 = gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 ^ AllBitsSet
return gtNewSimdBinOpNode(GT_XOR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
case GT_NEG:
{
if (varTypeIsSigned(simdBaseType))
{
if (simdBaseType == TYP_LONG)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else
{
intrinsic = NI_AdvSimd_Negate;
}
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
// Zero - op1
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
case GT_NOT:
{
return gtNewSimdHWIntrinsicNode(type, op1, NI_AdvSimd_Not, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
}
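//------------------------------------------------------------------------
// gtNewSimdWidenLowerNode: Creates a new simd WidenLower node that widens the lower
//    half of the input vector's elements to elements of twice the original width.
//
// Arguments:
//    type                - The type of the node
//    op1                 - The operand
//    simdBaseJitType     - The base JIT type of op1
//    simdSize            - The simd size of the node
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created WidenLower node
//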
GenTree* Compiler::gtNewSimdWidenLowerNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 =
gtNewSimdHWIntrinsicNode(type, op1, NI_Vector256_GetLower, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if ((simdBaseType == TYP_FLOAT) || compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE2_ConvertToVector128Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen lower"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
tmp1 = op1;
}
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, 8, isSimdAsHWIntrinsic);
if (simdSize == 8)
{
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return tmp1;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
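//------------------------------------------------------------------------
// gtNewSimdWidenUpperNode: Creates a new simd WidenUpper node that widens the upper
//    half of the input vector's elements to elements of twice the original width.
//
// Arguments:
//    type                - The type of the node
//    op1                 - The operand
//    simdBaseJitType     - The base JIT type of op1
//    simdSize            - The simd size of the node
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created WidenUpper node
//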
GenTree* Compiler::gtNewSimdWidenUpperNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(1), NI_AVX_ExtractVector128, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_SSE2_ConvertToVector128Double, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(8), NI_SSE2_ShiftRightLogical128BitLane,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
GenTree* zero;
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDoubleUpper;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningUpper;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningUpper;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
ssize_t index = 8 / genTypeSize(simdBaseType);
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
zero = gtNewSimdZeroNode(TYP_SIMD16, simdBaseJitType, 16, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, zero, gtNewIconNode(index), NI_AdvSimd_ExtractVector128,
simdBaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
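//------------------------------------------------------------------------
// gtNewSimdWithElementNode: Creates a new simd WithElement node that produces a copy
//    of op1 with the element at the constant index op2 replaced by op3.
//
// Arguments:
//    type                - The type of the node
//    op1                 - The SIMD vector being updated
//    op2                 - The constant index of the element to replace
//    op3                 - The new value for the element
//    simdBaseJitType     - The base JIT type of the node
//    simdSize            - The simd size of the node
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created WithElement node
//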
GenTree* Compiler::gtNewSimdWithElementNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_WithElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op2->IsCnsIntOrI());
ssize_t imm8 = op2->AsIntCon()->IconValue();
ssize_t count = simdSize / genTypeSize(simdBaseType);
assert((0 <= imm8) && (imm8 < count));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41_X64));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_WithElement;
}
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_LONG:
case TYP_ULONG:
case TYP_DOUBLE:
if (simdSize == 8)
{
return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
break;
case TYP_FLOAT:
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
break;
default:
unreached();
}
hwIntrinsicID = NI_AdvSimd_Insert;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
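//------------------------------------------------------------------------
// gtNewSimdZeroNode: Creates a new simd Zero node representing a vector of all zeros.
//
// Arguments:
//    type                - The type of the node
//    simdBaseJitType     - The base JIT type of the node
//    simdSize            - The simd size of the node
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created Zero node
//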
GenTree* Compiler::gtNewSimdZeroNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
intrinsic = (simdSize == 32) ? NI_Vector256_get_Zero : NI_Vector128_get_Zero;
#elif defined(TARGET_ARM64)
intrinsic = (simdSize > 8) ? NI_Vector128_get_Zero : NI_Vector64_get_Zero;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
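//------------------------------------------------------------------------
// gtNewScalarHWIntrinsicNode: Creates a hardware intrinsic node for a scalar
//    (non-SIMD) intrinsic. The overloads below differ only in operand count.
//
// Arguments:
//    type          - The return type of the node
//    op1..op3      - The operands, where present
//    hwIntrinsicID - The hardware intrinsic being created
//
// Return Value:
//    The created node
//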
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2, op3);
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoad() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
NamedIntrinsic intrinsicId = GetHWIntrinsicId();
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
if (category == HW_Category_MemoryLoad)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryLoad(GetHWIntrinsicId()))
{
// Some intrinsics (without HW_Category_MemoryLoad) also have MemoryLoad semantics
// This is generally because they have both vector and pointer overloads, e.g.,
// * Vector128<byte> BroadcastScalarToVector128(Vector128<byte> value)
// * Vector128<byte> BroadcastScalarToVector128(byte* source)
        // So, we need to check whether the argument is a memory reference or a Vector128
if ((category == HW_Category_SimpleSIMD) || (category == HW_Category_SIMDScalar))
{
assert(GetOperandCount() == 1);
switch (intrinsicId)
{
case NI_SSE41_ConvertToVector128Int16:
case NI_SSE41_ConvertToVector128Int32:
case NI_SSE41_ConvertToVector128Int64:
case NI_AVX2_BroadcastScalarToVector128:
case NI_AVX2_BroadcastScalarToVector256:
case NI_AVX2_ConvertToVector256Int16:
case NI_AVX2_ConvertToVector256Int32:
case NI_AVX2_ConvertToVector256Int64:
{
CorInfoType auxiliaryType = GetAuxiliaryJitType();
if (auxiliaryType == CORINFO_TYPE_PTR)
{
return true;
}
assert(auxiliaryType == CORINFO_TYPE_UNDEF);
return false;
}
default:
{
unreached();
}
}
}
else if (category == HW_Category_IMM)
{
            // Do we have fewer than 3 operands?
if (GetOperandCount() < 3)
{
return false;
}
else if (HWIntrinsicInfo::isAVX2GatherIntrinsic(GetHWIntrinsicId()))
{
return true;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(GetHWIntrinsicId());
if (category == HW_Category_MemoryStore)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryStore(GetHWIntrinsicId()) &&
(category == HW_Category_IMM || category == HW_Category_Scalar))
{
// Some intrinsics (without HW_Category_MemoryStore) also have MemoryStore semantics
        // Bmi2/Bmi2.X64.MultiplyNoFlags may return the lower half result via an out argument
// unsafe ulong MultiplyNoFlags(ulong left, ulong right, ulong* low)
//
// So, the 3-argument form is MemoryStore
if (GetOperandCount() == 3)
{
switch (GetHWIntrinsicId())
{
case NI_BMI2_MultiplyNoFlags:
case NI_BMI2_X64_MultiplyNoFlags:
return true;
default:
return false;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad or MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoadOrStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
return OperIsMemoryLoad() || OperIsMemoryStore();
#else
return false;
#endif
}
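//------------------------------------------------------------------------
// GetHWIntrinsicId: Returns the NamedIntrinsic for this node, asserting that the
//    node's operand count is consistent with the intrinsic's expected argument
//    count (when that count is known).
//
// Return Value:
//    The intrinsic id of the node
//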
NamedIntrinsic GenTreeHWIntrinsic::GetHWIntrinsicId() const
{
NamedIntrinsic id = gtHWIntrinsicId;
int numArgs = HWIntrinsicInfo::lookupNumArgs(id);
bool numArgsUnknown = numArgs < 0;
assert((static_cast<size_t>(numArgs) == GetOperandCount()) || numArgsUnknown);
return id;
}
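//------------------------------------------------------------------------
// SetHWIntrinsicId: Sets the NamedIntrinsic for this node, asserting (in DEBUG) that
//    the new intrinsic's expected argument count matches the node's current operand
//    count when that count is known.
//
// Arguments:
//    intrinsicId - The new intrinsic id for the node
//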
void GenTreeHWIntrinsic::SetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
#ifdef DEBUG
size_t oldOperandCount = GetOperandCount();
int newOperandCount = HWIntrinsicInfo::lookupNumArgs(intrinsicId);
bool newCountUnknown = newOperandCount < 0;
// We'll choose to trust the programmer here.
assert((oldOperandCount == static_cast<size_t>(newOperandCount)) || newCountUnknown);
#endif // DEBUG
gtHWIntrinsicId = intrinsicId;
}
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeHWIntrinsic::Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetHWIntrinsicId() == op2->GetHWIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
(op1->GetAuxiliaryType() == op2->GetAuxiliaryType()) && (op1->GetOtherReg() == op2->GetOtherReg()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_HW_INTRINSICS
//---------------------------------------------------------------------------------------
// gtNewMustThrowException:
//    create a throw node (calling into a JIT helper) for an exception that must be thrown.
// The result would be a comma node: COMMA(jithelperthrow(void), x) where x's type should be specified.
//
// Arguments
// helper - JIT helper ID
//    type - return type of the node
//    clsHnd - class handle of the struct to return, used only when 'type' is TYP_STRUCT
//
// Return Value
// pointer to the throw node
//
GenTree* Compiler::gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd)
{
GenTreeCall* node = gtNewHelperCallNode(helper, TYP_VOID);
node->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
if (type != TYP_VOID)
{
unsigned dummyTemp = lvaGrabTemp(true DEBUGARG("dummy temp of must thrown exception"));
if (type == TYP_STRUCT)
{
lvaSetStruct(dummyTemp, clsHnd, false);
type = lvaTable[dummyTemp].lvType; // struct type is normalized
}
else
{
lvaTable[dummyTemp].lvType = type;
}
GenTree* dummyNode = gtNewLclvNode(dummyTemp, type);
return gtNewOperNode(GT_COMMA, type, node, dummyNode);
}
return node;
}
//---------------------------------------------------------------------------------------
// InitializeStructReturnType:
// Initialize the Return Type Descriptor for a method that returns a struct type
//
// Arguments
// comp - Compiler Instance
// retClsHnd - VM handle to the struct type returned by the method
//
// Return Value
// None
//
void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension callConv)
{
assert(!m_inited);
#if FEATURE_MULTIREG_RET
assert(retClsHnd != NO_CLASS_HANDLE);
unsigned structSize = comp->info.compCompHnd->getClassSize(retClsHnd);
Compiler::structPassingKind howToReturnStruct;
var_types returnType = comp->getReturnTypeForStruct(retClsHnd, callConv, &howToReturnStruct, structSize);
switch (howToReturnStruct)
{
case Compiler::SPK_EnclosingType:
m_isEnclosingType = true;
FALLTHROUGH;
case Compiler::SPK_PrimitiveType:
{
assert(returnType != TYP_UNKNOWN);
assert(returnType != TYP_STRUCT);
m_regType[0] = returnType;
break;
}
case Compiler::SPK_ByValueAsHfa:
{
assert(varTypeIsStruct(returnType));
var_types hfaType = comp->GetHfaType(retClsHnd);
// We should have an hfa struct type
assert(varTypeIsValidHfaType(hfaType));
            // Note that the retail build issues a warning about a potential division by zero without this Max function
unsigned elemSize = Max((unsigned)1, EA_SIZE_IN_BYTES(emitActualTypeSize(hfaType)));
// The size of this struct should be evenly divisible by elemSize
assert((structSize % elemSize) == 0);
unsigned hfaCount = (structSize / elemSize);
for (unsigned i = 0; i < hfaCount; ++i)
{
m_regType[i] = hfaType;
}
if (comp->compFloatingPointUsed == false)
{
comp->compFloatingPointUsed = true;
}
break;
}
case Compiler::SPK_ByValue:
{
assert(varTypeIsStruct(returnType));
#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
comp->eeGetSystemVAmd64PassStructInRegisterDescriptor(retClsHnd, &structDesc);
assert(structDesc.passedInRegisters);
for (int i = 0; i < structDesc.eightByteCount; i++)
{
assert(i < MAX_RET_REG_COUNT);
m_regType[i] = comp->GetEightByteType(structDesc, i);
}
#elif defined(TARGET_ARM64)
// a non-HFA struct returned using two registers
//
assert((structSize > TARGET_POINTER_SIZE) && (structSize <= (2 * TARGET_POINTER_SIZE)));
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#elif defined(TARGET_X86)
// an 8-byte struct returned using two registers
assert(structSize == 8);
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#else // TARGET_XXX
// This target needs support here!
//
NYI("Unsupported TARGET returning a TYP_STRUCT in InitializeStructReturnType");
#endif // UNIX_AMD64_ABI
break; // for case SPK_ByValue
}
case Compiler::SPK_ByReference:
// We are returning using the return buffer argument
// There are no return registers
break;
default:
unreached(); // By the contract of getReturnTypeForStruct we should never get here.
} // end of switch (howToReturnStruct)
#endif // FEATURE_MULTIREG_RET
#ifdef DEBUG
m_inited = true;
#endif
}
//---------------------------------------------------------------------------------------
// InitializeLongReturnType:
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
//
void ReturnTypeDesc::InitializeLongReturnType()
{
assert(!m_inited);
#if defined(TARGET_X86) || defined(TARGET_ARM)
    // Sets up a ReturnTypeDesc for returning a long using two registers
//
assert(MAX_RET_REG_COUNT >= 2);
m_regType[0] = TYP_INT;
m_regType[1] = TYP_INT;
#else // not (TARGET_X86 or TARGET_ARM)
m_regType[0] = TYP_LONG;
#endif // TARGET_X86 or TARGET_ARM
#ifdef DEBUG
m_inited = true;
#endif
}
//-------------------------------------------------------------------
// GetABIReturnReg: Return i'th return register as per target ABI
//
// Arguments:
// idx - Index of the return register.
// The first return register has an index of 0 and so on.
//
// Return Value:
// Returns i'th return register as per target ABI.
//
// Notes:
// x86 and ARM return long in multiple registers.
// ARM and ARM64 return HFA struct in multiple registers.
//
regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx) const
{
unsigned count = GetReturnRegCount();
assert(idx < count);
regNumber resultReg = REG_NA;
#ifdef UNIX_AMD64_ABI
var_types regType0 = GetReturnRegType(0);
if (idx == 0)
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET;
}
else
{
noway_assert(varTypeUsesFloatReg(regType0));
resultReg = REG_FLOATRET;
}
}
else if (idx == 1)
{
var_types regType1 = GetReturnRegType(1);
if (varTypeIsIntegralOrI(regType1))
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET_1;
}
else
{
resultReg = REG_INTRET;
}
}
else
{
noway_assert(varTypeUsesFloatReg(regType1));
if (varTypeUsesFloatReg(regType0))
{
resultReg = REG_FLOATRET_1;
}
else
{
resultReg = REG_FLOATRET;
}
}
}
#elif defined(TARGET_X86)
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
#elif defined(TARGET_ARM)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
// Ints are returned in one return register.
// Longs are returned in two return registers.
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
}
else
{
// Floats are returned in one return register (f0).
// Doubles are returned in one return register (d0).
        // HFA structs are returned in up to four registers.
assert(idx < MAX_RET_REG_COUNT); // Up to 4 return registers for HFA's
if (regType == TYP_DOUBLE)
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx * 2); // d0, d1, d2 or d3
}
else
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // f0, f1, f2 or f3
}
}
#elif defined(TARGET_ARM64)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
noway_assert(idx < 2); // Up to 2 return registers for 16-byte structs
resultReg = (idx == 0) ? REG_INTRET : REG_INTRET_1; // X0 or X1
}
else
{
noway_assert(idx < 4); // Up to 4 return registers for HFA's
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // V0, V1, V2 or V3
}
#endif // TARGET_XXX
assert(resultReg != REG_NA);
return resultReg;
}
//--------------------------------------------------------------------------------
// GetABIReturnRegs: get the mask of return registers as per target arch ABI.
//
// Arguments:
// None
//
// Return Value:
// reg mask of return registers in which the return type is returned.
//
// Note:
// This routine can be used when the caller is not particular about the order
// of return registers and wants to know the set of return registers.
//
regMaskTP ReturnTypeDesc::GetABIReturnRegs() const
{
regMaskTP resultMask = RBM_NONE;
unsigned count = GetReturnRegCount();
for (unsigned i = 0; i < count; ++i)
{
resultMask |= genRegMask(GetABIReturnReg(i));
}
return resultMask;
}
//------------------------------------------------------------------------
// The following functions manage the gtRsvdRegs set of temporary registers
// created by LSRA during code generation.
//------------------------------------------------------------------------
// AvailableTempRegCount: return the number of available temporary registers in the (optional) given set
// (typically, RBM_ALLINT or RBM_ALLFLOAT).
//
// Arguments:
// mask - (optional) Check for available temporary registers only in this set.
//
// Return Value:
// Count of available temporary registers in given set.
//
unsigned GenTree::AvailableTempRegCount(regMaskTP mask /* = (regMaskTP)-1 */) const
{
return genCountBits(gtRsvdRegs & mask);
}
//------------------------------------------------------------------------
// GetSingleTempReg: There is expected to be exactly one available temporary register
// in the given mask in the gtRsvdRegs set. Get that register. No future calls to get
// a temporary register are expected. Removes the register from the set, but only in
// DEBUG to avoid doing unnecessary work in non-DEBUG builds.
//
// Arguments:
// mask - (optional) Get an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::GetSingleTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) == 1);
regNumber tempReg = genRegNumFromMask(availableSet);
INDEBUG(gtRsvdRegs &= ~availableSet;) // Remove the register from the set, so it can't be used again.
return tempReg;
}
//------------------------------------------------------------------------
// ExtractTempReg: Find the lowest number temporary register from the gtRsvdRegs set
// that is also in the optional given mask (typically, RBM_ALLINT or RBM_ALLFLOAT),
// and return it. Remove this register from the temporary register set, so it won't
// be returned again.
//
// Arguments:
// mask - (optional) Extract an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::ExtractTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) >= 1);
regMaskTP tempRegMask = genFindLowestBit(availableSet);
gtRsvdRegs &= ~tempRegMask;
return genRegNumFromMask(tempRegMask);
}
//------------------------------------------------------------------------
// GetLclOffs: if `this` is a local field or a local field address, returns the offset
// of the field inside the struct; otherwise, returns 0.
//
// Return Value:
// The offset value.
//
uint16_t GenTreeLclVarCommon::GetLclOffs() const
{
if (OperIsLocalField())
{
return AsLclFld()->GetLclOffs();
}
else
{
return 0;
}
}
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GetResultOpNumForFMA: check if the result is written into one of the operands.
// In the case that none of the operands is overwritten, check if any of them is a last use.
//
// Return Value:
// The operand number overwritten or lastUse. 0 is the default value, where the result is written into
// a destination that is not one of the source operands and there is no last use op.
//
unsigned GenTreeHWIntrinsic::GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3)
{
    // Only FMA intrinsic nodes should call into this function
assert(HWIntrinsicInfo::lookupIsa(gtHWIntrinsicId) == InstructionSet_FMA);
if (use != nullptr && use->OperIs(GT_STORE_LCL_VAR))
{
// For store_lcl_var, check if any op is overwritten
GenTreeLclVarCommon* overwritten = use->AsLclVarCommon();
unsigned overwrittenLclNum = overwritten->GetLclNum();
if (op1->IsLocal() && op1->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 1;
}
else if (op2->IsLocal() && op2->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 2;
}
else if (op3->IsLocal() && op3->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 3;
}
}
// If no overwritten op, check if there is any last use op
// https://github.com/dotnet/runtime/issues/62215
if (op1->OperIs(GT_LCL_VAR) && op1->IsLastUse(0))
return 1;
else if (op2->OperIs(GT_LCL_VAR) && op2->IsLastUse(0))
return 2;
else if (op3->OperIs(GT_LCL_VAR) && op3->IsLastUse(0))
return 3;
return 0;
}
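// For example (illustrative): for "x = fma(a, b, c)" where the result is stored back into the local
// holding c, this returns 3, allowing the caller to pick the FMA variant whose destination register
// is op3's register (and similarly for returns of 1 or 2).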
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
#ifdef TARGET_ARM
//------------------------------------------------------------------------
// IsOffsetMisaligned: check if the field needs a special handling on arm.
//
// Return Value:
// true if it is a float field with a misaligned offset, false otherwise.
//
bool GenTreeLclFld::IsOffsetMisaligned() const
{
if (varTypeIsFloating(gtType))
{
return ((m_lclOffs % emitTypeSize(TYP_FLOAT)) != 0);
}
return false;
}
#endif // TARGET_ARM
bool GenTree::IsInvariant() const
{
return OperIsConst() || Compiler::impIsAddressInLocal(this);
}
//------------------------------------------------------------------------
// IsNeverNegative: returns true if the given tree is known to never be
// negative, i.e. the upper bit will always be zero.
// Only valid for integral types.
//
// Arguments:
// comp - Compiler object, needed for IntegralRange::ForNode
//
// Return Value:
// true if the given tree is known to be never negative
//
bool GenTree::IsNeverNegative(Compiler* comp) const
{
assert(varTypeIsIntegral(this));
if (IsIntegralConst())
{
return AsIntConCommon()->IntegralValue() >= 0;
}
// TODO-Casts: extend IntegralRange to handle constants
return IntegralRange::ForNode((GenTree*)this, comp).IsPositive();
}
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
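A minimal sketch of the `gtHasReg()` idea (illustrative only, written against the multi-reg
accessors already declared on `GenTree`; the helper name is hypothetical and the real change
differs in detail):
```cpp
// Sketch: a multi-reg local "has a register" as soon as any of its slots is
// assigned one, not just slot 0.
bool HasAnyRegAssigned(GenTree* tree)
{
    if (tree->IsMultiRegLclVar())
    {
        for (unsigned i = 0; i < tree->GetMultiRegCount(); i++)
        {
            if (tree->GetRegByIndex((int)i) != REG_NA)
            {
                return true; // e.g. slot 1 holding a GC ref in a register
            }
        }
        return false;
    }
    return tree->GetRegNum() != REG_NA; // single-reg case, unchanged
}
```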
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/gentree.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XX This is the node in the semantic tree graph. It represents the operation XX
XX corresponding to the node, and other information during code-gen. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _GENTREE_H_
#define _GENTREE_H_
/*****************************************************************************/
#include "vartype.h" // For "var_types"
#include "target.h" // For "regNumber"
#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
#include "valuenumtype.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "simd.h"
#include "namedintrinsiclist.h"
#include "layout.h"
#include "debuginfo.h"
// Debugging GenTree is much easier if we add a magic virtual function to make the debugger able to figure out what type
// it's got. This is enabled by default in DEBUG. To enable it in RET builds (temporarily!), you need to change the
// build to define DEBUGGABLE_GENTREE=1, as well as pass /OPT:NOICF to the linker (or else all the vtables get merged,
// making the debugging value supplied by them useless).
#ifndef DEBUGGABLE_GENTREE
#ifdef DEBUG
#define DEBUGGABLE_GENTREE 1
#else // !DEBUG
#define DEBUGGABLE_GENTREE 0
#endif // !DEBUG
#endif // !DEBUGGABLE_GENTREE
// The SpecialCodeKind enum is used to indicate the type of special (unique)
// target block that will be targeted by an instruction.
// These are used by:
// GenTreeBoundsChk nodes (SCK_RNGCHK_FAIL, SCK_ARG_EXCPN, SCK_ARG_RNG_EXCPN)
// - these nodes have a field (gtThrowKind) to indicate which kind
// GenTreeOps nodes, for which codegen will generate the branch
// - it will use the appropriate kind based on the opcode, though it's not
// clear why SCK_OVERFLOW == SCK_ARITH_EXCPN
//
enum SpecialCodeKind
{
SCK_NONE,
SCK_RNGCHK_FAIL, // target when range check fails
SCK_DIV_BY_ZERO, // target for divide by zero (Not used on X86/X64)
SCK_ARITH_EXCPN, // target on arithmetic exception
SCK_OVERFLOW = SCK_ARITH_EXCPN, // target on overflow
SCK_ARG_EXCPN, // target on ArgumentException (currently used only for SIMD intrinsics)
SCK_ARG_RNG_EXCPN, // target on ArgumentOutOfRangeException (currently used only for SIMD intrinsics)
SCK_COUNT
};
/*****************************************************************************/
enum genTreeOps : BYTE
{
#define GTNODE(en, st, cm, ok) GT_##en,
#include "gtlist.h"
GT_COUNT,
#ifdef TARGET_64BIT
// GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
// For the 64-bit targets we will only use GT_CNS_INT as it used to represent all the possible sizes
GT_CNS_NATIVELONG = GT_CNS_INT,
#else
// For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
// In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
GT_CNS_NATIVELONG = GT_CNS_LNG,
#endif
};
// The following enum defines a set of bit flags that can be used
// to classify expression tree nodes.
//
enum GenTreeOperKind
{
GTK_SPECIAL = 0x00, // special operator
GTK_LEAF = 0x01, // leaf operator
GTK_UNOP = 0x02, // unary operator
GTK_BINOP = 0x04, // binary operator
GTK_KINDMASK = (GTK_SPECIAL | GTK_LEAF | GTK_UNOP | GTK_BINOP), // operator kind mask
GTK_SMPOP = (GTK_UNOP | GTK_BINOP),
GTK_COMMUTE = 0x08, // commutative operator
GTK_EXOP = 0x10, // Indicates that an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
// by adding non-node fields to unary or binary operator.
GTK_NOVALUE = 0x20, // node does not produce a value
GTK_MASK = 0xFF
};
// The following enum defines a set of bit flags that describe opers for the purposes
// of DEBUG-only checks. This is separate from the above "GenTreeOperKind"s to avoid
// making the table for those larger in Release builds. However, it resides in the same
// "namespace" and so all values here must be distinct from those in "GenTreeOperKind".
//
enum GenTreeDebugOperKind
{
DBK_FIRST_FLAG = GTK_MASK + 1,
DBK_NOTHIR = DBK_FIRST_FLAG, // This oper is not supported in HIR (before rationalization).
DBK_NOTLIR = DBK_FIRST_FLAG << 1, // This oper is not supported in LIR (after rationalization).
DBK_NOCONTAIN = DBK_FIRST_FLAG << 2, // This oper produces a value, but may not be contained.
DBK_MASK = ~GTK_MASK
};
/*****************************************************************************/
enum gtCallTypes : BYTE
{
CT_USER_FUNC, // User function
CT_HELPER, // Jit-helper
CT_INDIRECT, // Indirect call
CT_COUNT // fake entry (must be last)
};
#ifdef DEBUG
/*****************************************************************************
*
* TargetHandleTypes are used to determine the type of handle present inside a GenTreeIntCon node.
* The values are chosen so that they don't overlap with helper or user function handles.
*/
enum TargetHandleType : BYTE
{
THT_Unknown = 2,
THT_GSCookieCheck = 4,
THT_SetGSCookie = 6,
THT_IntializeArrayIntrinsics = 8
};
#endif
/*****************************************************************************/
struct BasicBlock;
enum BasicBlockFlags : unsigned __int64;
struct InlineCandidateInfo;
struct GuardedDevirtualizationCandidateInfo;
struct ClassProfileCandidateInfo;
struct LateDevirtualizationInfo;
typedef unsigned short AssertionIndex;
static const AssertionIndex NO_ASSERTION_INDEX = 0;
//------------------------------------------------------------------------
// GetAssertionIndex: return 1-based AssertionIndex from 0-based int index.
//
// Arguments:
// index - 0-based index
// Return Value:
// 1-based AssertionIndex.
inline AssertionIndex GetAssertionIndex(unsigned index)
{
return (AssertionIndex)(index + 1);
}
class AssertionInfo
{
// true if the assertion holds on the bbNext edge instead of the bbJumpDest edge (for GT_JTRUE nodes)
unsigned short m_isNextEdgeAssertion : 1;
// 1-based index of the assertion
unsigned short m_assertionIndex : 15;
AssertionInfo(bool isNextEdgeAssertion, AssertionIndex assertionIndex)
: m_isNextEdgeAssertion(isNextEdgeAssertion), m_assertionIndex(assertionIndex)
{
assert(m_assertionIndex == assertionIndex);
}
public:
AssertionInfo() : AssertionInfo(false, 0)
{
}
AssertionInfo(AssertionIndex assertionIndex) : AssertionInfo(false, assertionIndex)
{
}
static AssertionInfo ForNextEdge(AssertionIndex assertionIndex)
{
// Ignore the edge information if there's no assertion
bool isNextEdge = (assertionIndex != NO_ASSERTION_INDEX);
return AssertionInfo(isNextEdge, assertionIndex);
}
void Clear()
{
m_isNextEdgeAssertion = 0;
m_assertionIndex = NO_ASSERTION_INDEX;
}
bool HasAssertion() const
{
return m_assertionIndex != NO_ASSERTION_INDEX;
}
AssertionIndex GetAssertionIndex() const
{
return m_assertionIndex;
}
bool IsNextEdgeAssertion() const
{
return m_isNextEdgeAssertion;
}
};
/*****************************************************************************/
// GT_FIELD nodes will be lowered into more "code-gen-able" representations, like
// GT_IND's of addresses, or GT_LCL_FLD nodes. We'd like to preserve the more abstract
// information, and will therefore annotate such lowered nodes with FieldSeq's. A FieldSeq
// represents a (possibly) empty sequence of fields. The fields are in the order
// in which they are dereferenced. The first field may be an object field or a struct field;
// all subsequent fields must be struct fields.
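// For example (illustrative), an access like "obj.f1.f2" -- where f1 is an object field of struct
// type and f2 is a field of that struct -- would be annotated with the two-element sequence
// [f1, f2], i.e. a FieldSeqNode for f1 whose m_next points to the node for f2.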
struct FieldSeqNode
{
CORINFO_FIELD_HANDLE m_fieldHnd;
FieldSeqNode* m_next;
FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next) : m_fieldHnd(fieldHnd), m_next(next)
{
}
// returns true when this is the pseudo #FirstElem field sequence
bool IsFirstElemFieldSeq();
// returns true when this is the pseudo #ConstantIndex field sequence
bool IsConstantIndexFieldSeq();
// returns true when this is the pseudo #FirstElem field sequence or the pseudo #ConstantIndex field sequence
bool IsPseudoField() const;
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(!IsPseudoField() && (m_fieldHnd != nullptr));
return m_fieldHnd;
}
FieldSeqNode* GetTail()
{
FieldSeqNode* tail = this;
while (tail->m_next != nullptr)
{
tail = tail->m_next;
}
return tail;
}
// Make sure this provides methods that allow it to be used as a KeyFuncs type in SimplerHash.
static int GetHashCode(FieldSeqNode fsn)
{
return static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_fieldHnd)) ^
static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
}
static bool Equals(const FieldSeqNode& fsn1, const FieldSeqNode& fsn2)
{
return fsn1.m_fieldHnd == fsn2.m_fieldHnd && fsn1.m_next == fsn2.m_next;
}
};
// This class canonicalizes field sequences.
class FieldSeqStore
{
typedef JitHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*> FieldSeqNodeCanonMap;
CompAllocator m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
static FieldSeqNode s_notAField; // No value, just exists to provide an address.
// Dummy variables to provide the addresses for the "pseudo field handle" statics below.
static int FirstElemPseudoFieldStruct;
static int ConstantIndexPseudoFieldStruct;
public:
FieldSeqStore(CompAllocator alloc);
// Returns the (canonical in the store) singleton field sequence for the given handle.
FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd);
// This is a special distinguished FieldSeqNode indicating that a constant does *not*
// represent a valid field sequence. This is "infectious", in the sense that appending it
// (on either side) to any field sequence yields the "NotAField()" sequence.
static FieldSeqNode* NotAField()
{
return &s_notAField;
}
// Returns the (canonical in the store) field sequence representing the concatenation of
// the sequences represented by "a" and "b". Assumes that "a" and "b" are canonical; that is,
// they are the results of CreateSingleton, NotAField, or Append calls. If either of the arguments
// are the "NotAField" value, so is the result.
FieldSeqNode* Append(FieldSeqNode* a, FieldSeqNode* b);
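// For example (illustrative): Append(CreateSingleton(f1), CreateSingleton(f2)) yields the canonical
// sequence [f1, f2], while Append(NotAField(), x) and Append(x, NotAField()) both yield NotAField().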
// We have a few "pseudo" field handles:
// This treats the constant offset of the first element of something as if it were a field.
// Works for method table offsets of boxed structs, or first elem offset of arrays/strings.
static CORINFO_FIELD_HANDLE FirstElemPseudoField;
// If there is a constant index, we make a pseudo field to correspond to the constant added to
// offset of the indexed field. This keeps the field sequence structure "normalized", especially in the
// case where the element type is a struct, so we might add a further struct field offset.
static CORINFO_FIELD_HANDLE ConstantIndexPseudoField;
static bool IsPseudoField(CORINFO_FIELD_HANDLE hnd)
{
return hnd == FirstElemPseudoField || hnd == ConstantIndexPseudoField;
}
};
class GenTreeUseEdgeIterator;
class GenTreeOperandIterator;
struct Statement;
/*****************************************************************************/
// Forward declarations of the subtypes
#define GTSTRUCT_0(fn, en) struct GenTree##fn;
#define GTSTRUCT_1(fn, en) struct GenTree##fn;
#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn;
#define GTSTRUCT_N(fn, ...) struct GenTree##fn;
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
/*****************************************************************************/
// Don't format the GenTreeFlags declaration
// clang-format off
//------------------------------------------------------------------------
// GenTreeFlags: a bitmask of flags for GenTree stored in gtFlags
//
enum GenTreeFlags : unsigned int
{
GTF_EMPTY = 0,
//---------------------------------------------------------------------
// The first set of flags can be used with a large set of nodes, and
// thus they must all have distinct values. That is, one can test any
// expression node for one of these flags.
//---------------------------------------------------------------------
GTF_ASG = 0x00000001, // sub-expression contains an assignment
GTF_CALL = 0x00000002, // sub-expression contains a func. call
GTF_EXCEPT = 0x00000004, // sub-expression might throw an exception
GTF_GLOB_REF = 0x00000008, // sub-expression uses global variable(s)
GTF_ORDER_SIDEEFF = 0x00000010, // sub-expression has a re-ordering side effect
// If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
// otherwise the C# (run csc /o-) code:
// var v = side_eff_operation
// with no use of `v` will drop your tree on the floor.
GTF_PERSISTENT_SIDE_EFFECTS = GTF_ASG | GTF_CALL,
GTF_SIDE_EFFECT = GTF_PERSISTENT_SIDE_EFFECTS | GTF_EXCEPT,
GTF_GLOB_EFFECT = GTF_SIDE_EFFECT | GTF_GLOB_REF,
GTF_ALL_EFFECT = GTF_GLOB_EFFECT | GTF_ORDER_SIDEEFF,
GTF_REVERSE_OPS = 0x00000020, // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
GTF_CONTAINED = 0x00000040, // This node is contained (executed as part of its parent)
GTF_SPILLED = 0x00000080, // the value has been spilled
GTF_NOREG_AT_USE = 0x00000100, // tree node is in memory at the point of use
GTF_SET_FLAGS = 0x00000200, // Requires that codegen for this node set the flags. Use gtSetFlags() to check this flag.
GTF_USE_FLAGS = 0x00000400, // Indicates that this node uses the flags bits.
GTF_MAKE_CSE = 0x00000800, // Hoisted expression: try hard to make this into CSE (see optPerformHoistExpr)
GTF_DONT_CSE = 0x00001000, // Don't bother CSE'ing this expr
GTF_COLON_COND = 0x00002000, // This node is conditionally executed (part of ? :)
GTF_NODE_MASK = GTF_COLON_COND,
GTF_BOOLEAN = 0x00004000, // value is known to be 0/1
GTF_UNSIGNED = 0x00008000, // With GT_CAST: the source operand is an unsigned type
// With operators: the specified node is an unsigned operator
GTF_LATE_ARG = 0x00010000, // The specified node is evaluated to a temp in the arg list, and this temp is added to gtCallLateArgs.
GTF_SPILL = 0x00020000, // Needs to be spilled here
// The extra flag GTF_IS_IN_CSE is used to tell the consumer of the side effect flags
// that we are calling in the context of performing a CSE, thus we
// should allow the run-once side effects of running a class constructor.
//
// The only requirement of this flag is that it not overlap any of the
// side-effect flags. The actual bit used is otherwise arbitrary.
GTF_IS_IN_CSE = GTF_BOOLEAN,
GTF_COMMON_MASK = 0x0003FFFF, // mask of all the flags above
GTF_REUSE_REG_VAL = 0x00800000, // This is set by the register allocator on nodes whose value already exists in the
// register assigned to this node, so the code generator does not have to generate
// code to produce the value. It is currently used only on constant nodes.
// It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
// it is not needed for lclVars and is highly unlikely to be useful for indir nodes.
//---------------------------------------------------------------------
// The following flags can be used only with a small set of nodes, and
// thus their values need not be distinct (other than within the set
// that goes with a particular node/nodes, of course). That is, one can
// only test for one of these flags if the 'gtOper' value is tested as
// well to make sure it's the right operator for the particular flag.
//---------------------------------------------------------------------
// NB: GTF_VAR_* and GTF_REG_* share the same namespace of flags.
// These flags are also used by GT_LCL_FLD, and the last-use (DEATH) flags are also used by GenTreeCopyOrReload.
GTF_VAR_DEF = 0x80000000, // GT_LCL_VAR -- this is a definition
GTF_VAR_USEASG = 0x40000000, // GT_LCL_VAR -- this is a partial definition, a use of the previous definition is implied
// A partial definition usually occurs when a struct field is assigned to (s.f = ...) or
// when a scalar typed variable is assigned to via a narrow store (*((byte*)&i) = ...).
// Last-use bits.
// Note that a node marked GTF_VAR_MULTIREG can only be a pure definition of all the fields, or a pure use of all the fields,
// so we don't need the equivalent of GTF_VAR_USEASG.
GTF_VAR_MULTIREG_DEATH0 = 0x04000000, // GT_LCL_VAR -- The last-use bit for a lclVar (the first register if it is multireg).
GTF_VAR_DEATH = GTF_VAR_MULTIREG_DEATH0,
GTF_VAR_MULTIREG_DEATH1 = 0x08000000, // GT_LCL_VAR -- The last-use bit for the second register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH2 = 0x10000000, // GT_LCL_VAR -- The last-use bit for the third register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH3 = 0x20000000, // GT_LCL_VAR -- The last-use bit for the fourth register of a multireg lclVar.
GTF_VAR_DEATH_MASK = GTF_VAR_MULTIREG_DEATH0 | GTF_VAR_MULTIREG_DEATH1 | GTF_VAR_MULTIREG_DEATH2 | GTF_VAR_MULTIREG_DEATH3,
// This is the amount we have to shift, plus the regIndex, to get the last use bit we want.
#define MULTIREG_LAST_USE_SHIFT 26
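// For example (illustrative), the last-use flag for register slot i of a multireg lclVar can be computed as
// (GenTreeFlags)(1 << (MULTIREG_LAST_USE_SHIFT + i)), which yields GTF_VAR_MULTIREG_DEATH0..3 for i = 0..3.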
GTF_VAR_MULTIREG = 0x02000000, // This is a struct or (on 32-bit platforms) long variable that is used or defined
// to/from a multireg source or destination (e.g. a call arg or return, or an op
// that returns its result in multiple registers such as a long multiply).
GTF_LIVENESS_MASK = GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_DEATH_MASK,
GTF_VAR_CAST = 0x01000000, // GT_LCL_VAR -- has been explicitly cast (variable node may not be type of local)
GTF_VAR_ITERATOR = 0x00800000, // GT_LCL_VAR -- this is an iterator reference in the loop condition
GTF_VAR_CLONED = 0x00400000, // GT_LCL_VAR -- this node has been cloned or is a clone
GTF_VAR_CONTEXT = 0x00200000, // GT_LCL_VAR -- this node is part of a runtime lookup
GTF_VAR_FOLDED_IND = 0x00100000, // GT_LCL_VAR -- this node was folded from *(typ*)&lclVar expression tree in fgMorphSmpOp()
// where 'typ' is a small type and 'lclVar' corresponds to a normalized-on-store local variable.
// This flag identifies such nodes in order to make sure that fgDoNormalizeOnStore() is called
// on their parents in post-order morph.
// Relevant for inlining optimizations (see fgInlinePrependStatements)
GTF_VAR_ARR_INDEX = 0x00000020, // The variable is part of (the index portion of) an array index expression.
// Shares a value with GTF_REVERSE_OPS, which is meaningless for local var.
// For additional flags for GT_CALL node see GTF_CALL_M_*
GTF_CALL_UNMANAGED = 0x80000000, // GT_CALL -- direct call to unmanaged code
GTF_CALL_INLINE_CANDIDATE = 0x40000000, // GT_CALL -- this call has been marked as an inline candidate
GTF_CALL_VIRT_KIND_MASK = 0x30000000, // GT_CALL -- mask of the below call kinds
GTF_CALL_NONVIRT = 0x00000000, // GT_CALL -- a non virtual call
GTF_CALL_VIRT_STUB = 0x10000000, // GT_CALL -- a stub-dispatch virtual call
GTF_CALL_VIRT_VTABLE = 0x20000000, // GT_CALL -- a vtable-based virtual call
GTF_CALL_NULLCHECK = 0x08000000, // GT_CALL -- must check instance pointer for null
GTF_CALL_POP_ARGS = 0x04000000, // GT_CALL -- caller pop arguments?
GTF_CALL_HOISTABLE = 0x02000000, // GT_CALL -- call is hoistable
GTF_MEMORYBARRIER_LOAD = 0x40000000, // GT_MEMORYBARRIER -- Load barrier
GTF_FLD_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_FLD_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- field access requires preceding class/static init helper
GTF_INX_RNGCHK = 0x80000000, // GT_INDEX/GT_INDEX_ADDR -- the array reference should be range-checked.
GTF_INX_STRING_LAYOUT = 0x40000000, // GT_INDEX -- this uses the special string array layout
GTF_INX_NOFAULT = 0x20000000, // GT_INDEX -- the INDEX does not throw an exception (morph to GTF_IND_NONFAULTING)
GTF_IND_TGT_NOT_HEAP = 0x80000000, // GT_IND -- the target is not on the heap
GTF_IND_VOLATILE = 0x40000000, // GT_IND -- the load or store must use volatile semantics (this is a nop on X86)
GTF_IND_NONFAULTING = 0x20000000, // Operations for which OperIsIndir() is true -- An indir that cannot fault.
// Same as GTF_ARRLEN_NONFAULTING.
GTF_IND_TGTANYWHERE = 0x10000000, // GT_IND -- the target could be anywhere
GTF_IND_TLS_REF = 0x08000000, // GT_IND -- the target is accessed via TLS
GTF_IND_ASG_LHS = 0x04000000, // GT_IND -- this GT_IND node is (the effective val) of the LHS of an
// assignment; don't evaluate it independently.
GTF_IND_REQ_ADDR_IN_REG = GTF_IND_ASG_LHS, // GT_IND -- requires its addr operand to be evaluated
// into a register. This flag is useful in cases where it
// is required to generate register indirect addressing mode.
// One such case is virtual stub calls on xarch. This is only
// valid in the backend, where GTF_IND_ASG_LHS is not necessary
// (all such indirections will be lowered to GT_STOREIND).
GTF_IND_UNALIGNED = 0x02000000, // GT_IND -- the load or store is unaligned (we assume worst case
// alignment of 1 byte)
GTF_IND_INVARIANT = 0x01000000, // GT_IND -- the target is invariant (a prejit indirection)
GTF_IND_ARR_INDEX = 0x00800000, // GT_IND -- the indirection represents an (SZ) array index
GTF_IND_NONNULL = 0x00400000, // GT_IND -- the indirection never returns null (zero)
GTF_IND_FLAGS = GTF_IND_VOLATILE | GTF_IND_TGTANYWHERE | GTF_IND_NONFAULTING | GTF_IND_TLS_REF | \
GTF_IND_UNALIGNED | GTF_IND_INVARIANT | GTF_IND_NONNULL | GTF_IND_ARR_INDEX | GTF_IND_TGT_NOT_HEAP,
GTF_CLS_VAR_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_CLS_VAR_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_FLD_INITCLASS
GTF_CLS_VAR_ASG_LHS = 0x04000000, // GT_CLS_VAR -- this GT_CLS_VAR node is (the effective val) of the LHS
// of an assignment; don't evaluate it independently.
GTF_ADDRMODE_NO_CSE = 0x80000000, // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex
// addressing mode
GTF_MUL_64RSLT = 0x40000000, // GT_MUL -- produce 64-bit result
GTF_RELOP_NAN_UN = 0x80000000, // GT_<relop> -- Is branch taken if ops are NaN?
GTF_RELOP_JMP_USED = 0x40000000, // GT_<relop> -- result of compare used for jump or ?:
GTF_RELOP_ZTT = 0x08000000, // GT_<relop> -- Loop test cloned for converting while-loops into do-while
// with explicit "loop test" in the header block.
GTF_RELOP_SJUMP_OPT = 0x04000000, // GT_<relop> -- Swap signed jl/jge with js/jns during emitter, reuses flags
// from previous instruction.
GTF_JCMP_EQ = 0x80000000, // GTF_JCMP_EQ -- Branch on equal rather than not equal
GTF_JCMP_TST = 0x40000000, // GTF_JCMP_TST -- Use bit test instruction rather than compare against zero instruction
GTF_RET_MERGED = 0x80000000, // GT_RETURN -- This is a return generated during epilog merging.
GTF_QMARK_CAST_INSTOF = 0x80000000, // GT_QMARK -- Is this a top (not nested) level qmark created for
// castclass or instanceof?
GTF_BOX_VALUE = 0x80000000, // GT_BOX -- "box" is on a value type
GTF_ICON_HDL_MASK = 0xFF000000, // Bits used by handle types below
GTF_ICON_SCOPE_HDL = 0x01000000, // GT_CNS_INT -- constant is a scope handle
GTF_ICON_CLASS_HDL = 0x02000000, // GT_CNS_INT -- constant is a class handle
GTF_ICON_METHOD_HDL = 0x03000000, // GT_CNS_INT -- constant is a method handle
GTF_ICON_FIELD_HDL = 0x04000000, // GT_CNS_INT -- constant is a field handle
GTF_ICON_STATIC_HDL = 0x05000000, // GT_CNS_INT -- constant is a handle to static data
GTF_ICON_STR_HDL = 0x06000000, // GT_CNS_INT -- constant is a string handle
GTF_ICON_CONST_PTR = 0x07000000, // GT_CNS_INT -- constant is a pointer to immutable data, (e.g. IAT_PPVALUE)
GTF_ICON_GLOBAL_PTR = 0x08000000, // GT_CNS_INT -- constant is a pointer to mutable data (e.g. from the VM state)
GTF_ICON_VARG_HDL = 0x09000000, // GT_CNS_INT -- constant is a var arg cookie handle
GTF_ICON_PINVKI_HDL = 0x0A000000, // GT_CNS_INT -- constant is a pinvoke calli handle
GTF_ICON_TOKEN_HDL = 0x0B000000, // GT_CNS_INT -- constant is a token handle (other than class, method or field)
GTF_ICON_TLS_HDL = 0x0C000000, // GT_CNS_INT -- constant is a TLS ref with offset
GTF_ICON_FTN_ADDR = 0x0D000000, // GT_CNS_INT -- constant is a function address
GTF_ICON_CIDMID_HDL = 0x0E000000, // GT_CNS_INT -- constant is a class ID or a module ID
GTF_ICON_BBC_PTR = 0x0F000000, // GT_CNS_INT -- constant is a basic block count pointer
GTF_ICON_STATIC_BOX_PTR = 0x10000000, // GT_CNS_INT -- constant is an address of the box for a STATIC_IN_HEAP field
// GTF_ICON_REUSE_REG_VAL = 0x00800000 // GT_CNS_INT -- GTF_REUSE_REG_VAL, defined above
GTF_ICON_FIELD_OFF = 0x00400000, // GT_CNS_INT -- constant is a field offset
GTF_ICON_SIMD_COUNT = 0x00200000, // GT_CNS_INT -- constant is Vector<T>.Count
GTF_ICON_INITCLASS = 0x00100000, // GT_CNS_INT -- Constant is used to access a static that requires preceding
// class/static init helper. In some cases, the constant is
// the address of the static field itself, and in other cases
// there's an extra layer of indirection and it is the address
// of the cell that the runtime will fill in with the address
// of the static field; in both of those cases, the constant
// is what gets flagged.
GTF_BLK_VOLATILE = GTF_IND_VOLATILE, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is a volatile block operation
GTF_BLK_UNALIGNED = GTF_IND_UNALIGNED, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is an unaligned block operation
GTF_OVERFLOW = 0x10000000, // Supported for: GT_ADD, GT_SUB, GT_MUL and GT_CAST.
// Requires an overflow check. Use gtOverflow(Ex)() to check this flag.
GTF_DIV_BY_CNS_OPT = 0x80000000, // GT_DIV -- Uses the division by constant optimization to compute this division
GTF_CHK_INDEX_INBND = 0x80000000, // GT_BOUNDS_CHECK -- have proved this check is always in-bounds
GTF_ARRLEN_ARR_IDX = 0x80000000, // GT_ARR_LENGTH -- Length which feeds into an array index expression
GTF_ARRLEN_NONFAULTING = 0x20000000, // GT_ARR_LENGTH -- An array length operation that cannot fault. Same as GTF_IND_NONFAULTING.
GTF_SIMDASHW_OP = 0x80000000, // GT_HWINTRINSIC -- Indicates that the structHandle should be gotten from gtGetStructHandleForSIMD
// rather than from gtGetStructHandleForHWSIMD.
// Flag used by assertion prop to indicate that a type is a TYP_LONG
#ifdef TARGET_64BIT
GTF_ASSERTION_PROP_LONG = 0x00000001,
#endif // TARGET_64BIT
};
inline constexpr GenTreeFlags operator ~(GenTreeFlags a)
{
return (GenTreeFlags)(~(unsigned int)a);
}
inline constexpr GenTreeFlags operator |(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeFlags operator &(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator |=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeFlags& operator &=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator ^=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a ^ (unsigned int)b);
}
// Can any side-effects be observed externally, say by a caller method?
// For assignments, only assignments to global memory can be observed
// externally, whereas simple assignments to local variables can not.
//
// Be careful when using this inside a "try" protected region as the
// order of assignments to local variables would need to be preserved
// wrt side effects if the variables are alive on entry to the
// "catch/finally" region. In such cases, even assignments to locals
// will have to be restricted.
#define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
(((flags) & (GTF_CALL | GTF_EXCEPT)) || (((flags) & (GTF_ASG | GTF_GLOB_REF)) == (GTF_ASG | GTF_GLOB_REF)))
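// For example (illustrative): a tree flagged with only GTF_ASG (say, a store to a local) is not
// considered globally visible, a tree flagged with GTF_ASG | GTF_GLOB_REF (a store to global memory)
// is, and any tree carrying GTF_CALL or GTF_EXCEPT always is.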
#if defined(DEBUG)
//------------------------------------------------------------------------
// GenTreeDebugFlags: a bitmask of debug-only flags for GenTree stored in gtDebugFlags
//
enum GenTreeDebugFlags : unsigned int
{
GTF_DEBUG_NONE = 0x00000000, // No debug flags.
GTF_DEBUG_NODE_MORPHED = 0x00000001, // the node has been morphed (in the global morphing phase)
GTF_DEBUG_NODE_SMALL = 0x00000002,
GTF_DEBUG_NODE_LARGE = 0x00000004,
GTF_DEBUG_NODE_CG_PRODUCED = 0x00000008, // genProduceReg has been called on this node
GTF_DEBUG_NODE_CG_CONSUMED = 0x00000010, // genConsumeReg has been called on this node
GTF_DEBUG_NODE_LSRA_ADDED = 0x00000020, // This node was added by LSRA
GTF_DEBUG_NODE_MASK = 0x0000003F, // These flags are all node (rather than operation) properties.
GTF_DEBUG_VAR_CSE_REF = 0x00800000, // GT_LCL_VAR -- This is a CSE LCL_VAR node
};
inline constexpr GenTreeDebugFlags operator ~(GenTreeDebugFlags a)
{
return (GenTreeDebugFlags)(~(unsigned int)a);
}
inline constexpr GenTreeDebugFlags operator |(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeDebugFlags operator &(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeDebugFlags& operator |=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeDebugFlags& operator &=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
#endif // defined(DEBUG)
// clang-format on
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper)
{
return (firstOper + 1) == secondOper;
}
template <typename... Opers>
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper, Opers... otherOpers)
{
return OpersAreContiguous(firstOper, secondOper) && OpersAreContiguous(secondOper, otherOpers...);
}
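// For example (illustrative), OpersAreContiguous(GT_EQ, GT_NE, GT_LT) is true only if GT_NE == GT_EQ + 1
// and GT_LT == GT_NE + 1 in the genTreeOps enumeration; it is used in the static asserts below that guard
// the range-based oper checks such as OperIsConst and OperIsCompare.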
#ifndef HOST_64BIT
#include <pshpack4.h>
#endif
struct GenTree
{
// We use GT_STRUCT_0 only for the category of simple ops.
#define GTSTRUCT_0(fn, en) \
GenTree##fn* As##fn() \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_N(fn, ...) \
GenTree##fn* As##fn() \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en)
#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2)
#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3)
#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4)
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
genTreeOps gtOper; // enum subtype BYTE
var_types gtType; // enum subtype BYTE
genTreeOps OperGet() const
{
return gtOper;
}
var_types TypeGet() const
{
return gtType;
}
#ifdef DEBUG
genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
#endif
#define NO_CSE (0)
#define IS_CSE_INDEX(x) ((x) != 0)
#define IS_CSE_USE(x) ((x) > 0)
#define IS_CSE_DEF(x) ((x) < 0)
#define GET_CSE_INDEX(x) (((x) > 0) ? x : -(x))
#define TO_CSE_DEF(x) (-(x))
signed char gtCSEnum; // 0 or the CSE index (negated if def)
// valid only for CSE expressions
unsigned char gtLIRFlags; // Used for nodes that are in LIR. See LIR::Flags in lir.h for the various flags.
AssertionInfo gtAssertionInfo;
bool GeneratesAssertion() const
{
return gtAssertionInfo.HasAssertion();
}
void ClearAssertion()
{
gtAssertionInfo.Clear();
}
AssertionInfo GetAssertionInfo() const
{
return gtAssertionInfo;
}
void SetAssertionInfo(AssertionInfo info)
{
gtAssertionInfo = info;
}
//
// Cost metrics on the node. Don't allow direct access to the variable for setting.
//
public:
#ifdef DEBUG
// You are not allowed to read the cost values before they have been set in gtSetEvalOrder().
// Keep track of whether the costs have been initialized, and assert if they are read before being initialized.
// Obviously, this information does need to be initialized when a node is created.
// This is public so the dumpers can see it.
bool gtCostsInitialized;
#endif // DEBUG
#define MAX_COST UCHAR_MAX
#define IND_COST_EX 3 // execution cost for an indirection
unsigned char GetCostEx() const
{
assert(gtCostsInitialized);
return _gtCostEx;
}
unsigned char GetCostSz() const
{
assert(gtCostsInitialized);
return _gtCostSz;
}
// Set the costs. They are always both set at the same time.
// Don't use the "put" property: force calling this function, to make it more obvious in the few places
// that set the values.
// Note that costs are only set in gtSetEvalOrder() and its callees.
void SetCosts(unsigned costEx, unsigned costSz)
{
assert(costEx != (unsigned)-1); // looks bogus
assert(costSz != (unsigned)-1); // looks bogus
INDEBUG(gtCostsInitialized = true;)
_gtCostEx = (costEx > MAX_COST) ? MAX_COST : (unsigned char)costEx;
_gtCostSz = (costSz > MAX_COST) ? MAX_COST : (unsigned char)costSz;
}
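// For example (illustrative), SetCosts(300, 5) stores (MAX_COST, 5): both costs saturate at
// MAX_COST (UCHAR_MAX) so that they fit in the unsigned char fields below.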
// Optimized copy function, to avoid the SetCosts() function comparisons, and make it clearer that a node copy is
// happening.
void CopyCosts(const GenTree* const tree)
{
// If the 'tree' costs aren't initialized, we'll hit an assert below.
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->GetCostEx();
_gtCostSz = tree->GetCostSz();
}
// Same as CopyCosts, but avoids asserts if the costs we are copying have not been initialized.
// This is because the importer, for example, clones nodes, before these costs have been initialized.
// Note that we directly access the 'tree' costs, not going through the accessor functions (either
// directly or through the properties).
void CopyRawCosts(const GenTree* const tree)
{
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->_gtCostEx;
_gtCostSz = tree->_gtCostSz;
}
private:
unsigned char _gtCostEx; // estimate of expression execution cost
unsigned char _gtCostSz; // estimate of expression code size cost
//
// Register or register pair number of the node.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
public:
enum genRegTag
{
GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum
GT_REGTAG_REG // _gtRegNum has been assigned
};
genRegTag GetRegTag() const
{
assert(gtRegTag == GT_REGTAG_NONE || gtRegTag == GT_REGTAG_REG);
return gtRegTag;
}
private:
genRegTag gtRegTag; // What is in _gtRegNum?
#endif // DEBUG
private:
// This stores the register assigned to the node. If a register is not assigned, _gtRegNum is set to REG_NA.
regNumberSmall _gtRegNum;
// Count of operands. Used *only* by GenTreeMultiOp, exists solely due to padding constraints.
friend struct GenTreeMultiOp;
uint8_t m_operandCount;
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool canBeContained() const;
#endif
// for codegen purposes, is this node a subnode of its parent
bool isContained() const;
bool isContainedIndir() const;
bool isIndirAddrMode();
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
bool isIndir() const;
bool isContainedIntOrIImmed() const
{
return isContained() && IsCnsIntOrI() && !isUsedFromSpillTemp();
}
bool isContainedFltOrDblImmed() const
{
return isContained() && (OperGet() == GT_CNS_DBL);
}
bool isLclField() const
{
return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD;
}
bool isUsedFromSpillTemp() const;
// Indicates whether it is a memory op.
// Right now it includes Indir and LclField ops.
bool isMemoryOp() const
{
return isIndir() || isLclField();
}
bool isUsedFromMemory() const
{
return ((isContained() && (isMemoryOp() || (OperGet() == GT_LCL_VAR) || (OperGet() == GT_CNS_DBL))) ||
isUsedFromSpillTemp());
}
bool isLclVarUsedFromMemory() const
{
return (OperGet() == GT_LCL_VAR) && (isContained() || isUsedFromSpillTemp());
}
bool isLclFldUsedFromMemory() const
{
return isLclField() && (isContained() || isUsedFromSpillTemp());
}
bool isUsedFromReg() const
{
return !isContained() && !isUsedFromSpillTemp();
}
regNumber GetRegNum() const
{
assert((gtRegTag == GT_REGTAG_REG) || (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case,
// and fix everyplace that reads undefined
// values
regNumber reg = (regNumber)_gtRegNum;
assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads
// undefined values
(reg >= REG_FIRST && reg <= REG_COUNT));
return reg;
}
void SetRegNum(regNumber reg)
{
assert(reg >= REG_FIRST && reg <= REG_COUNT);
_gtRegNum = (regNumberSmall)reg;
INDEBUG(gtRegTag = GT_REGTAG_REG;)
assert(_gtRegNum == reg);
}
void ClearRegNum()
{
_gtRegNum = REG_NA;
INDEBUG(gtRegTag = GT_REGTAG_NONE;)
}
// Copy the _gtRegNum/gtRegTag fields
void CopyReg(GenTree* from);
bool gtHasReg() const;
int GetRegisterDstCount(Compiler* compiler) const;
regMaskTP gtGetRegMask() const;
GenTreeFlags gtFlags;
#if defined(DEBUG)
GenTreeDebugFlags gtDebugFlags;
#endif // defined(DEBUG)
ValueNumPair gtVNPair;
regMaskSmall gtRsvdRegs; // set of fixed trashed registers
unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const;
regNumber GetSingleTempReg(regMaskTP mask = (regMaskTP)-1);
regNumber ExtractTempReg(regMaskTP mask = (regMaskTP)-1);
void SetVNsFromNode(GenTree* tree)
{
gtVNPair = tree->gtVNPair;
}
ValueNum GetVN(ValueNumKind vnk) const
{
if (vnk == VNK_Liberal)
{
return gtVNPair.GetLiberal();
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.GetConservative();
}
}
void SetVN(ValueNumKind vnk, ValueNum vn)
{
if (vnk == VNK_Liberal)
{
return gtVNPair.SetLiberal(vn);
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.SetConservative(vn);
}
}
void SetVNs(ValueNumPair vnp)
{
gtVNPair = vnp;
}
void ClearVN()
{
gtVNPair = ValueNumPair(); // Initializes both elements to "NoVN".
}
GenTree* gtNext;
GenTree* gtPrev;
#ifdef DEBUG
unsigned gtTreeID;
unsigned gtSeqNum; // liveness traversal order within the current statement
int gtUseNum; // use-ordered traversal within the function
#endif
static const unsigned char gtOperKindTable[];
static unsigned OperKind(unsigned gtOper)
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
unsigned OperKind() const
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
static bool IsExOp(unsigned opKind)
{
return (opKind & GTK_EXOP) != 0;
}
bool IsValue() const
{
if ((OperKind(gtOper) & GTK_NOVALUE) != 0)
{
return false;
}
if (gtType == TYP_VOID)
{
// These are the only operators which can produce either VOID or non-VOID results.
assert(OperIs(GT_NOP, GT_CALL, GT_COMMA) || OperIsCompare() || OperIsLong() || OperIsSIMD() ||
OperIsHWIntrinsic());
return false;
}
return true;
}
// LIR flags
// These helper methods, along with the flag values they manipulate, are defined in lir.h
//
// UnusedValue indicates that, although this node produces a value, it is unused.
inline void SetUnusedValue();
inline void ClearUnusedValue();
inline bool IsUnusedValue() const;
// RegOptional indicates that codegen can still generate code even if it isn't allocated a register.
inline bool IsRegOptional() const;
inline void SetRegOptional();
inline void ClearRegOptional();
#ifdef DEBUG
void dumpLIRFlags();
#endif
bool TypeIs(var_types type) const
{
return gtType == type;
}
template <typename... T>
bool TypeIs(var_types type, T... rest) const
{
return TypeIs(type) || TypeIs(rest...);
}
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper)
{
return operCompare == oper;
}
template <typename... T>
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper, T... rest)
{
return StaticOperIs(operCompare, oper) || StaticOperIs(operCompare, rest...);
}
bool OperIs(genTreeOps oper) const
{
return OperGet() == oper;
}
template <typename... T>
bool OperIs(genTreeOps oper, T... rest) const
{
return OperIs(oper) || OperIs(rest...);
}
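// For example (illustrative), tree->OperIs(GT_ADD, GT_SUB, GT_MUL) expands to
// OperIs(GT_ADD) || OperIs(GT_SUB) || OperIs(GT_MUL), i.e. "is the oper any of these".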
static bool OperIsConst(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR));
return (GT_CNS_INT <= gtOper) && (gtOper <= GT_CNS_STR);
}
bool OperIsConst() const
{
return OperIsConst(gtOper);
}
static bool OperIsLeaf(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
bool OperIsLeaf() const
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
static bool OperIsLocal(genTreeOps gtOper)
{
static_assert_no_msg(
OpersAreContiguous(GT_PHI_ARG, GT_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
return (GT_PHI_ARG <= gtOper) && (gtOper <= GT_STORE_LCL_FLD);
}
static bool OperIsLocalAddr(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR_ADDR || gtOper == GT_LCL_FLD_ADDR);
}
static bool OperIsLocalField(genTreeOps gtOper)
{
return (gtOper == GT_LCL_FLD || gtOper == GT_LCL_FLD_ADDR || gtOper == GT_STORE_LCL_FLD);
}
inline bool OperIsLocalField() const
{
return OperIsLocalField(gtOper);
}
static bool OperIsScalarLocal(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR || gtOper == GT_STORE_LCL_VAR);
}
static bool OperIsNonPhiLocal(genTreeOps gtOper)
{
return OperIsLocal(gtOper) && (gtOper != GT_PHI_ARG);
}
static bool OperIsLocalRead(genTreeOps gtOper)
{
return (OperIsLocal(gtOper) && !OperIsLocalStore(gtOper));
}
static bool OperIsLocalStore(genTreeOps gtOper)
{
return (gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD);
}
static bool OperIsAddrMode(genTreeOps gtOper)
{
return (gtOper == GT_LEA);
}
static bool OperIsInitVal(genTreeOps gtOper)
{
return (gtOper == GT_INIT_VAL);
}
bool OperIsInitVal() const
{
return OperIsInitVal(OperGet());
}
bool IsConstInitVal() const
{
return (gtOper == GT_CNS_INT) || (OperIsInitVal() && (gtGetOp1()->gtOper == GT_CNS_INT));
}
bool OperIsBlkOp();
bool OperIsCopyBlkOp();
bool OperIsInitBlkOp();
static bool OperIsBlk(genTreeOps gtOper)
{
return (gtOper == GT_BLK) || (gtOper == GT_OBJ) || OperIsStoreBlk(gtOper);
}
bool OperIsBlk() const
{
return OperIsBlk(OperGet());
}
static bool OperIsStoreBlk(genTreeOps gtOper)
{
return StaticOperIs(gtOper, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK);
}
bool OperIsStoreBlk() const
{
return OperIsStoreBlk(OperGet());
}
bool OperIsPutArgSplit() const
{
#if FEATURE_ARG_SPLIT
assert((gtOper != GT_PUTARG_SPLIT) || compFeatureArgSplit());
return gtOper == GT_PUTARG_SPLIT;
#else // !FEATURE_ARG_SPLIT
return false;
#endif
}
bool OperIsPutArgStk() const
{
return gtOper == GT_PUTARG_STK;
}
bool OperIsPutArgStkOrSplit() const
{
return OperIsPutArgStk() || OperIsPutArgSplit();
}
bool OperIsPutArgReg() const
{
return gtOper == GT_PUTARG_REG;
}
bool OperIsPutArg() const
{
return OperIsPutArgStk() || OperIsPutArgReg() || OperIsPutArgSplit();
}
bool OperIsFieldList() const
{
return OperIs(GT_FIELD_LIST);
}
bool OperIsMultiRegOp() const
{
#if !defined(TARGET_64BIT)
if (OperIs(GT_MUL_LONG))
{
return true;
}
#if defined(TARGET_ARM)
if (OperIs(GT_PUTARG_REG, GT_BITCAST))
{
return true;
}
#endif // TARGET_ARM
#endif // TARGET_64BIT
return false;
}
bool OperIsAddrMode() const
{
return OperIsAddrMode(OperGet());
}
bool OperIsLocal() const
{
return OperIsLocal(OperGet());
}
bool OperIsLocalAddr() const
{
return OperIsLocalAddr(OperGet());
}
bool OperIsScalarLocal() const
{
return OperIsScalarLocal(OperGet());
}
bool OperIsNonPhiLocal() const
{
return OperIsNonPhiLocal(OperGet());
}
bool OperIsLocalStore() const
{
return OperIsLocalStore(OperGet());
}
bool OperIsLocalRead() const
{
return OperIsLocalRead(OperGet());
}
static bool OperIsCompare(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE));
return (GT_EQ <= gtOper) && (gtOper <= GT_TEST_NE);
}
bool OperIsCompare() const
{
return OperIsCompare(OperGet());
}
static bool OperIsShift(genTreeOps gtOper)
{
return (gtOper == GT_LSH) || (gtOper == GT_RSH) || (gtOper == GT_RSZ);
}
bool OperIsShift() const
{
return OperIsShift(OperGet());
}
static bool OperIsShiftLong(genTreeOps gtOper)
{
#ifdef TARGET_64BIT
return false;
#else
return (gtOper == GT_LSH_HI) || (gtOper == GT_RSH_LO);
#endif
}
bool OperIsShiftLong() const
{
return OperIsShiftLong(OperGet());
}
static bool OperIsRotate(genTreeOps gtOper)
{
return (gtOper == GT_ROL) || (gtOper == GT_ROR);
}
bool OperIsRotate() const
{
return OperIsRotate(OperGet());
}
static bool OperIsShiftOrRotate(genTreeOps gtOper)
{
return OperIsShift(gtOper) || OperIsRotate(gtOper) || OperIsShiftLong(gtOper);
}
bool OperIsShiftOrRotate() const
{
return OperIsShiftOrRotate(OperGet());
}
static bool OperIsMul(genTreeOps gtOper)
{
return (gtOper == GT_MUL) || (gtOper == GT_MULHI)
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
|| (gtOper == GT_MUL_LONG)
#endif
;
}
bool OperIsMul() const
{
return OperIsMul(gtOper);
}
bool OperIsArithmetic() const
{
genTreeOps op = OperGet();
return op == GT_ADD || op == GT_SUB || op == GT_MUL || op == GT_DIV || op == GT_MOD
|| op == GT_UDIV || op == GT_UMOD
|| op == GT_OR || op == GT_XOR || op == GT_AND
|| OperIsShiftOrRotate(op);
}
#ifdef TARGET_XARCH
static bool OperIsRMWMemOp(genTreeOps gtOper)
{
// Return if binary op is one of the supported operations for RMW of memory.
return (gtOper == GT_ADD || gtOper == GT_SUB || gtOper == GT_AND || gtOper == GT_OR || gtOper == GT_XOR ||
gtOper == GT_NOT || gtOper == GT_NEG || OperIsShiftOrRotate(gtOper));
}
bool OperIsRMWMemOp() const
{
// Return if binary op is one of the supported operations for RMW of memory.
return OperIsRMWMemOp(gtOper);
}
#endif // TARGET_XARCH
static bool OperIsUnary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_UNOP) != 0;
}
bool OperIsUnary() const
{
return OperIsUnary(gtOper);
}
static bool OperIsBinary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_BINOP) != 0;
}
bool OperIsBinary() const
{
return OperIsBinary(gtOper);
}
static bool OperIsSimple(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_SMPOP) != 0;
}
static bool OperIsSpecial(genTreeOps gtOper)
{
return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
}
bool OperIsSimple() const
{
return OperIsSimple(gtOper);
}
#ifdef FEATURE_SIMD
bool isCommutativeSIMDIntrinsic();
#else // !FEATURE_SIMD
bool isCommutativeSIMDIntrinsic()
{
return false;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool isCommutativeHWIntrinsic() const;
bool isContainableHWIntrinsic() const;
bool isRMWHWIntrinsic(Compiler* comp);
#else
bool isCommutativeHWIntrinsic() const
{
return false;
}
bool isContainableHWIntrinsic() const
{
return false;
}
bool isRMWHWIntrinsic(Compiler* comp)
{
return false;
}
#endif // FEATURE_HW_INTRINSICS
static bool OperIsCommutative(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_COMMUTE) != 0;
}
bool OperIsCommutative()
{
return OperIsCommutative(gtOper) || (OperIsSIMD(gtOper) && isCommutativeSIMDIntrinsic()) ||
(OperIsHWIntrinsic(gtOper) && isCommutativeHWIntrinsic());
}
static bool OperMayOverflow(genTreeOps gtOper)
{
return ((gtOper == GT_ADD) || (gtOper == GT_SUB) || (gtOper == GT_MUL) || (gtOper == GT_CAST)
#if !defined(TARGET_64BIT)
|| (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI)
#endif
);
}
bool OperMayOverflow() const
{
return OperMayOverflow(gtOper);
}
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
static bool OperIsIndir(genTreeOps gtOper)
{
return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK || OperIsBlk(gtOper);
}
static bool OperIsIndirOrArrLength(genTreeOps gtOper)
{
return OperIsIndir(gtOper) || (gtOper == GT_ARR_LENGTH);
}
bool OperIsIndir() const
{
return OperIsIndir(gtOper);
}
bool OperIsIndirOrArrLength() const
{
return OperIsIndirOrArrLength(gtOper);
}
bool OperIsImplicitIndir() const;
static bool OperIsAtomicOp(genTreeOps gtOper)
{
switch (gtOper)
{
case GT_XADD:
case GT_XORR:
case GT_XAND:
case GT_XCHG:
case GT_LOCKADD:
case GT_CMPXCHG:
return true;
default:
return false;
}
}
bool OperIsAtomicOp() const
{
return OperIsAtomicOp(gtOper);
}
bool OperIsStore() const
{
return OperIsStore(gtOper);
}
static bool OperIsStore(genTreeOps gtOper)
{
return (gtOper == GT_STOREIND || gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD ||
OperIsStoreBlk(gtOper) || OperIsAtomicOp(gtOper));
}
static bool OperIsMultiOp(genTreeOps gtOper)
{
return OperIsSIMD(gtOper) || OperIsHWIntrinsic(gtOper);
}
bool OperIsMultiOp() const
{
return OperIsMultiOp(OperGet());
}
// This is here for cleaner FEATURE_SIMD #ifdefs.
static bool OperIsSIMD(genTreeOps gtOper)
{
#ifdef FEATURE_SIMD
return gtOper == GT_SIMD;
#else // !FEATURE_SIMD
return false;
#endif // !FEATURE_SIMD
}
bool OperIsSIMD() const
{
return OperIsSIMD(gtOper);
}
static bool OperIsHWIntrinsic(genTreeOps gtOper)
{
#ifdef FEATURE_HW_INTRINSICS
return gtOper == GT_HWINTRINSIC;
#else
return false;
#endif // FEATURE_HW_INTRINSICS
}
bool OperIsHWIntrinsic() const
{
return OperIsHWIntrinsic(gtOper);
}
bool OperIsSimdOrHWintrinsic() const
{
return OperIsSIMD() || OperIsHWIntrinsic();
}
// This is here for cleaner GT_LONG #ifdefs.
static bool OperIsLong(genTreeOps gtOper)
{
#if defined(TARGET_64BIT)
return false;
#else
return gtOper == GT_LONG;
#endif
}
bool OperIsLong() const
{
return OperIsLong(gtOper);
}
bool OperIsConditionalJump() const
{
return (gtOper == GT_JTRUE) || (gtOper == GT_JCMP) || (gtOper == GT_JCC);
}
#ifdef DEBUG
static const GenTreeDebugOperKind gtDebugOperKindTable[];
static GenTreeDebugOperKind DebugOperKind(genTreeOps oper)
{
assert(oper < GT_COUNT);
return gtDebugOperKindTable[oper];
}
GenTreeDebugOperKind DebugOperKind() const
{
return DebugOperKind(OperGet());
}
bool NullOp1Legal() const
{
assert(OperIsSimple());
switch (gtOper)
{
case GT_LEA:
case GT_RETFILT:
case GT_NOP:
case GT_FIELD:
return true;
case GT_RETURN:
return gtType == TYP_VOID;
default:
return false;
}
}
bool NullOp2Legal() const
{
assert(OperIsSimple(gtOper) || OperIsBlk(gtOper));
if (!OperIsBinary(gtOper))
{
return true;
}
switch (gtOper)
{
case GT_INTRINSIC:
case GT_LEA:
#if defined(TARGET_ARM)
case GT_PUTARG_REG:
#endif // defined(TARGET_ARM)
return true;
default:
return false;
}
}
bool OperIsLIR() const
{
if (OperIs(GT_NOP))
{
// NOPs may only be present in LIR if they do not produce a value.
return IsNothingNode();
}
return (DebugOperKind() & DBK_NOTLIR) == 0;
}
bool OperSupportsReverseOpEvalOrder(Compiler* comp) const;
static bool RequiresNonNullOp2(genTreeOps oper);
bool IsValidCallArgument();
#endif // DEBUG
inline bool IsFPZero() const;
inline bool IsIntegralConst(ssize_t constVal) const;
inline bool IsIntegralConstVector(ssize_t constVal) const;
inline bool IsSIMDZero() const;
inline bool IsFloatPositiveZero() const;
inline bool IsVectorZero() const;
inline bool IsBoxedValue();
inline GenTree* gtGetOp1() const;
// Directly return op2. Asserts the node is binary. Might return nullptr if the binary node allows
// a nullptr op2, such as GT_LEA. This is more efficient than gtGetOp2IfPresent() if you know what
// node type you have.
inline GenTree* gtGetOp2() const;
// The returned pointer might be nullptr if the node is not binary, or if non-null op2 is not required.
inline GenTree* gtGetOp2IfPresent() const;
bool TryGetUse(GenTree* operand, GenTree*** pUse);
bool TryGetUse(GenTree* operand)
{
GenTree** unusedUse = nullptr;
return TryGetUse(operand, &unusedUse);
}
private:
bool TryGetUseBinOp(GenTree* operand, GenTree*** pUse);
public:
GenTree* gtGetParent(GenTree*** pUse);
void ReplaceOperand(GenTree** useEdge, GenTree* replacement);
inline GenTree* gtEffectiveVal(bool commaOnly = false);
inline GenTree* gtCommaAssignVal();
// Tunnel through any GT_RET_EXPRs
GenTree* gtRetExprVal(BasicBlockFlags* pbbFlags = nullptr);
inline GenTree* gtSkipPutArgType();
// Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
inline GenTree* gtSkipReloadOrCopy();
// Returns true if it is a call node returning its value in more than one register
inline bool IsMultiRegCall() const;
// Returns true if it is a struct lclVar node residing in multiple registers.
inline bool IsMultiRegLclVar() const;
// Returns true if it is a node returning its value in more than one register
bool IsMultiRegNode() const;
// Returns the number of registers defined by a multireg node.
unsigned GetMultiRegCount() const;
// Returns the regIndex'th register defined by a possibly-multireg node.
regNumber GetRegByIndex(int regIndex);
// Returns the type of the regIndex'th register defined by a multi-reg node.
var_types GetRegTypeByIndex(int regIndex);
// Returns the GTF flag equivalent for the regIndex'th register of a multi-reg node.
GenTreeFlags GetRegSpillFlagByIdx(int regIndex) const;
// Last-use information for either GenTreeLclVar or GenTreeCopyOrReload nodes.
private:
GenTreeFlags GetLastUseBit(int regIndex);
public:
bool IsLastUse(int regIndex);
bool HasLastUse();
void SetLastUse(int regIndex);
void ClearLastUse(int regIndex);
// Returns true if it is a GT_COPY or GT_RELOAD node
inline bool IsCopyOrReload() const;
// Returns true if it is a GT_COPY or GT_RELOAD of a multi-reg call node
inline bool IsCopyOrReloadOfMultiRegCall() const;
bool OperRequiresAsgFlag();
bool OperRequiresCallFlag(Compiler* comp);
bool OperMayThrow(Compiler* comp);
unsigned GetScaleIndexMul();
unsigned GetScaleIndexShf();
unsigned GetScaledIndex();
public:
static unsigned char s_gtNodeSizes[];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
static unsigned char s_gtTrueSizes[];
#endif
#if COUNT_AST_OPERS
static unsigned s_gtNodeCounts[];
#endif
static void InitNodeSize();
size_t GetNodeSize() const;
bool IsNodeProperlySized() const;
void ReplaceWith(GenTree* src, Compiler* comp);
static genTreeOps ReverseRelop(genTreeOps relop);
static genTreeOps SwapRelop(genTreeOps relop);
//---------------------------------------------------------------------
static bool Compare(GenTree* op1, GenTree* op2, bool swapOK = false);
//---------------------------------------------------------------------
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* OpName(genTreeOps op);
#endif
#if MEASURE_NODE_SIZE
static const char* OpStructName(genTreeOps op);
#endif
//---------------------------------------------------------------------
bool IsNothingNode() const;
void gtBashToNOP();
// Value number update action enumeration
enum ValueNumberUpdate
{
CLEAR_VN, // Clear value number
PRESERVE_VN // Preserve value number
};
void SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN); // set gtOper
void SetOperResetFlags(genTreeOps oper); // set gtOper and reset flags
// set gtOper and only keep GTF_COMMON_MASK flags
void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
void ChangeOperUnchecked(genTreeOps oper);
void SetOperRaw(genTreeOps oper);
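// Change the static type of this node. For GT_COMMA chains the new type is also propagated
// down the op2 spine so that each comma's value type stays consistent with its second operand.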
void ChangeType(var_types newType)
{
var_types oldType = gtType;
gtType = newType;
GenTree* node = this;
while (node->gtOper == GT_COMMA)
{
node = node->gtGetOp2();
if (node->gtType != newType)
{
assert(node->gtType == oldType);
node->gtType = newType;
}
}
}
template <typename T>
void BashToConst(T value, var_types type = TYP_UNDEF);
void BashToZeroConst(var_types type);
#if NODEBASH_STATS
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew);
static void ReportOperBashing(FILE* fp);
#else
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{ /* do nothing */
}
static void ReportOperBashing(FILE* fp)
{ /* do nothing */
}
#endif
bool IsLocal() const
{
return OperIsLocal(OperGet());
}
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR.
bool IsPartialLclFld(Compiler* comp);
// Returns "true" iff "this" defines a local variable. Requires "comp" to be the
// current compilation. If it returns "true", sets "*pLclVarTree" to the
// tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to
// true or false, depending on whether the assignment writes to the entirety of the local
// variable, or just a portion of it.
bool DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr);
bool IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset = nullptr);
// Simpler variant of the above which just returns the local node if this is an expression that
// yields an address into a local
GenTreeLclVarCommon* IsLocalAddrExpr();
// Determine if this tree represents the value of an entire implicit byref parameter,
// and if so return the tree for the parameter.
GenTreeLclVar* IsImplicitByrefParameterValue(Compiler* compiler);
// Determine if this is a LclVarCommon node and return some additional info about it in the
// two out parameters.
bool IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq);
// Determine whether this is an assignment tree of the form X = X (op) Y,
// where Y is an arbitrary tree, and X is a lclVar.
unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps* updateOper);
bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of an array (the child of a GT_IND labeled with GTF_IND_ARR_INDEX).
// Sets "pArr" to the node representing the array (either an array object pointer, or perhaps a byref to the some
// element).
// Sets "*pArrayType" to the class handle for the array type.
// Sets "*inxVN" to the value number inferred for the array index.
// Sets "*pFldSeq" to the sequence, if any, of struct fields used to index into the array element.
void ParseArrayAddress(
Compiler* comp, struct ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq);
// Helper method for the above.
void ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq);
// Requires "this" to be a GT_IND. Requires the outermost caller to set "*pFldSeq" to nullptr.
// Returns true if it is an array index expression, or access to a (sequence of) struct field(s)
// within a struct array element. If it returns true, sets *arrayInfo to the array information, and sets *pFldSeq
// to the sequence of struct field accesses.
bool ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of a (possible) array element (or struct field within that).
// If it is, sets "*arrayInfo" to the array access info, "*pFldSeq" to the sequence of struct fields
// accessed within the array element, and returns true. If not, returns "false".
bool ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be an int expression. If it is a sequence of one or more integer constants added together,
// returns true and sets "*pFldSeq" to the sequence of fields with which those constants are annotated.
bool ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq);
// Labels "*this" as an array index expression: label all constants and variables that could contribute, as part of
// an affine expression, to the value of the index.
void LabelIndex(Compiler* comp, bool isConst = true);
// Assumes that "this" occurs in a context where it is being dereferenced as the LHS of an assignment-like
// statement (assignment, initblk, or copyblk). The "width" should be the number of bytes copied by the
// operation. Returns "true" if "this" is an address of (or within)
// a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null,
// sets "*pIsEntire" to true if this assignment writes the full width of the local.
bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire);
// These are only used for dumping.
// The GetRegNum() is only valid in LIR, but the dumping methods are not easily
// modified to check this.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool InReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? true : false;
}
regNumber GetReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? GetRegNum() : REG_NA;
}
#endif
static bool IsContained(unsigned flags)
{
return ((flags & GTF_CONTAINED) != 0);
}
void SetContained()
{
assert(IsValue());
gtFlags |= GTF_CONTAINED;
assert(isContained());
}
void ClearContained()
{
assert(IsValue());
gtFlags &= ~GTF_CONTAINED;
ClearRegOptional();
}
bool CanCSE() const
{
return ((gtFlags & GTF_DONT_CSE) == 0);
}
void SetDoNotCSE()
{
gtFlags |= GTF_DONT_CSE;
}
void ClearDoNotCSE()
{
gtFlags &= ~GTF_DONT_CSE;
}
bool IsReverseOp() const
{
return (gtFlags & GTF_REVERSE_OPS) ? true : false;
}
void SetReverseOp()
{
gtFlags |= GTF_REVERSE_OPS;
}
void ClearReverseOp()
{
gtFlags &= ~GTF_REVERSE_OPS;
}
bool IsUnsigned() const
{
return ((gtFlags & GTF_UNSIGNED) != 0);
}
void SetUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST, GT_LE, GT_LT, GT_GT, GT_GE) || OperIsMul());
gtFlags |= GTF_UNSIGNED;
}
void ClearUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST) || OperIsMul());
gtFlags &= ~GTF_UNSIGNED;
}
void SetOverflow()
{
assert(OperMayOverflow());
gtFlags |= GTF_OVERFLOW;
}
void ClearOverflow()
{
assert(OperMayOverflow());
gtFlags &= ~GTF_OVERFLOW;
}
bool Is64RsltMul() const
{
return (gtFlags & GTF_MUL_64RSLT) != 0;
}
void Set64RsltMul()
{
gtFlags |= GTF_MUL_64RSLT;
}
void Clear64RsltMul()
{
gtFlags &= ~GTF_MUL_64RSLT;
}
void SetAllEffectsFlags(GenTree* source)
{
SetAllEffectsFlags(source->gtFlags & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource, GenTree* thirdSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags | thirdSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTreeFlags sourceFlags)
{
assert((sourceFlags & ~GTF_ALL_EFFECT) == 0);
gtFlags &= ~GTF_ALL_EFFECT;
gtFlags |= sourceFlags;
}
inline bool IsCnsIntOrI() const;
inline bool IsIntegralConst() const;
inline bool IsIntCnsFitsInI32(); // Constant fits in INT32
inline bool IsCnsFltOrDbl() const;
inline bool IsCnsNonZeroFltOrDbl() const;
bool IsIconHandle() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK) ? true : false;
}
bool IsIconHandle(GenTreeFlags handleType) const
{
assert(gtOper == GT_CNS_INT);
assert((handleType & GTF_ICON_HDL_MASK) != 0); // check that handleType is one of the valid GTF_ICON_* values
assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
return (gtFlags & GTF_ICON_HDL_MASK) == handleType;
}
// Return just the part of the flags corresponding to the GTF_ICON_*_HDL flag. For example,
// GTF_ICON_SCOPE_HDL. The tree node must be a const int, but it might not be a handle, in which
// case we'll return zero.
GenTreeFlags GetIconHandleFlag() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK);
}
// Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
void ClearIconHandleMask()
{
assert(gtOper == GT_CNS_INT);
gtFlags &= ~GTF_ICON_HDL_MASK;
}
// Return true if the two GT_CNS_INT trees have the same handle flag (GTF_ICON_*_HDL).
static bool SameIconHandleFlag(GenTree* t1, GenTree* t2)
{
return t1->GetIconHandleFlag() == t2->GetIconHandleFlag();
}
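// A minimal usage sketch (hypothetical caller code) for querying handle constants; it assumes
// GTF_ICON_CLASS_HDL is one of the GTF_ICON_*_HDL values defined with the other GenTree flags:
//
//    if (tree->IsCnsIntOrI() && tree->IsIconHandle(GTF_ICON_CLASS_HDL))
//    {
//        // 'tree' is an integer constant that carries a class handle.
//    }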
bool IsArgPlaceHolderNode() const
{
return OperGet() == GT_ARGPLACE;
}
bool IsCall() const
{
return OperGet() == GT_CALL;
}
inline bool IsHelperCall();
bool gtOverflow() const;
bool gtOverflowEx() const;
bool gtSetFlags() const;
bool gtRequestSetFlags();
#ifdef DEBUG
static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags);
#endif
// cast operations
inline var_types CastFromType();
inline var_types& CastToType();
// Returns "true" iff "this" is a phi-related node (i.e. a GT_PHI_ARG, GT_PHI, or a PhiDefn).
bool IsPhiNode();
// Returns "true" iff "*this" is an assignment (GT_ASG) tree that defines an SSA name (lcl = phi(...));
bool IsPhiDefn();
// Returns "true" iff "*this" is a statement containing an assignment that defines an SSA name (lcl = phi(...));
// Because of the fact that we hid the assignment operator of "BitSet" (in DEBUG),
// we can't synthesize an assignment operator.
// TODO-Cleanup: Could change this w/o liveset on tree nodes
// (This is also necessary for the VTable trick.)
GenTree()
{
}
// Returns an iterator that will produce the use edge to each operand of this node. Differs
// from the sequence of nodes produced by a loop over `GetChild` in its handling of call, phi,
// and block op nodes.
GenTreeUseEdgeIterator UseEdgesBegin();
GenTreeUseEdgeIterator UseEdgesEnd();
IteratorPair<GenTreeUseEdgeIterator> UseEdges();
// Returns an iterator that will produce each operand of this node, in execution order.
GenTreeOperandIterator OperandsBegin();
GenTreeOperandIterator OperandsEnd();
// Returns a range that will produce the operands of this node in execution order.
IteratorPair<GenTreeOperandIterator> Operands();
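// A minimal sketch (hypothetical caller code) of operand iteration: the range returned by
// Operands() can be consumed with a range-based for loop, visiting operands in execution order.
//
//    for (GenTree* operand : node->Operands())
//    {
//        // process 'operand'
//    }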
enum class VisitResult
{
Abort = false,
Continue = true
};
// Visits each operand of this node. The operand must be either a lambda, function, or functor with the signature
// `GenTree::VisitResult VisitorFunction(GenTree* operand)`. Here is a simple example:
//
// unsigned operandCount = 0;
// node->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult)
// {
// operandCount++;
// return GenTree::VisitResult::Continue;
// });
//
// This function is generally more efficient than the operand iterator and should be preferred over that API for
// hot code, as it affords better opportunities for inlining and achieves shorter dynamic path lengths when
// deciding how operands need to be accessed.
//
// Note that this function does not respect `GTF_REVERSE_OPS`. This is always safe in LIR, but may be dangerous
// in HIR if for some reason you need to visit operands in the order in which they will execute.
template <typename TVisitor>
void VisitOperands(TVisitor visitor);
private:
template <typename TVisitor>
void VisitBinOpOperands(TVisitor visitor);
public:
bool Precedes(GenTree* other);
bool IsInvariant() const;
bool IsNeverNegative(Compiler* comp) const;
bool IsReuseRegVal() const
{
// This can be extended to non-constant nodes, but not to local or indir nodes.
if (IsInvariant() && ((gtFlags & GTF_REUSE_REG_VAL) != 0))
{
return true;
}
return false;
}
void SetReuseRegVal()
{
assert(IsInvariant());
gtFlags |= GTF_REUSE_REG_VAL;
}
void ResetReuseRegVal()
{
assert(IsInvariant());
gtFlags &= ~GTF_REUSE_REG_VAL;
}
void SetIndirExceptionFlags(Compiler* comp);
#if MEASURE_NODE_SIZE
static void DumpNodeSizes(FILE* fp);
#endif
#ifdef DEBUG
private:
GenTree& operator=(const GenTree& gt)
{
assert(!"Don't copy");
return *this;
}
#endif // DEBUG
#if DEBUGGABLE_GENTREE
// In DEBUG builds, add a dummy virtual method, to give the debugger run-time type information.
virtual void DummyVirt()
{
}
typedef void* VtablePtr;
VtablePtr GetVtableForOper(genTreeOps oper);
void SetVtableForOper(genTreeOps oper);
static VtablePtr s_vtablesForOpers[GT_COUNT];
static VtablePtr s_vtableForOp;
#endif // DEBUGGABLE_GENTREE
public:
inline void* operator new(size_t sz, class Compiler*, genTreeOps oper);
inline GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false));
};
// Represents a GT_PHI node - a variable sized list of GT_PHI_ARG nodes.
// All PHI_ARG nodes must represent uses of the same local variable and
// the PHI node's type must be the same as the local variable's type.
//
// The PHI node does not represent a definition by itself, it is always
// the RHS of a GT_ASG node. The LHS of the ASG node is always a GT_LCL_VAR
// node, that is a definition for the same local variable referenced by
// all the used PHI_ARG nodes:
//
// ASG(LCL_VAR(lcl7), PHI(PHI_ARG(lcl7), PHI_ARG(lcl7), PHI_ARG(lcl7)))
//
// PHI nodes are also present in LIR, where GT_STORE_LCL_VAR replaces the
// ASG node.
//
// The order of the PHI_ARG uses is not currently relevant and it may be
// the same or not as the order of the predecessor blocks.
//
struct GenTreePhi final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node->OperIs(GT_PHI_ARG));
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node->OperIs(GT_PHI_ARG));
return m_node;
}
void SetNode(GenTree* node)
{
assert(node->OperIs(GT_PHI_ARG));
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtUses;
GenTreePhi(var_types type) : GenTree(GT_PHI, type), gtUses(nullptr)
{
}
UseList Uses()
{
return UseList(gtUses);
}
//--------------------------------------------------------------------------
// Equals: Checks if 2 PHI nodes are equal.
//
// Arguments:
// phi1 - The first PHI node
// phi2 - The second PHI node
//
// Return Value:
// true if the 2 PHI nodes have the same type, number of uses, and the
// uses are equal.
//
// Notes:
// The order of uses must be the same for equality, even if the
// order is not usually relevant and is not guaranteed to reflect
// a particular order of the predecessor blocks.
//
static bool Equals(GenTreePhi* phi1, GenTreePhi* phi2)
{
if (phi1->TypeGet() != phi2->TypeGet())
{
return false;
}
GenTreePhi::UseIterator i1 = phi1->Uses().begin();
GenTreePhi::UseIterator end1 = phi1->Uses().end();
GenTreePhi::UseIterator i2 = phi2->Uses().begin();
GenTreePhi::UseIterator end2 = phi2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
#if DEBUGGABLE_GENTREE
GenTreePhi() : GenTree()
{
}
#endif
};
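// A minimal sketch (hypothetical caller code) of walking the PHI_ARG uses of a GenTreePhi:
//
//    for (GenTreePhi::Use& use : phi->Uses())
//    {
//        GenTree* phiArg = use.GetNode(); // always a GT_PHI_ARG node
//    }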
// Represents a list of fields constituting a struct, when it is passed as an argument.
//
struct GenTreeFieldList : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
uint16_t m_offset;
var_types m_type;
public:
Use(GenTree* node, unsigned offset, var_types type)
: m_node(node), m_next(nullptr), m_offset(static_cast<uint16_t>(offset)), m_type(type)
{
// We can save space on 32 bit hosts by storing the offset as uint16_t. Struct promotion
// only accepts structs which are much smaller than that - 128 bytes = max 4 fields * max
// SIMD vector size (32 bytes).
assert(offset <= UINT16_MAX);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
unsigned GetOffset() const
{
return m_offset;
}
var_types GetType() const
{
return m_type;
}
void SetType(var_types type)
{
m_type = type;
}
};
class UseIterator
{
Use* use;
public:
UseIterator(Use* use) : use(use)
{
}
Use& operator*()
{
return *use;
}
Use* operator->()
{
return use;
}
void operator++()
{
use = use->GetNext();
}
bool operator==(const UseIterator& other)
{
return use == other.use;
}
bool operator!=(const UseIterator& other)
{
return use != other.use;
}
};
class UseList
{
Use* m_head;
Use* m_tail;
public:
UseList() : m_head(nullptr), m_tail(nullptr)
{
}
Use* GetHead() const
{
return m_head;
}
UseIterator begin() const
{
return m_head;
}
UseIterator end() const
{
return nullptr;
}
void AddUse(Use* newUse)
{
assert(newUse->GetNext() == nullptr);
if (m_head == nullptr)
{
m_head = newUse;
}
else
{
m_tail->SetNext(newUse);
}
m_tail = newUse;
}
void InsertUse(Use* insertAfter, Use* newUse)
{
assert(newUse->GetNext() == nullptr);
newUse->SetNext(insertAfter->GetNext());
insertAfter->SetNext(newUse);
if (m_tail == insertAfter)
{
m_tail = newUse;
}
}
void Reverse()
{
m_tail = m_head;
m_head = nullptr;
for (Use *next, *use = m_tail; use != nullptr; use = next)
{
next = use->GetNext();
use->SetNext(m_head);
m_head = use;
}
}
bool IsSorted() const
{
unsigned offset = 0;
for (GenTreeFieldList::Use& use : *this)
{
if (use.GetOffset() < offset)
{
return false;
}
offset = use.GetOffset();
}
return true;
}
};
private:
UseList m_uses;
public:
GenTreeFieldList() : GenTree(GT_FIELD_LIST, TYP_STRUCT)
{
SetContained();
}
UseList& Uses()
{
return m_uses;
}
// Add a new field use to the end of the use list and update side effect flags.
void AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Add a new field use to the end of the use list without updating side effect flags.
void AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use and update side effect flags.
void InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use without updating side effect flags.
void InsertFieldLIR(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
//--------------------------------------------------------------------------
// Equals: Check if 2 FIELD_LIST nodes are equal.
//
// Arguments:
// list1 - The first FIELD_LIST node
// list2 - The second FIELD_LIST node
//
// Return Value:
// true if the 2 FIELD_LIST nodes have the same type, number of uses, and the
// uses are equal.
//
static bool Equals(GenTreeFieldList* list1, GenTreeFieldList* list2)
{
assert(list1->TypeGet() == TYP_STRUCT);
assert(list2->TypeGet() == TYP_STRUCT);
UseIterator i1 = list1->Uses().begin();
UseIterator end1 = list1->Uses().end();
UseIterator i2 = list2->Uses().begin();
UseIterator end2 = list2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()) || (i1->GetOffset() != i2->GetOffset()) ||
(i1->GetType() != i2->GetType()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
};
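// A minimal sketch (hypothetical caller code) of walking a FIELD_LIST: each use carries the
// field's node, its offset within the struct, and its type.
//
//    for (GenTreeFieldList::Use& use : fieldList->Uses())
//    {
//        GenTree*  fieldNode   = use.GetNode();
//        unsigned  fieldOffset = use.GetOffset();
//        var_types fieldType   = use.GetType();
//    }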
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: an iterator that will produce each use edge of a GenTree node in the order in which
// they are used.
//
// Operand iteration is common enough in the back end of the compiler that the implementation of this type has
// traded some simplicity for speed:
// - As much work as is reasonable is done in the constructor rather than during operand iteration
// - Node-specific functionality is handled by a small class of "advance" functions called by operator++
// rather than making operator++ itself handle all nodes
// - Some specialization has been performed for specific node types/shapes (e.g. the advance function for
// binary nodes is specialized based on whether or not the node has the GTF_REVERSE_OPS flag set)
//
// Valid values of this type may be obtained by calling `GenTree::UseEdgesBegin` and `GenTree::UseEdgesEnd`.
//
class GenTreeUseEdgeIterator final
{
friend class GenTreeOperandIterator;
friend GenTreeUseEdgeIterator GenTree::UseEdgesBegin();
friend GenTreeUseEdgeIterator GenTree::UseEdgesEnd();
enum
{
CALL_INSTANCE = 0,
CALL_ARGS = 1,
CALL_LATE_ARGS = 2,
CALL_CONTROL_EXPR = 3,
CALL_COOKIE = 4,
CALL_ADDRESS = 5,
CALL_TERMINAL = 6,
};
typedef void (GenTreeUseEdgeIterator::*AdvanceFn)();
AdvanceFn m_advance;
GenTree* m_node;
GenTree** m_edge;
// Pointer sized state storage, GenTreePhi::Use* or GenTreeCall::Use*
// or the exclusive end/beginning of GenTreeMultiOp's operand array.
void* m_statePtr;
// Integer sized state storage, usually the operand index for non-list based nodes.
int m_state;
GenTreeUseEdgeIterator(GenTree* node);
// Advance functions for special nodes
void AdvanceCmpXchg();
void AdvanceArrElem();
void AdvanceArrOffset();
void AdvanceStoreDynBlk();
void AdvanceFieldList();
void AdvancePhi();
template <bool ReverseOperands>
void AdvanceBinOp();
void SetEntryStateForBinOp();
// The advance function for call nodes
template <int state>
void AdvanceCall();
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void AdvanceMultiOp();
void AdvanceReversedMultiOp();
void SetEntryStateForMultiOp();
#endif
void Terminate();
public:
GenTreeUseEdgeIterator();
inline GenTree** operator*()
{
assert(m_state != -1);
return m_edge;
}
inline GenTree** operator->()
{
assert(m_state != -1);
return m_edge;
}
inline bool operator==(const GenTreeUseEdgeIterator& other) const
{
if (m_state == -1 || other.m_state == -1)
{
return m_state == other.m_state;
}
return (m_node == other.m_node) && (m_edge == other.m_edge) && (m_statePtr == other.m_statePtr) &&
(m_state == other.m_state);
}
inline bool operator!=(const GenTreeUseEdgeIterator& other) const
{
return !(operator==(other));
}
GenTreeUseEdgeIterator& operator++();
};
//------------------------------------------------------------------------
// GenTreeOperandIterator: an iterator that will produce each operand of a
// GenTree node in the order in which they are
// used. This uses `GenTreeUseEdgeIterator` under
// the covers.
//
// Note: valid values of this type may be obtained by calling
// `GenTree::OperandsBegin` and `GenTree::OperandsEnd`.
class GenTreeOperandIterator final
{
friend GenTreeOperandIterator GenTree::OperandsBegin();
friend GenTreeOperandIterator GenTree::OperandsEnd();
GenTreeUseEdgeIterator m_useEdges;
GenTreeOperandIterator(GenTree* node) : m_useEdges(node)
{
}
public:
GenTreeOperandIterator() : m_useEdges()
{
}
inline GenTree* operator*()
{
return *(*m_useEdges);
}
inline GenTree* operator->()
{
return *(*m_useEdges);
}
inline bool operator==(const GenTreeOperandIterator& other) const
{
return m_useEdges == other.m_useEdges;
}
inline bool operator!=(const GenTreeOperandIterator& other) const
{
return !(operator==(other));
}
inline GenTreeOperandIterator& operator++()
{
++m_useEdges;
return *this;
}
};
/*****************************************************************************/
// In the current design, we never instantiate GenTreeUnOp: it exists only to be
// used as a base class. For unary operators, we instantiate GenTreeOp, with a NULL second
// argument. We check that this is true dynamically. We could tighten this and get static
// checking, but that would entail accessing the first child of a unary operator via something
// like gtUnOp.gtOp1 instead of AsOp()->gtOp1.
struct GenTreeUnOp : public GenTree
{
GenTree* gtOp1;
protected:
GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr)
{
}
GenTreeUnOp(genTreeOps oper, var_types type, GenTree* op1 DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1)
{
assert(op1 != nullptr || NullOp1Legal());
if (op1 != nullptr)
{ // Propagate effects flags from child.
gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
}
}
#if DEBUGGABLE_GENTREE
GenTreeUnOp() : GenTree(), gtOp1(nullptr)
{
}
#endif
};
struct GenTreeOp : public GenTreeUnOp
{
GenTree* gtOp2;
GenTreeOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2)
{
// comparisons are always integral types
assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
// Binary operators, with a few exceptions, require a non-nullptr
// second argument.
assert(op2 != nullptr || NullOp2Legal());
// Unary operators, on the other hand, require a null second argument.
assert(!OperIsUnary(oper) || op2 == nullptr);
// Propagate effects flags from child. (UnOp handled this for first child.)
if (op2 != nullptr)
{
gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
}
}
// A small set of node types are unary operators with an optional operand. We use
// this constructor to build those.
GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr)
{
// Unary operators with optional arguments:
assert(oper == GT_NOP || oper == GT_RETURN || oper == GT_RETFILT || OperIsBlk(oper));
}
// returns true if we will use the division by constant optimization for this node.
bool UsesDivideByConstOptimized(Compiler* comp);
// checks if we will use the division by constant optimization this node
// then sets the flag GTF_DIV_BY_CNS_OPT and GTF_DONT_CSE on the constant
void CheckDivideByConstOptimized(Compiler* comp);
// True if this node is marked as using the division by constant optimization
bool MarkedDivideByConstOptimized() const
{
return (gtFlags & GTF_DIV_BY_CNS_OPT) != 0;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
bool IsValidLongMul();
#endif
#if !defined(TARGET_64BIT) && defined(DEBUG)
void DebugCheckLongMul();
#endif
#if DEBUGGABLE_GENTREE
GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr)
{
}
#endif
// True if this relop is marked for a transform during the emitter
// phase, e.g., jge => jns
bool MarkedForSignJumpOpt() const
{
return (gtFlags & GTF_RELOP_SJUMP_OPT) != 0;
}
};
struct GenTreeVal : public GenTree
{
size_t gtVal1;
GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val)
{
}
#if DEBUGGABLE_GENTREE
GenTreeVal() : GenTree()
{
}
#endif
};
struct GenTreeIntConCommon : public GenTree
{
inline INT64 LngValue() const;
inline void SetLngValue(INT64 val);
inline ssize_t IconValue() const;
inline void SetIconValue(ssize_t val);
inline INT64 IntegralValue() const;
inline void SetIntegralValue(int64_t value);
template <typename T>
inline void SetValueTruncating(T value);
GenTreeIntConCommon(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode))
{
}
bool FitsInI8() // IconValue() fits into 8-bit signed storage
{
return FitsInI8(IconValue());
}
static bool FitsInI8(ssize_t val) // Constant fits into 8-bit signed storage
{
return (int8_t)val == val;
}
bool FitsInI32() // IconValue() fits into 32-bit signed storage
{
return FitsInI32(IconValue());
}
static bool FitsInI32(ssize_t val) // Constant fits into 32-bit signed storage
{
#ifdef TARGET_64BIT
return (int32_t)val == val;
#else
return true;
#endif
}
bool ImmedValNeedsReloc(Compiler* comp);
bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
#ifdef TARGET_XARCH
bool FitsInAddrBase(Compiler* comp);
bool AddrNeedsReloc(Compiler* comp);
#endif
#if DEBUGGABLE_GENTREE
GenTreeIntConCommon() : GenTree()
{
}
#endif
};
// node representing a read from a physical register
struct GenTreePhysReg : public GenTree
{
// physregs need a field beyond GetRegNum() because
// GetRegNum() indicates the destination (and can be changed)
// whereas reg indicates the source
regNumber gtSrcReg;
GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
{
}
#if DEBUGGABLE_GENTREE
GenTreePhysReg() : GenTree()
{
}
#endif
};
/* gtIntCon -- integer constant (GT_CNS_INT) */
struct GenTreeIntCon : public GenTreeIntConCommon
{
/*
* This is the GT_CNS_INT struct definition.
* It's used to hold both int constants and pointer handle constants.
* For 64-bit targets we only use GT_CNS_INT, as it is used to represent all the possible sizes.
* For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
* In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
*/
ssize_t gtIconVal; // Must overlap and have the same offset as the gtLconVal field in GenTreeLngCon below.
/* The InitializeArray intrinsic needs to go back to the newarray statement
to find the class handle of the array so that we can get its size. However,
in ngen mode, the handle in that statement does not correspond to the compile
time handle (rather it lets you get a handle at run-time). In that case, we also
need to store a compile time handle, which goes in this gtCompileTimeHandle field.
*/
ssize_t gtCompileTimeHandle;
// TODO-Cleanup: It's not clear what characterizes the cases where the field
// above is used. It may be that its uses and those of the "gtFieldSeq" field below
// are mutually exclusive, and they could be put in a union. Or else we should separate
// this type into three subtypes.
// If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
// sequence of fields.
FieldSeqNode* gtFieldSeq;
#ifdef DEBUG
// If the value represents target address, holds the method handle to that target which is used
// to fetch target method name and display in the disassembled code.
size_t gtTargetHandle = 0;
#endif
GenTreeIntCon(var_types type, ssize_t value DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(FieldSeqStore::NotAField())
{
}
GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(fields)
{
assert(fields != nullptr);
}
void FixupInitBlkValue(var_types asgType);
#if DEBUGGABLE_GENTREE
GenTreeIntCon() : GenTreeIntConCommon()
{
}
#endif
};
/* gtLngCon -- long constant (GT_CNS_LNG) */
struct GenTreeLngCon : public GenTreeIntConCommon
{
INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
INT32 LoVal()
{
return (INT32)(gtLconVal & 0xffffffff);
}
INT32 HiVal()
{
return (INT32)(gtLconVal >> 32);
}
GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
{
SetLngValue(val);
}
#if DEBUGGABLE_GENTREE
GenTreeLngCon() : GenTreeIntConCommon()
{
}
#endif
};
inline INT64 GenTreeIntConCommon::LngValue() const
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
return AsLngCon()->gtLconVal;
#else
return IconValue();
#endif
}
inline void GenTreeIntConCommon::SetLngValue(INT64 val)
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
AsLngCon()->gtLconVal = val;
#else
// Compile time asserts that these two fields overlap and have the same offsets: gtIconVal and gtLconVal
C_ASSERT(offsetof(GenTreeLngCon, gtLconVal) == offsetof(GenTreeIntCon, gtIconVal));
C_ASSERT(sizeof(AsLngCon()->gtLconVal) == sizeof(AsIntCon()->gtIconVal));
SetIconValue(ssize_t(val));
#endif
}
inline ssize_t GenTreeIntConCommon::IconValue() const
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
return AsIntCon()->gtIconVal;
}
inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
AsIntCon()->gtIconVal = val;
}
inline INT64 GenTreeIntConCommon::IntegralValue() const
{
#ifdef TARGET_64BIT
return LngValue();
#else
return gtOper == GT_CNS_LNG ? LngValue() : (INT64)IconValue();
#endif // TARGET_64BIT
}
inline void GenTreeIntConCommon::SetIntegralValue(int64_t value)
{
#ifdef TARGET_64BIT
SetIconValue(value);
#else
if (OperIs(GT_CNS_LNG))
{
SetLngValue(value);
}
else
{
assert(FitsIn<int32_t>(value));
SetIconValue(static_cast<int32_t>(value));
}
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// SetValueTruncating: Set the value, truncating to TYP_INT if necessary.
//
// The function will truncate the supplied value to a 32 bit signed
// integer if the node's type is not TYP_LONG, otherwise setting it
// as-is. Note that this function intentionally does not check for
// small types (such nodes are created in lowering) for TP reasons.
//
// This function is intended to be used where its truncating behavior is
// desirable. One example is folding of ADD(CNS_INT, CNS_INT) performed in
// wider integers, which is typical when compiling on 64 bit hosts, as
// most arithmetic is done in ssize_t's aka int64_t's in that case, while
// the node itself can be of a narrower type.
//
// Arguments:
// value - Value to set, truncating to TYP_INT if the node is not of TYP_LONG
//
// Notes:
// This function is templated so that it works well with compiler warnings of
// the form "Operation may overflow before being assigned to a wider type", in
// case "value" is of type ssize_t, which is common.
//
template <typename T>
inline void GenTreeIntConCommon::SetValueTruncating(T value)
{
static_assert_no_msg((std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value));
if (TypeIs(TYP_LONG))
{
SetLngValue(value);
}
else
{
SetIconValue(static_cast<int32_t>(value));
}
}
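// A minimal sketch (hypothetical caller code, assuming the usual AsIntConCommon() accessor) of the
// truncating behavior described above: constant folding done in ssize_t on a 64 bit host keeps
// only the low 32 bits when the target node is not TYP_LONG.
//
//    ssize_t folded = op1->AsIntConCommon()->IconValue() + op2->AsIntConCommon()->IconValue();
//    op1->AsIntConCommon()->SetValueTruncating(folded); // truncated to int32 unless op1 is TYP_LONG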
/* gtDblCon -- double constant (GT_CNS_DBL) */
struct GenTreeDblCon : public GenTree
{
double gtDconVal;
bool isBitwiseEqual(GenTreeDblCon* other)
{
unsigned __int64 bits = *(unsigned __int64*)(&gtDconVal);
unsigned __int64 otherBits = *(unsigned __int64*)(&(other->gtDconVal));
return (bits == otherBits);
}
GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type), gtDconVal(val)
{
assert(varTypeIsFloating(type));
}
#if DEBUGGABLE_GENTREE
GenTreeDblCon() : GenTree()
{
}
#endif
};
/* gtStrCon -- string constant (GT_CNS_STR) */
#define EMPTY_STRING_SCON (unsigned)-1
struct GenTreeStrCon : public GenTree
{
unsigned gtSconCPX;
CORINFO_MODULE_HANDLE gtScpHnd;
// Returns true if this GT_CNS_STR was imported for String.Empty field
bool IsStringEmptyField()
{
return gtSconCPX == EMPTY_STRING_SCON && gtScpHnd == nullptr;
}
// Because this node can come from an inlined method we need to
// have the scope handle, since it will become a helper call.
GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false))
: GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod)
{
}
#if DEBUGGABLE_GENTREE
GenTreeStrCon() : GenTree()
{
}
#endif
};
// Common supertype of LCL_VAR, LCL_FLD, REG_VAR, PHI_ARG
// This inherits from UnOp because lclvar stores are Unops
struct GenTreeLclVarCommon : public GenTreeUnOp
{
private:
unsigned _gtLclNum; // The local number. An index into the Compiler::lvaTable array.
unsigned _gtSsaNum; // The SSA number.
public:
GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode))
{
SetLclNum(lclNum);
}
unsigned GetLclNum() const
{
return _gtLclNum;
}
void SetLclNum(unsigned lclNum)
{
_gtLclNum = lclNum;
_gtSsaNum = SsaConfig::RESERVED_SSA_NUM;
}
uint16_t GetLclOffs() const;
unsigned GetSsaNum() const
{
return _gtSsaNum;
}
void SetSsaNum(unsigned ssaNum)
{
_gtSsaNum = ssaNum;
}
bool HasSsaName()
{
return (GetSsaNum() != SsaConfig::RESERVED_SSA_NUM);
}
#if DEBUGGABLE_GENTREE
GenTreeLclVarCommon() : GenTreeUnOp()
{
}
#endif
};
//------------------------------------------------------------------------
// MultiRegSpillFlags
//
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flags of each register
// are stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
//
typedef unsigned char MultiRegSpillFlags;
static const unsigned PACKED_GTF_SPILL = 1;
static const unsigned PACKED_GTF_SPILLED = 2;
//----------------------------------------------------------------------
// GetMultiRegSpillFlagsByIdx: get spill flag associated with the return register
// specified by its index.
//
// Arguments:
// idx - Position or index of the return register
//
// Return Value:
// Returns GTF_* flags associated with the register. Only GTF_SPILL and GTF_SPILLED are considered.
//
inline GenTreeFlags GetMultiRegSpillFlagsByIdx(MultiRegSpillFlags flags, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
unsigned bits = flags >> (idx * 2); // It doesn't matter that we possibly leave other high bits here.
GenTreeFlags spillFlags = GTF_EMPTY;
if (bits & PACKED_GTF_SPILL)
{
spillFlags |= GTF_SPILL;
}
if (bits & PACKED_GTF_SPILLED)
{
spillFlags |= GTF_SPILLED;
}
return spillFlags;
}
//----------------------------------------------------------------------
// SetMultiRegSpillFlagsByIdx: set spill flags for the register specified by its index.
//
// Arguments:
// oldFlags - The current value of the MultiRegSpillFlags for a node.
// flagsToSet - GTF_* flags. Only GTF_SPILL and GTF_SPILLED are allowed.
// Note that these are the flags used on non-multireg nodes,
// and this method adds the appropriate flags to the
// incoming MultiRegSpillFlags and returns it.
// idx - Position or index of the register
//
// Return Value:
// The new value for the node's MultiRegSpillFlags.
//
inline MultiRegSpillFlags SetMultiRegSpillFlagsByIdx(MultiRegSpillFlags oldFlags, GenTreeFlags flagsToSet, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
MultiRegSpillFlags newFlags = oldFlags;
unsigned bits = 0;
if (flagsToSet & GTF_SPILL)
{
bits |= PACKED_GTF_SPILL;
}
if (flagsToSet & GTF_SPILLED)
{
bits |= PACKED_GTF_SPILLED;
}
const unsigned char packedFlags = PACKED_GTF_SPILL | PACKED_GTF_SPILLED;
// Clear anything that was already there by masking out the bits before 'or'ing in what we want there.
newFlags = (unsigned char)((newFlags & ~(packedFlags << (idx * 2))) | (bits << (idx * 2)));
return newFlags;
}
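// A minimal round-trip sketch (hypothetical caller code) for the packed spill flags above:
//
//    MultiRegSpillFlags flags = 0;
//    flags             = SetMultiRegSpillFlagsByIdx(flags, GTF_SPILL, 1); // mark register 1 for spilling
//    GenTreeFlags reg1 = GetMultiRegSpillFlagsByIdx(flags, 1);            // reg1 == GTF_SPILL
//    GenTreeFlags reg0 = GetMultiRegSpillFlagsByIdx(flags, 0);            // reg0 == GTF_EMPTY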
// gtLclVar -- load/store/addr of local variable
struct GenTreeLclVar : public GenTreeLclVarCommon
{
private:
regNumberSmall gtOtherReg[MAX_MULTIREG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
public:
INDEBUG(IL_OFFSET gtLclILoffs;) // instr offset of ref (only for JIT dumps)
// Multireg support
bool IsMultiReg() const
{
return ((gtFlags & GTF_VAR_MULTIREG) != 0);
}
void ClearMultiReg()
{
gtFlags &= ~GTF_VAR_MULTIREG;
}
void SetMultiReg()
{
gtFlags |= GTF_VAR_MULTIREG;
ClearOtherRegFlags();
}
regNumber GetRegNumByIdx(int regIndex)
{
assert(regIndex < MAX_MULTIREG_COUNT);
return (regIndex == 0) ? GetRegNum() : (regNumber)gtOtherReg[regIndex - 1];
}
void SetRegNumByIdx(regNumber reg, int regIndex)
{
assert(regIndex < MAX_MULTIREG_COUNT);
if (regIndex == 0)
{
SetRegNum(reg);
}
else
{
gtOtherReg[regIndex - 1] = regNumberSmall(reg);
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
}
unsigned int GetFieldCount(Compiler* compiler) const;
var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx);
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given LclVar node.
//
// Arguments:
// from - GenTreeLclVar node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeLclVar* from)
{
this->gtSpillFlags = from->gtSpillFlags;
}
GenTreeLclVar(genTreeOps oper,
var_types type,
unsigned lclNum DEBUGARG(IL_OFFSET ilOffs = BAD_IL_OFFSET) DEBUGARG(bool largeNode = false))
: GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)) DEBUGARG(gtLclILoffs(ilOffs))
{
assert(OperIsLocal(oper) || OperIsLocalAddr(oper));
}
#if DEBUGGABLE_GENTREE
GenTreeLclVar() : GenTreeLclVarCommon()
{
}
#endif
};
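// A minimal sketch (hypothetical caller code) for a multi-reg local: registers beyond the first
// are read and written by index, and spill state is tracked per register.
//
//    if (lclVar->IsMultiReg())
//    {
//        regNumber reg1 = lclVar->GetRegNumByIdx(1);
//        lclVar->SetRegSpillFlagByIdx(GTF_SPILLED, 1);
//    }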
// gtLclFld -- load/store/addr of local variable field
struct GenTreeLclFld : public GenTreeLclVarCommon
{
private:
uint16_t m_lclOffs; // offset into the variable to access
FieldSeqNode* m_fieldSeq; // This LclFld node represents some sequences of accesses.
public:
GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs)
: GenTreeLclVarCommon(oper, type, lclNum), m_lclOffs(static_cast<uint16_t>(lclOffs)), m_fieldSeq(nullptr)
{
assert(lclOffs <= UINT16_MAX);
}
uint16_t GetLclOffs() const
{
return m_lclOffs;
}
void SetLclOffs(unsigned lclOffs)
{
assert(lclOffs <= UINT16_MAX);
m_lclOffs = static_cast<uint16_t>(lclOffs);
}
FieldSeqNode* GetFieldSeq() const
{
return m_fieldSeq;
}
void SetFieldSeq(FieldSeqNode* fieldSeq)
{
m_fieldSeq = fieldSeq;
}
#ifdef TARGET_ARM
bool IsOffsetMisaligned() const;
#endif // TARGET_ARM
#if DEBUGGABLE_GENTREE
GenTreeLclFld() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtCast -- conversion to a different type (GT_CAST) */
struct GenTreeCast : public GenTreeOp
{
GenTree*& CastOp()
{
return gtOp1;
}
var_types gtCastType;
GenTreeCast(var_types type, GenTree* op, bool fromUnsigned, var_types castType DEBUGARG(bool largeNode = false))
: GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType)
{
// We do not allow casts from floating point types to be treated as from
// unsigned to avoid bugs related to wrong GTF_UNSIGNED in case the
// CastOp's type changes.
assert(!varTypeIsFloating(op) || !fromUnsigned);
gtFlags |= fromUnsigned ? GTF_UNSIGNED : GTF_EMPTY;
}
#if DEBUGGABLE_GENTREE
GenTreeCast() : GenTreeOp()
{
}
#endif
};
// GT_BOX nodes are place markers for boxed values. The "real" tree
// for most purposes is in gtBoxOp.
struct GenTreeBox : public GenTreeUnOp
{
// An expanded helper call to implement the "box" if we don't get
// rid of it any other way. Must be in same position as op1.
GenTree*& BoxOp()
{
return gtOp1;
}
// This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value
// type
Statement* gtAsgStmtWhenInlinedBoxValue;
// And this is the statement that copies from the value being boxed to the box payload
Statement* gtCopyStmtWhenInlinedBoxValue;
GenTreeBox(var_types type,
GenTree* boxOp,
Statement* asgStmtWhenInlinedBoxValue,
Statement* copyStmtWhenInlinedBoxValue)
: GenTreeUnOp(GT_BOX, type, boxOp)
, gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue)
, gtCopyStmtWhenInlinedBoxValue(copyStmtWhenInlinedBoxValue)
{
}
#if DEBUGGABLE_GENTREE
GenTreeBox() : GenTreeUnOp()
{
}
#endif
};
// GenTreeField -- data member ref (GT_FIELD)
struct GenTreeField : public GenTreeUnOp
{
CORINFO_FIELD_HANDLE gtFldHnd;
DWORD gtFldOffset;
bool gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtFieldLookup;
#endif
GenTreeField(var_types type, GenTree* obj, CORINFO_FIELD_HANDLE fldHnd, DWORD offs)
: GenTreeUnOp(GT_FIELD, type, obj), gtFldHnd(fldHnd), gtFldOffset(offs), gtFldMayOverlap(false)
{
#ifdef FEATURE_READYTORUN
gtFieldLookup.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeField() : GenTreeUnOp()
{
}
#endif
// The object this field belongs to. Will be "nullptr" for static fields.
// Note that this is an address, i. e. for struct fields it will be ADDR(STRUCT).
GenTree* GetFldObj() const
{
return gtOp1;
}
// True if this field is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_FLD_VOLATILE) != 0;
}
};
// There was quite a bit of confusion in the code base about which of gtOp1 and gtOp2 was the
// 'then' and 'else' clause of a colon node. Adding these accessors, while not enforcing anything,
// at least *allows* the programmer to be obviously correct.
// However, these conventions seem backward.
// TODO-Cleanup: If we could get these accessors used everywhere, then we could switch them.
struct GenTreeColon : public GenTreeOp
{
GenTree*& ThenNode()
{
return gtOp2;
}
GenTree*& ElseNode()
{
return gtOp1;
}
#if DEBUGGABLE_GENTREE
GenTreeColon() : GenTreeOp()
{
}
#endif
GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode)
{
}
};
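// A minimal sketch (hypothetical caller code) of the convention noted above: the constructor takes
// (type, thenNode, elseNode) but stores the 'else' tree in gtOp1 and the 'then' tree in gtOp2, so
// the named accessors are the safe way to read a colon node.
//
//    GenTree* thenTree = colon->ThenNode(); // reads gtOp2
//    GenTree* elseTree = colon->ElseNode(); // reads gtOp1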
// gtCall -- method call (GT_CALL)
enum class InlineObservation;
//------------------------------------------------------------------------
// GenTreeCallFlags: a bitmask of flags for GenTreeCall stored in gtCallMoreFlags.
//
// clang-format off
enum GenTreeCallFlags : unsigned int
{
GTF_CALL_M_EMPTY = 0,
GTF_CALL_M_EXPLICIT_TAILCALL = 0x00000001, // the call is "tail" prefixed and importer has performed tail call checks
GTF_CALL_M_TAILCALL = 0x00000002, // the call is a tailcall
GTF_CALL_M_VARARGS = 0x00000004, // the call uses varargs ABI
GTF_CALL_M_RETBUFFARG = 0x00000008, // call has a return buffer argument
GTF_CALL_M_DELEGATE_INV = 0x00000010, // call to Delegate.Invoke
GTF_CALL_M_NOGCCHECK = 0x00000020, // not a call for computing full interruptability and therefore no GC check is required.
GTF_CALL_M_SPECIAL_INTRINSIC = 0x00000040, // function that could be optimized as an intrinsic
// in special cases. Used to optimize fast way out in morphing
GTF_CALL_M_UNMGD_THISCALL = 0x00000080, // "this" pointer (first argument) should be enregistered (only for GTF_CALL_UNMANAGED)
GTF_CALL_M_VIRTSTUB_REL_INDIRECT = 0x00000080, // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
GTF_CALL_M_NONVIRT_SAME_THIS = 0x00000080, // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
GTF_CALL_M_FRAME_VAR_DEATH = 0x00000100, // the compLvFrameListRoot variable dies here (last use)
GTF_CALL_M_TAILCALL_VIA_JIT_HELPER = 0x00000200, // call is a tail call dispatched via tail call JIT helper.
#if FEATURE_TAILCALL_OPT
GTF_CALL_M_IMPLICIT_TAILCALL = 0x00000400, // call is an opportunistic tail call and importer has performed tail call checks
GTF_CALL_M_TAILCALL_TO_LOOP = 0x00000800, // call is a fast recursive tail call that can be converted into a loop
#endif
GTF_CALL_M_PINVOKE = 0x00001000, // call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
// A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For e.g.
// an IL Stub dynamically generated for a PInvoke declaration is flagged as
// a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
// know when these flags are set.
GTF_CALL_M_R2R_REL_INDIRECT = 0x00002000, // ready to run call is indirected through a relative address
GTF_CALL_M_DOES_NOT_RETURN = 0x00004000, // call does not return
GTF_CALL_M_WRAPPER_DELEGATE_INV = 0x00008000, // call is in wrapper delegate
GTF_CALL_M_FAT_POINTER_CHECK = 0x00010000, // CoreRT managed calli needs transformation, that checks
// special bit in calli address. If it is set, then it is necessary
// to restore real function address and load hidden argument
// as the first argument for calli. It is CoreRT replacement for instantiating
// stubs, because executable code cannot be generated at runtime.
GTF_CALL_M_HELPER_SPECIAL_DCE = 0x00020000, // this helper call can be removed if it is part of a comma and
// the comma result is unused.
GTF_CALL_M_DEVIRTUALIZED = 0x00040000, // this call was devirtualized
GTF_CALL_M_UNBOXED = 0x00080000, // this call was optimized to use the unboxed entry point
GTF_CALL_M_GUARDED_DEVIRT = 0x00100000, // this call is a candidate for guarded devirtualization
GTF_CALL_M_GUARDED_DEVIRT_CHAIN = 0x00200000, // this call is a candidate for chained guarded devirtualization
GTF_CALL_M_GUARDED = 0x00400000, // this call was transformed by guarded devirtualization
GTF_CALL_M_ALLOC_SIDE_EFFECTS = 0x00800000, // this is a call to an allocator with side effects
GTF_CALL_M_SUPPRESS_GC_TRANSITION = 0x01000000, // suppress the GC transition (i.e. during a pinvoke) but a separate GC safe point is required.
GTF_CALL_M_EXP_RUNTIME_LOOKUP = 0x02000000, // this call needs to be transformed into CFG for the dynamic dictionary expansion feature.
GTF_CALL_M_STRESS_TAILCALL = 0x04000000, // the call is NOT "tail" prefixed but GTF_CALL_M_EXPLICIT_TAILCALL was added because of tail call stress mode
GTF_CALL_M_EXPANDED_EARLY = 0x08000000, // the Virtual Call target address is expanded and placed in gtControlExpr in Morph rather than in Lower
GTF_CALL_M_LATE_DEVIRT = 0x10000000, // this call has late devirtualization info
};
inline constexpr GenTreeCallFlags operator ~(GenTreeCallFlags a)
{
return (GenTreeCallFlags)(~(unsigned int)a);
}
inline constexpr GenTreeCallFlags operator |(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeCallFlags operator &(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeCallFlags& operator |=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeCallFlags& operator &=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
// clang-format on
// Return type descriptor of a GT_CALL node.
// x64 Unix, Arm64, Arm32 and x86 allow a value to be returned in multiple
// registers. For such calls this struct provides the following info
// on their return type
// - type of value returned in each return register
// - ABI return register numbers in which the value is returned
// - count of return registers in which the value is returned
//
// TODO-ARM: Update this to meet the needs of Arm64 and Arm32
//
// TODO-AllArch: Right now it is used for describing multi-reg returned types.
// Eventually we would want to use it for describing even single-reg
// returned types (e.g. structs returned in single register x64/arm).
// This would allow us not to lie or normalize single struct return
// values in importer/morph.
struct ReturnTypeDesc
{
private:
var_types m_regType[MAX_RET_REG_COUNT];
bool m_isEnclosingType;
#ifdef DEBUG
bool m_inited;
#endif
public:
ReturnTypeDesc()
{
Reset();
}
// Initialize the Return Type Descriptor for a method that returns a struct type
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv);
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
// Only needed for X86 and arm32.
void InitializeLongReturnType();
// Reset type descriptor to defaults
void Reset()
{
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
m_regType[i] = TYP_UNKNOWN;
}
m_isEnclosingType = false;
#ifdef DEBUG
m_inited = false;
#endif
}
#ifdef DEBUG
// NOTE: we only use this function when writing out IR dumps. These dumps may take place before the ReturnTypeDesc
// has been initialized.
unsigned TryGetReturnRegCount() const
{
return m_inited ? GetReturnRegCount() : 0;
}
#endif // DEBUG
//--------------------------------------------------------------------------------------------
// GetReturnRegCount: Get the count of return registers in which the return value is returned.
//
// Arguments:
// None
//
// Return Value:
// Count of return registers.
// Returns 0 if the return type is not returned in registers.
unsigned GetReturnRegCount() const
{
assert(m_inited);
int regCount = 0;
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
if (m_regType[i] == TYP_UNKNOWN)
{
break;
}
// otherwise
regCount++;
}
#ifdef DEBUG
// Any remaining elements in m_regTypes[] should also be TYP_UNKNOWN
for (unsigned i = regCount + 1; i < MAX_RET_REG_COUNT; ++i)
{
assert(m_regType[i] == TYP_UNKNOWN);
}
#endif
return regCount;
}
//-----------------------------------------------------------------------
// IsMultiRegRetType: check whether the type is returned in multiple
// return registers.
//
// Arguments:
// None
//
// Return Value:
// Returns true if the type is returned in multiple return registers.
// False otherwise.
// Note that we only have to examine the first two values to determine this
//
bool IsMultiRegRetType() const
{
if (MAX_RET_REG_COUNT < 2)
{
return false;
}
else
{
assert(m_inited);
return ((m_regType[0] != TYP_UNKNOWN) && (m_regType[1] != TYP_UNKNOWN));
}
}
//--------------------------------------------------------------------------
// GetReturnRegType: Get var_type of the return register specified by index.
//
// Arguments:
// index - Index of the return register.
// First return register will have an index 0 and so on.
//
// Return Value:
// var_type of the return register specified by its index.
// asserts if the index does not have a valid register return type.
var_types GetReturnRegType(unsigned index) const
{
var_types result = m_regType[index];
assert(result != TYP_UNKNOWN);
return result;
}
// True if this value is returned in integer register
// that is larger than the type itself.
bool IsEnclosingType() const
{
return m_isEnclosingType;
}
// Get ith ABI return register
regNumber GetABIReturnReg(unsigned idx) const;
// Get reg mask of ABI return registers
regMaskTP GetABIReturnRegs() const;
};
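// Illustrative usage sketch (editorial, not part of the JIT sources; 'call' is a hypothetical GenTreeCall*):
// a consumer of a multi-reg return walks the descriptor by querying the register count and then each
// (type, reg) pair:
//
//     const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
//     const unsigned        regCount    = retTypeDesc->GetReturnRegCount();
//     for (unsigned i = 0; i < regCount; ++i)
//     {
//         var_types regType = retTypeDesc->GetReturnRegType(i);
//         regNumber reg     = retTypeDesc->GetABIReturnReg(i);
//         // ... consume the (regType, reg) pair ...
//     }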
class TailCallSiteInfo
{
bool m_isCallvirt : 1;
bool m_isCalli : 1;
CORINFO_SIG_INFO m_sig;
CORINFO_RESOLVED_TOKEN m_token;
public:
// Is the tailcall a callvirt instruction?
bool IsCallvirt()
{
return m_isCallvirt;
}
// Is the tailcall a calli instruction?
bool IsCalli()
{
return m_isCalli;
}
// Get the token of the callee
CORINFO_RESOLVED_TOKEN* GetToken()
{
assert(!IsCalli());
return &m_token;
}
// Get the signature of the callee
CORINFO_SIG_INFO* GetSig()
{
return &m_sig;
}
// Mark the tailcall as a calli with the given signature
void SetCalli(CORINFO_SIG_INFO* sig)
{
m_isCallvirt = false;
m_isCalli = true;
m_sig = *sig;
}
// Mark the tailcall as a callvirt with the given signature and token
void SetCallvirt(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = true;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
// Mark the tailcall as a call with the given signature and token
void SetCall(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = false;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
};
class fgArgInfo;
enum class NonStandardArgKind : unsigned
{
None,
PInvokeFrame,
PInvokeTarget,
PInvokeCookie,
WrapperDelegateCell,
ShiftLow,
ShiftHigh,
FixedRetBuffer,
VirtualStubCell,
R2RIndirectionCell,
ValidateIndirectCallTarget,
// If changing this enum, also change getNonStandardArgKindName and isNonStandardArgAddedLate in fgArgInfo
};
#ifdef DEBUG
const char* getNonStandardArgKindName(NonStandardArgKind kind);
#endif
enum class CFGCallKind
{
ValidateAndCall,
Dispatch,
};
struct GenTreeCall final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node != nullptr);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node != nullptr);
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
Use* GetUse() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtCallThisArg; // The instance argument ('this' pointer)
Use* gtCallArgs; // The list of arguments in original evaluation order
Use* gtCallLateArgs; // On x86: The register arguments in an optimal order
// On ARM/x64: - also includes any outgoing arg space arguments
// - that were evaluated into a temp LclVar
fgArgInfo* fgArgInfo;
UseList Args()
{
return UseList(gtCallArgs);
}
UseList LateArgs()
{
return UseList(gtCallLateArgs);
}
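// Illustrative usage sketch (editorial; 'call' is a hypothetical GenTreeCall*): the Use/UseIterator/UseList
// machinery above supports range-based iteration over both argument lists:
//
//     for (GenTreeCall::Use& use : call->Args())
//     {
//         GenTree* argNode = use.GetNode();
//         // ... inspect the early-evaluated argument ...
//     }
//     for (GenTreeCall::Use& use : call->LateArgs())
//     {
//         // ... inspect the late (register/temp) arguments ...
//     }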
#ifdef DEBUG
// Used to register callsites with the EE
CORINFO_SIG_INFO* callSig;
#endif
union {
TailCallSiteInfo* tailCallInfo;
// Only used for unmanaged calls, which cannot be tail-called
CorInfoCallConvExtension unmgdCallConv;
};
#if FEATURE_MULTIREG_RET
// State required to support multi-reg returning call nodes.
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
ReturnTypeDesc gtReturnTypeDesc;
// GetRegNum() would always be the first return reg.
// The following array holds the other reg numbers of multi-reg return.
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
#endif // FEATURE_MULTIREG_RET
//-----------------------------------------------------------------------
// GetReturnTypeDesc: get the type descriptor of return value of the call
//
// Arguments:
// None
//
// Returns
// Type descriptor of the value returned by call
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
const ReturnTypeDesc* GetReturnTypeDesc() const
{
#if FEATURE_MULTIREG_RET
return &gtReturnTypeDesc;
#else
return nullptr;
#endif
}
void InitializeLongReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeLongReturnType();
#endif
}
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv)
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeStructReturnType(comp, retClsHnd, callConv);
#endif
}
void ResetReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.Reset();
#endif
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get ith return register allocated to this call node.
//
// Arguments:
// idx - index of the return register
//
// Return Value:
// Return regNumber of ith return register of call node.
// Returns REG_NA if there is no valid return register for the given index.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set ith return register of this call node
//
// Arguments:
// reg - reg number
// idx - index of the return register
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
unreached();
#endif
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given call node to this node
//
// Arguments:
// fromCall - GenTreeCall node from which to copy multi-reg state
//
// Return Value:
// None
//
void CopyOtherRegs(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
this->gtOtherRegs[i] = fromCall->gtOtherRegs[i];
}
#endif
}
// Get reg mask of all the valid registers of gtOtherRegs array
regMaskTP GetOtherRegMask() const;
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
#if FEATURE_MULTIREG_RET
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
#else
assert(!"unreached");
return GTF_EMPTY;
#endif
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = 0;
#endif
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given call node.
//
// Arguments:
// fromCall - GenTreeCall node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
this->gtSpillFlags = fromCall->gtSpillFlags;
#endif
}
bool IsUnmanaged() const
{
return (gtFlags & GTF_CALL_UNMANAGED) != 0;
}
bool NeedsNullCheck() const
{
return (gtFlags & GTF_CALL_NULLCHECK) != 0;
}
bool CallerPop() const
{
return (gtFlags & GTF_CALL_POP_ARGS) != 0;
}
bool IsVirtual() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT;
}
bool IsVirtualStub() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB;
}
bool IsVirtualVtable() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE;
}
bool IsInlineCandidate() const
{
return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0;
}
bool IsR2ROrVirtualStubRelativeIndir()
{
#if defined(FEATURE_READYTORUN)
if (IsR2RRelativeIndir())
{
return true;
}
#endif
return IsVirtualStubRelativeIndir();
}
bool HasNonStandardAddedArgs(Compiler* compiler) const;
int GetNonStandardAddedArgCount(Compiler* compiler) const;
// Returns true if this call uses a retBuf argument, per its calling convention.
bool HasRetBufArg() const
{
return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) != 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
// compiler, the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument
// The call may actually have a retBuf argument
// or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
bool TreatAsHasRetBufArg(Compiler* compiler) const;
bool HasFixedRetBufArg() const
{
if (!(hasFixedRetBuffReg() && HasRetBufArg()))
{
return false;
}
#if !defined(TARGET_ARM)
return !TargetOS::IsWindows || !callConvIsInstanceMethodCallConv(GetUnmanagedCallConv());
#else
return true;
#endif
}
//-----------------------------------------------------------------------------------------
// HasMultiRegRetVal: whether the call node returns its value in multiple return registers.
//
// Arguments:
// None
//
// Return Value:
// True if the call is returning a multi-reg return value. False otherwise.
//
bool HasMultiRegRetVal() const
{
#ifdef FEATURE_MULTIREG_RET
#if defined(TARGET_X86) || defined(TARGET_ARM)
if (varTypeIsLong(gtType))
{
return true;
}
#endif
if (!varTypeIsStruct(gtType) || HasRetBufArg())
{
return false;
}
// Now it is a struct that is returned in registers.
return GetReturnTypeDesc()->IsMultiRegRetType();
#else // !FEATURE_MULTIREG_RET
return false;
#endif // !FEATURE_MULTIREG_RET
}
// Returns true if VM has flagged this method as CORINFO_FLG_PINVOKE.
bool IsPInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0;
}
// Note that the distinction between a tail-prefixed call and an implicit tail call
// is maintained on the call node until fgMorphCall(), after which it will be
// either a tail call (i.e. IsTailCall() is true) or a non-tail call.
bool IsTailPrefixedCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
}
// Returns true if this call didn't have an explicit tail. prefix in the IL
// but was marked as an explicit tail call because of tail call stress mode.
bool IsStressTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_STRESS_TAILCALL) != 0;
}
// This method returning "true" implies that tail call flowgraph morhphing has
// performed final checks and committed to making a tail call.
bool IsTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0;
}
// This method returning "true" implies that importer has performed tail call checks
// and providing a hint that this can be converted to a tail call.
bool CanTailCall() const
{
return IsTailPrefixedCall() || IsImplicitTailCall();
}
// Check whether this is a tailcall dispatched via JIT helper. We only use
// this mechanism on x86 as it is faster than our other more general
// tailcall mechanism.
bool IsTailCallViaJitHelper() const
{
#ifdef TARGET_X86
return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return false;
#endif
}
#if FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
#ifdef TARGET_X86
return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return IsTailCall();
#endif
}
#else // !FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
#if FEATURE_TAILCALL_OPT
// Returns true if this is marked for opportunistic tail calling.
// That is, it can be tail called even though it is not explicitly prefixed with the "tail." prefix.
bool IsImplicitTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0;
}
bool IsTailCallConvertibleToLoop() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL_TO_LOOP) != 0;
}
#else // !FEATURE_TAILCALL_OPT
bool IsImplicitTailCall() const
{
return false;
}
bool IsTailCallConvertibleToLoop() const
{
return false;
}
#endif // !FEATURE_TAILCALL_OPT
bool NormalizesSmallTypesOnReturn()
{
return GetUnmanagedCallConv() == CorInfoCallConvExtension::Managed;
}
bool IsSameThis() const
{
return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0;
}
bool IsDelegateInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0;
}
bool IsVirtualStubRelativeIndir() const
{
return IsVirtualStub() && (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0;
}
bool IsR2RRelativeIndir() const
{
#ifdef FEATURE_READYTORUN
return (gtCallMoreFlags & GTF_CALL_M_R2R_REL_INDIRECT) != 0;
#else
return false;
#endif
}
#ifdef FEATURE_READYTORUN
void setEntryPoint(const CORINFO_CONST_LOOKUP& entryPoint)
{
gtEntryPoint = entryPoint;
if (gtEntryPoint.accessType == IAT_PVALUE)
{
gtCallMoreFlags |= GTF_CALL_M_R2R_REL_INDIRECT;
}
}
#endif // FEATURE_READYTORUN
bool IsVarargs() const
{
return (gtCallMoreFlags & GTF_CALL_M_VARARGS) != 0;
}
bool IsNoReturn() const
{
return (gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
}
bool IsFatPointerCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_FAT_POINTER_CHECK) != 0;
}
bool IsGuardedDevirtualizationCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT) != 0;
}
bool IsPure(Compiler* compiler) const;
bool HasSideEffects(Compiler* compiler, bool ignoreExceptions = false, bool ignoreCctors = false) const;
void ClearFatPointerCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_FAT_POINTER_CHECK;
}
void SetFatPointerCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_FAT_POINTER_CHECK;
}
bool IsDevirtualized() const
{
return (gtCallMoreFlags & GTF_CALL_M_DEVIRTUALIZED) != 0;
}
bool IsGuarded() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED) != 0;
}
bool IsUnboxed() const
{
return (gtCallMoreFlags & GTF_CALL_M_UNBOXED) != 0;
}
bool IsSuppressGCTransition() const
{
return (gtCallMoreFlags & GTF_CALL_M_SUPPRESS_GC_TRANSITION) != 0;
}
void ClearGuardedDevirtualizationCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_GUARDED_DEVIRT;
}
void SetGuardedDevirtualizationCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED_DEVIRT;
}
void SetIsGuarded()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED;
}
void SetExpRuntimeLookup()
{
gtCallMoreFlags |= GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
void ClearExpRuntimeLookup()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
bool IsExpRuntimeLookup() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXP_RUNTIME_LOOKUP) != 0;
}
void SetExpandedEarly()
{
gtCallMoreFlags |= GTF_CALL_M_EXPANDED_EARLY;
}
void ClearExpandedEarly()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXPANDED_EARLY;
}
bool IsExpandedEarly() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPANDED_EARLY) != 0;
}
//-----------------------------------------------------------------------------------------
// GetIndirectionCellArgKind: Get the kind of indirection cell used by this call.
//
// Arguments:
// None
//
// Return Value:
// The kind (either R2RIndirectionCell or VirtualStubCell),
// or NonStandardArgKind::None if this call does not have an indirection cell.
//
NonStandardArgKind GetIndirectionCellArgKind() const
{
if (IsVirtualStub())
{
return NonStandardArgKind::VirtualStubCell;
}
#if defined(TARGET_ARMARCH)
// For ARM architectures, we always use an indirection cell for R2R calls.
if (IsR2RRelativeIndir())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#elif defined(TARGET_XARCH)
// On XARCH we can disassemble it from the callsite, except for tailcalls, which need the indirection cell.
if (IsR2RRelativeIndir() && IsFastTailCall())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#endif
return NonStandardArgKind::None;
}
CFGCallKind GetCFGCallKind()
{
#if defined(TARGET_AMD64)
// On x64 the dispatcher is more performant, but we cannot use it when
// we need to pass indirection cells as those go into registers that
// are clobbered by the dispatch helper.
bool mayUseDispatcher = GetIndirectionCellArgKind() == NonStandardArgKind::None;
bool shouldUseDispatcher = true;
#elif defined(TARGET_ARM64)
bool mayUseDispatcher = true;
// Branch predictors on ARM64 generally do not handle the dispatcher as
// well as on x64 hardware, so only use the validator by default.
bool shouldUseDispatcher = false;
#else
// Other platforms do not even support the dispatcher.
bool mayUseDispatcher = false;
bool shouldUseDispatcher = false;
#endif
#ifdef DEBUG
switch (JitConfig.JitCFGUseDispatcher())
{
case 0:
shouldUseDispatcher = false;
break;
case 1:
shouldUseDispatcher = true;
break;
default:
break;
}
#endif
return mayUseDispatcher && shouldUseDispatcher ? CFGCallKind::Dispatch : CFGCallKind::ValidateAndCall;
}
void ResetArgInfo();
GenTreeCallFlags gtCallMoreFlags; // in addition to gtFlags
gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration
var_types gtReturnType : 5; // exact return type
CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available
void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
union {
// only used for CALLI unmanaged calls (CT_INDIRECT)
GenTree* gtCallCookie;
// gtInlineCandidateInfo is only used when inlining methods
InlineCandidateInfo* gtInlineCandidateInfo;
GuardedDevirtualizationCandidateInfo* gtGuardedDevirtualizationCandidateInfo;
ClassProfileCandidateInfo* gtClassProfileCandidateInfo;
LateDevirtualizationInfo* gtLateDevirtualizationInfo;
CORINFO_GENERIC_HANDLE compileTimeHelperArgumentHandle; // Used to track type handle argument of dynamic helpers
void* gtDirectCallAddress; // Used to pass direct call address between lower and codegen
};
// expression evaluated after args are placed, which determines the control target
GenTree* gtControlExpr;
union {
CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC or CT_HELPER
GenTree* gtCallAddr; // CT_INDIRECT
};
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// For non-inline candidates, track the first observation
// that blocks candidacy.
InlineObservation gtInlineObservation;
// IL offset of the call wrt its parent method.
IL_OFFSET gtRawILOffset;
// In DEBUG we report even non inline candidates in the inline tree in
// fgNoteNonInlineCandidate. We need to keep around the inline context for
// this as normally it's part of the candidate info.
class InlineContext* gtInlineContext;
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool IsHelperCall() const
{
return gtCallType == CT_HELPER;
}
bool IsHelperCall(CORINFO_METHOD_HANDLE callMethHnd) const
{
return IsHelperCall() && (callMethHnd == gtCallMethHnd);
}
bool IsHelperCall(Compiler* compiler, unsigned helper) const;
void ReplaceCallOperand(GenTree** operandUseEdge, GenTree* replacement);
bool AreArgsComplete() const;
CorInfoCallConvExtension GetUnmanagedCallConv() const
{
return IsUnmanaged() ? unmgdCallConv : CorInfoCallConvExtension::Managed;
}
static bool Equals(GenTreeCall* c1, GenTreeCall* c2);
GenTreeCall(var_types type) : GenTree(GT_CALL, type)
{
fgArgInfo = nullptr;
}
#if DEBUGGABLE_GENTREE
GenTreeCall() : GenTree()
{
}
#endif
};
struct GenTreeCmpXchg : public GenTree
{
GenTree* gtOpLocation;
GenTree* gtOpValue;
GenTree* gtOpComparand;
GenTreeCmpXchg(var_types type, GenTree* loc, GenTree* val, GenTree* comparand)
: GenTree(GT_CMPXCHG, type), gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
{
// There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
// have global effects.
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
// Merge in flags from operands
gtFlags |= gtOpLocation->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpValue->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpComparand->gtFlags & GTF_ALL_EFFECT;
}
#if DEBUGGABLE_GENTREE
GenTreeCmpXchg() : GenTree()
{
}
#endif
};
#if !defined(TARGET_64BIT)
struct GenTreeMultiRegOp : public GenTreeOp
{
regNumber gtOtherReg;
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flag of each of the
// return registers is stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
MultiRegSpillFlags gtSpillFlags;
GenTreeMultiRegOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
: GenTreeOp(oper, type, op1, op2), gtOtherReg(REG_NA)
{
ClearOtherRegFlags();
}
unsigned GetRegCount() const
{
return (TypeGet() == TYP_LONG) ? 2 : 1;
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get ith register allocated to this struct argument.
//
// Arguments:
// idx - index of the register
//
// Return Value:
// Return regNumber of ith register of this register argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < 2);
if (idx == 0)
{
return GetRegNum();
}
return gtOtherReg;
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
var_types GetRegType(unsigned index)
{
assert(index < 2);
// The type of register is usually the same as GenTree type, since GenTreeMultiRegOp usually defines a single
// reg.
// The special case is when we have TYP_LONG, which may be a MUL_LONG, or a DOUBLE arg passed as LONG,
// in which case we need to return TYP_INT for each index.
var_types result = TypeGet();
if (result == TYP_LONG)
{
result = TYP_INT;
}
return result;
}
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear the GTF_* spill flags associated with the result registers
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreeMultiRegOp() : GenTreeOp()
{
}
#endif
};
#endif // !defined(TARGET_64BIT)
struct GenTreeFptrVal : public GenTree
{
CORINFO_METHOD_HANDLE gtFptrMethod;
bool gtFptrDelegateTarget;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth)
: GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth), gtFptrDelegateTarget(false)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
gtEntryPoint.accessType = IAT_VALUE;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeFptrVal() : GenTree()
{
}
#endif
};
/* gtQmark */
struct GenTreeQmark : public GenTreeOp
{
GenTreeQmark(var_types type, GenTree* cond, GenTreeColon* colon) : GenTreeOp(GT_QMARK, type, cond, colon)
{
// These must follow a specific form.
assert((cond != nullptr) && cond->TypeIs(TYP_INT));
assert((colon != nullptr) && colon->OperIs(GT_COLON));
}
#if DEBUGGABLE_GENTREE
GenTreeQmark() : GenTreeOp()
{
}
#endif
};
/* gtIntrinsic -- intrinsic (possibly-binary op [NULL op2 is allowed] with an additional field) */
struct GenTreeIntrinsic : public GenTreeOp
{
NamedIntrinsic gtIntrinsicName;
CORINFO_METHOD_HANDLE gtMethodHandle; // Method handle of the method which is treated as an intrinsic.
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeIntrinsic(var_types type, GenTree* op1, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
GenTreeIntrinsic(
var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
#if DEBUGGABLE_GENTREE
GenTreeIntrinsic() : GenTreeOp()
{
}
#endif
};
// GenTreeMultiOp - a node with a flexible count of operands stored in an array.
// The array can be an inline one, or a dynamic one, or both, with switching
// between them supported. See GenTreeJitIntrinsic for an example of a node
// utilizing GenTreeMultiOp. GTF_REVERSE_OPS is supported for GenTreeMultiOp's
// with two operands.
//
struct GenTreeMultiOp : public GenTree
{
public:
class Iterator
{
protected:
GenTree** m_use;
Iterator(GenTree** use) : m_use(use)
{
}
public:
Iterator& operator++()
{
m_use++;
return *this;
}
bool operator==(const Iterator& other) const
{
return m_use == other.m_use;
}
bool operator!=(const Iterator& other) const
{
return m_use != other.m_use;
}
};
class OperandsIterator final : public Iterator
{
public:
OperandsIterator(GenTree** use) : Iterator(use)
{
}
GenTree* operator*()
{
return *m_use;
}
};
class UseEdgesIterator final : public Iterator
{
public:
UseEdgesIterator(GenTree** use) : Iterator(use)
{
}
GenTree** operator*()
{
return m_use;
}
};
private:
GenTree** m_operands;
protected:
template <unsigned InlineOperandCount, typename... Operands>
GenTreeMultiOp(genTreeOps oper,
var_types type,
CompAllocator allocator,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode),
Operands... operands)
: GenTree(oper, type DEBUGARG(largeNode))
{
const size_t OperandCount = sizeof...(Operands);
m_operands = (OperandCount <= InlineOperandCount) ? inlineOperands : allocator.allocate<GenTree*>(OperandCount);
// "OperandCount + 1" so that it works well when OperandCount is 0.
GenTree* operandsArray[OperandCount + 1]{operands...};
InitializeOperands(operandsArray, OperandCount);
}
// Note that this constructor takes ownership of the "operands" array.
template <unsigned InlineOperandCount>
GenTreeMultiOp(genTreeOps oper,
var_types type,
GenTree** operands,
size_t operandCount,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode))
: GenTree(oper, type DEBUGARG(largeNode))
{
m_operands = (operandCount <= InlineOperandCount) ? inlineOperands : operands;
InitializeOperands(operands, operandCount);
}
public:
#if DEBUGGABLE_GENTREE
GenTreeMultiOp() : GenTree()
{
}
#endif
GenTree*& Op(size_t index)
{
size_t actualIndex = index - 1;
assert(actualIndex < m_operandCount);
assert(m_operands[actualIndex] != nullptr);
return m_operands[actualIndex];
}
GenTree* Op(size_t index) const
{
return const_cast<GenTreeMultiOp*>(this)->Op(index);
}
// Note that unlike the general "Operands" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<OperandsIterator> Operands()
{
return MakeIteratorPair(OperandsIterator(GetOperandArray()),
OperandsIterator(GetOperandArray() + GetOperandCount()));
}
// Note that unlike the general "UseEdges" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<UseEdgesIterator> UseEdges()
{
return MakeIteratorPair(UseEdgesIterator(GetOperandArray()),
UseEdgesIterator(GetOperandArray() + GetOperandCount()));
}
size_t GetOperandCount() const
{
return m_operandCount;
}
GenTree** GetOperandArray(size_t startIndex = 0) const
{
return m_operands + startIndex;
}
protected:
// Reconfigures the operand array, leaving it in a "dirty" state.
void ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount);
static bool OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2);
private:
void InitializeOperands(GenTree** operands, size_t operandCount);
void SetOperandCount(size_t newOperandCount)
{
assert(FitsIn<uint8_t>(newOperandCount));
m_operandCount = static_cast<uint8_t>(newOperandCount);
}
};
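// Illustrative usage sketch (editorial; 'multiOp' is a hypothetical GenTreeMultiOp*): operands are
// addressed 1-based via Op(), or walked (in operand-array order, ignoring GTF_REVERSE_OPS) via Operands():
//
//     GenTree* firstOp = multiOp->Op(1); // assumes the node has at least one operand
//     for (GenTree* operand : multiOp->Operands())
//     {
//         // ... visit each operand ...
//     }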
// Helper class used to implement the constructor of GenTreeJitIntrinsic which
// transfers the ownership of the passed-in array to the underlying MultiOp node.
class IntrinsicNodeBuilder final
{
friend struct GenTreeJitIntrinsic;
GenTree** m_operands;
size_t m_operandCount;
GenTree* m_inlineOperands[2];
public:
IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) : m_operandCount(operandCount)
{
m_operands =
(operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands : allocator.allocate<GenTree*>(operandCount);
#ifdef DEBUG
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
}
IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) : m_operandCount(source->GetOperandCount())
{
m_operands = (m_operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands
: allocator.allocate<GenTree*>(m_operandCount);
for (size_t i = 0; i < m_operandCount; i++)
{
m_operands[i] = source->Op(i + 1);
}
}
void AddOperand(size_t index, GenTree* operand)
{
assert(index < m_operandCount);
assert(m_operands[index] == nullptr);
m_operands[index] = operand;
}
GenTree* GetOperand(size_t index) const
{
assert(index < m_operandCount);
assert(m_operands[index] != nullptr);
return m_operands[index];
}
size_t GetOperandCount() const
{
return m_operandCount;
}
private:
GenTree** GetBuiltOperands()
{
#ifdef DEBUG
for (size_t i = 0; i < m_operandCount; i++)
{
assert(m_operands[i] != nullptr);
}
#endif // DEBUG
return m_operands;
}
};
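// Illustrative usage sketch (editorial; the allocator expression and operand nodes are placeholders):
// a caller sizes the builder up front, fills every slot exactly once via AddOperand, and then moves the
// builder into one of the GenTreeJitIntrinsic-derived constructors below, which take ownership of the
// operand array:
//
//     IntrinsicNodeBuilder builder(/* CompAllocator */ alloc, /* operandCount */ 3);
//     builder.AddOperand(0, op1);
//     builder.AddOperand(1, op2);
//     builder.AddOperand(2, op3);
//     // ... pass std::move(builder) to a constructor taking IntrinsicNodeBuilder&& ...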
struct GenTreeJitIntrinsic : public GenTreeMultiOp
{
protected:
GenTree* gtInlineOperands[2];
uint16_t gtLayoutNum;
unsigned char gtAuxiliaryJitType; // For intrinsics that need another type (e.g. Avx2.Gather* or SIMD (by element))
regNumberSmall gtOtherReg; // For intrinsics that return 2 registers
unsigned char gtSimdBaseJitType; // SIMD vector base JIT type
unsigned char gtSimdSize; // SIMD vector size in bytes, use 0 for scalar intrinsics
#if defined(FEATURE_SIMD)
union {
SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
NamedIntrinsic gtHWIntrinsicId;
};
#else
NamedIntrinsic gtHWIntrinsicId;
#endif
public:
unsigned GetLayoutNum() const
{
return gtLayoutNum;
}
void SetLayoutNum(unsigned layoutNum)
{
assert(FitsIn<uint16_t>(layoutNum));
gtLayoutNum = static_cast<uint16_t>(layoutNum);
}
regNumber GetOtherReg() const
{
return (regNumber)gtOtherReg;
}
void SetOtherReg(regNumber reg)
{
gtOtherReg = (regNumberSmall)reg;
assert(gtOtherReg == reg);
}
CorInfoType GetAuxiliaryJitType() const
{
return (CorInfoType)gtAuxiliaryJitType;
}
void SetAuxiliaryJitType(CorInfoType auxiliaryJitType)
{
gtAuxiliaryJitType = (unsigned char)auxiliaryJitType;
assert(gtAuxiliaryJitType == auxiliaryJitType);
}
var_types GetAuxiliaryType() const;
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)gtSimdBaseJitType;
}
CorInfoType GetNormalizedSimdBaseJitType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
switch (simdBaseJitType)
{
case CORINFO_TYPE_NATIVEINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_LONG;
#else
return CORINFO_TYPE_INT;
#endif
}
case CORINFO_TYPE_NATIVEUINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_ULONG;
#else
return CORINFO_TYPE_UINT;
#endif
}
default:
return simdBaseJitType;
}
}
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
gtSimdBaseJitType = (unsigned char)simdBaseJitType;
assert(gtSimdBaseJitType == simdBaseJitType);
}
var_types GetSimdBaseType() const;
unsigned char GetSimdSize() const
{
return gtSimdSize;
}
void SetSimdSize(unsigned simdSize)
{
gtSimdSize = (unsigned char)simdSize;
assert(gtSimdSize == simdSize);
}
template <typename... Operands>
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
CompAllocator allocator,
CorInfoType simdBaseJitType,
unsigned simdSize,
Operands... operands)
: GenTreeMultiOp(oper, type, allocator, gtInlineOperands DEBUGARG(false), operands...)
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
#if DEBUGGABLE_GENTREE
GenTreeJitIntrinsic() : GenTreeMultiOp()
{
}
#endif
protected:
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeMultiOp(oper,
type,
nodeBuilder.GetBuiltOperands(),
nodeBuilder.GetOperandCount(),
gtInlineOperands DEBUGARG(false))
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
public:
bool isSIMD() const
{
return gtSimdSize != 0;
}
};
#ifdef FEATURE_SIMD
/* gtSIMD -- SIMD intrinsic (possibly-binary op [NULL op2 is allowed] with additional fields) */
struct GenTreeSIMD : public GenTreeJitIntrinsic
{
GenTreeSIMD(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1, op2)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
#if DEBUGGABLE_GENTREE
GenTreeSIMD() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
SIMDIntrinsicID GetSIMDIntrinsicId() const
{
return gtSIMDIntrinsicID;
}
static bool Equals(GenTreeSIMD* op1, GenTreeSIMD* op2);
};
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic
{
GenTreeHWIntrinsic(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
SetHWIntrinsicId(hwIntrinsicID);
if (OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
template <typename... Operands>
GenTreeHWIntrinsic(var_types type,
CompAllocator allocator,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic,
Operands... operands)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...)
{
SetHWIntrinsicId(hwIntrinsicID);
if ((sizeof...(Operands) > 0) && OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
#if DEBUGGABLE_GENTREE
GenTreeHWIntrinsic() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
bool OperIsMemoryStore() const; // Returns true for the HW Intrinsic instructions that have MemoryStore semantics,
// false otherwise
bool OperIsMemoryLoadOrStore() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad or
// MemoryStore semantics, false otherwise
bool IsSimdAsHWIntrinsic() const
{
return (gtFlags & GTF_SIMDASHW_OP) != 0;
}
unsigned GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3);
NamedIntrinsic GetHWIntrinsicId() const;
//---------------------------------------------------------------------------------------
// ChangeHWIntrinsicId: Change the intrinsic id for this node.
//
// This method just sets the intrinsic id, asserting that the new intrinsic
// has the same number of operands as the old one, optionally setting some of
// the new operands. Intrinsics with an unknown number of operands are exempt
// from the "do I have the same number of operands" check however, so this method must
// be used with care. Use "ResetHWIntrinsicId" if you need to fully reconfigure
// the node for a different intrinsic, with a possibly different number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// operands - optional operands to set while changing the id
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ChangeHWIntrinsicId(NamedIntrinsic intrinsicId, Operands... operands)
{
const size_t OperandCount = sizeof...(Operands);
assert(OperandCount <= GetOperandCount());
SetHWIntrinsicId(intrinsicId);
GenTree* operandsArray[OperandCount + 1]{operands...};
GenTree** operandsStore = GetOperandArray();
for (size_t i = 0; i < OperandCount; i++)
{
operandsStore[i] = operandsArray[i];
}
}
//---------------------------------------------------------------------------------------
// ResetHWIntrinsicId: Reset the intrinsic id for this node.
//
// This method resets the intrinsic id, fully reconfiguring the node. It must
// be supplied with all the operands the new node needs, and can allocate a
// new dynamic array if the operands do not fit into an inline one, in which
// case a compiler argument is used to get the memory allocator.
//
// This method is similar to "ChangeHWIntrinsicId" but is more versatile and
// thus more expensive. Use it when you need to bash to an intrinsic id with
// a different number of operands than what the original node had, or, which
// is equivalent, when you do not know the original number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// compiler - compiler to allocate memory with, can be "nullptr" if the
// number of new operands does not exceed the length of the
// inline array (so, there are 2 or fewer of them)
// operands - *all* operands for the new node
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, Compiler* compiler, Operands... operands)
{
const size_t NewOperandCount = sizeof...(Operands);
assert((compiler != nullptr) || (NewOperandCount <= ArrLen(gtInlineOperands)));
ResetOperandArray(NewOperandCount, compiler, gtInlineOperands, ArrLen(gtInlineOperands));
ChangeHWIntrinsicId(intrinsicId, operands...);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1, GenTree* op2)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1, op2);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr));
}
static bool Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2);
private:
void SetHWIntrinsicId(NamedIntrinsic intrinsicId);
};
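// Illustrative usage sketch (editorial; intrinsic ids and operands are placeholders): keeping the same
// operand count only needs ChangeHWIntrinsicId, while changing the arity requires ResetHWIntrinsicId
// (with a Compiler* when more than two operands must be stored):
//
//     node->ChangeHWIntrinsicId(sameArityIntrinsicId, op1, op2);     // same operand count
//     node->ResetHWIntrinsicId(newIntrinsicId, comp, op1, op2, op3); // different operand count
//
// The caller remains responsible for updating side effect flags afterwards.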
#endif // FEATURE_HW_INTRINSICS
/* gtIndex -- array access */
struct GenTreeIndex : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
unsigned gtIndElemSize; // size of elements in the array
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
GenTreeIndex(var_types type, GenTree* arr, GenTree* ind, unsigned indElemSize)
: GenTreeOp(GT_INDEX, type, arr, ind)
, gtIndElemSize(indElemSize)
, gtStructElemClass(nullptr) // We always initialize this after construction.
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndex() : GenTreeOp()
{
}
#endif
};
// gtIndexAddr: given an array object and an index, checks that the index is within the bounds of the array if
// necessary and produces the address of the value at that index of the array.
struct GenTreeIndexAddr : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
BasicBlock* gtIndRngFailBB; // Basic block to jump to for array-index-out-of-range
var_types gtElemType; // The element type of the array.
unsigned gtElemSize; // size of elements in the array
unsigned gtLenOffset; // The offset from the array's base address to its length.
unsigned gtElemOffset; // The offset from the array's base address to its first element.
GenTreeIndexAddr(GenTree* arr,
GenTree* ind,
var_types elemType,
CORINFO_CLASS_HANDLE structElemClass,
unsigned elemSize,
unsigned lenOffset,
unsigned elemOffset)
: GenTreeOp(GT_INDEX_ADDR, TYP_BYREF, arr, ind)
, gtStructElemClass(structElemClass)
, gtIndRngFailBB(nullptr)
, gtElemType(elemType)
, gtElemSize(elemSize)
, gtLenOffset(lenOffset)
, gtElemOffset(elemOffset)
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndexAddr() : GenTreeOp()
{
}
#endif
};
/* gtArrLen -- array length (GT_ARR_LENGTH)
GT_ARR_LENGTH is used for "arr.length" */
struct GenTreeArrLen : public GenTreeUnOp
{
GenTree*& ArrRef()
{
return gtOp1;
} // the array address node
private:
int gtArrLenOffset; // constant to add to "gtArrRef" to get the address of the array length.
public:
inline int ArrLenOffset()
{
return gtArrLenOffset;
}
GenTreeArrLen(var_types type, GenTree* arrRef, int lenOffset)
: GenTreeUnOp(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArrLen() : GenTreeUnOp()
{
}
#endif
};
// This takes:
// - a length value,
// - an index value,
// - the label to jump to if the index is out of range, and
// - the "kind" of the throw block to branch to on failure.
// It generates no result.
//
struct GenTreeBoundsChk : public GenTreeOp
{
BasicBlock* gtIndRngFailBB; // Basic block to jump to for index-out-of-range
SpecialCodeKind gtThrowKind; // Kind of throw block to branch to on failure
GenTreeBoundsChk(GenTree* index, GenTree* length, SpecialCodeKind kind)
: GenTreeOp(GT_BOUNDS_CHECK, TYP_VOID, index, length), gtIndRngFailBB(nullptr), gtThrowKind(kind)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeBoundsChk() : GenTreeOp()
{
}
#endif
// If this check is against GT_ARR_LENGTH, returns the array reference, else nullptr.
GenTree* GetArray() const
{
return GetArrayLength()->OperIs(GT_ARR_LENGTH) ? GetArrayLength()->AsArrLen()->ArrRef() : nullptr;
}
// The index expression.
GenTree* GetIndex() const
{
return gtOp1;
}
// An expression for the length.
GenTree* GetArrayLength() const
{
return gtOp2;
}
};
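// Illustrative tree shape (editorial, for orientation): a typical bounds check for "a[i]" compares the
// index against the array's length node, so GetArray() can recover the array reference:
//
//     GT_BOUNDS_CHECK (TYP_VOID)
//        +--* <index expression 'i'>        <- GetIndex()
//        +--* GT_ARR_LENGTH of <arrObj>     <- GetArrayLength(); GetArray() then yields <arrObj>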
// GenTreeArrElem - bounds checked address (byref) of a general array element,
// for multidimensional arrays, or 1-d arrays with non-zero lower bounds.
//
struct GenTreeArrElem : public GenTree
{
GenTree* gtArrObj;
#define GT_ARR_MAX_RANK 3
GenTree* gtArrInds[GT_ARR_MAX_RANK]; // Indices
unsigned char gtArrRank; // Rank of the array
unsigned char gtArrElemSize; // !!! Caution, this is an "unsigned char", it is used only
// on the optimization path of array intrinsics.
// It stores the size of array elements WHEN it can fit
// into an "unsigned char".
// This has caused VSW 571394.
var_types gtArrElemType; // The array element type
// Requires that "inds" is a pointer to an array of "rank" nodes for the indices.
GenTreeArrElem(
var_types type, GenTree* arr, unsigned char rank, unsigned char elemSize, var_types elemType, GenTree** inds)
: GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize), gtArrElemType(elemType)
{
gtFlags |= (arr->gtFlags & GTF_ALL_EFFECT);
for (unsigned char i = 0; i < rank; i++)
{
gtArrInds[i] = inds[i];
gtFlags |= (inds[i]->gtFlags & GTF_ALL_EFFECT);
}
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrElem() : GenTree()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrIndex (gtArrIndex): Expression to bounds-check the index for one dimension of a
// multi-dimensional or non-zero-based array, and compute the effective index
// (i.e. subtracting the lower bound).
//
// Notes:
// This node is similar in some ways to GenTreeBoundsChk, which ONLY performs the check.
// The reason that this node incorporates the check into the effective index computation is
// to avoid duplicating the codegen, as the effective index is required to compute the
// offset anyway.
// TODO-CQ: Enable optimization of the lower bound and length by replacing this:
// /--* <arrObj>
// +--* <index0>
// +--* ArrIndex[i, ]
// with something like:
// /--* <arrObj>
// /--* ArrLowerBound[i, ]
// | /--* <arrObj>
// +--* ArrLen[i, ] (either generalize GT_ARR_LENGTH or add a new node)
// +--* <index0>
// +--* ArrIndex[i, ]
// Which could, for example, be optimized to the following when known to be within bounds:
// /--* TempForLowerBoundDim0
// +--* <index0>
// +--* - (GT_SUB)
//
struct GenTreeArrIndex : public GenTreeOp
{
// The array object - may be any expression producing an Array reference, but is likely to be a lclVar.
GenTree*& ArrObj()
{
return gtOp1;
}
// The index expression - may be any integral expression.
GenTree*& IndexExpr()
{
return gtOp2;
}
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrIndex(var_types type,
GenTree* arrObj,
GenTree* indexExpr,
unsigned char currDim,
unsigned char arrRank,
var_types elemType)
: GenTreeOp(GT_ARR_INDEX, type, arrObj, indexExpr)
, gtCurrDim(currDim)
, gtArrRank(arrRank)
, gtArrElemType(elemType)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeArrIndex() : GenTreeOp()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrOffset (gtArrOffset): Expression to compute the accumulated offset for the address
// of an element of a multi-dimensional or non-zero-based array.
//
// Notes:
// The result of this expression is (gtOffset * dimSize) + gtIndex
// where dimSize is the length/stride/size of the dimension, and is obtained from gtArrObj.
// This node is generated in conjunction with the GenTreeArrIndex node, which computes the
// effective index for a single dimension. The sub-trees can be separately optimized, e.g.
// within a loop body where the expression for the 0th dimension may be invariant.
//
// Here is an example of how the tree might look for a two-dimension array reference:
// /--* const 0
// | /--* <arrObj>
// | +--* <index0>
// +--* ArrIndex[i, ]
// +--* <arrObj>
// /--| arrOffs[i, ]
// | +--* <arrObj>
// | +--* <index1>
// +--* ArrIndex[*,j]
// +--* <arrObj>
// /--| arrOffs[*,j]
// TODO-CQ: see comment on GenTreeArrIndex for how its representation may change. When that
// is done, we will also want to replace the <arrObj> argument to arrOffs with the
// ArrLen as for GenTreeArrIndex.
//
struct GenTreeArrOffs : public GenTree
{
GenTree* gtOffset; // The accumulated offset for lower dimensions - must be TYP_I_IMPL, and
// will either be a CSE temp, the constant 0, or another GenTreeArrOffs node.
GenTree* gtIndex; // The effective index for the current dimension - must be non-negative
// and can be any expression (though it is likely to be either a GenTreeArrIndex,
// node, a lclVar, or a constant).
GenTree* gtArrObj; // The array object - may be any expression producing an Array reference,
// but is likely to be a lclVar.
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrOffs(var_types type,
GenTree* offset,
GenTree* index,
GenTree* arrObj,
unsigned char currDim,
unsigned char rank,
var_types elemType)
: GenTree(GT_ARR_OFFSET, type)
, gtOffset(offset)
, gtIndex(index)
, gtArrObj(arrObj)
, gtCurrDim(currDim)
, gtArrRank(rank)
, gtArrElemType(elemType)
{
assert(index->gtFlags & GTF_EXCEPT);
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrOffs() : GenTree()
{
}
#endif
};
/* gtAddrMode -- Target-specific canonicalized addressing expression (GT_LEA) */
struct GenTreeAddrMode : public GenTreeOp
{
// Address is Base + Index*Scale + Offset.
// These are the legal patterns:
//
// Base // Base != nullptr && Index == nullptr && Scale == 0 && Offset == 0
// Base + Index*Scale // Base != nullptr && Index != nullptr && Scale != 0 && Offset == 0
// Base + Offset // Base != nullptr && Index == nullptr && Scale == 0 && Offset != 0
// Base + Index*Scale + Offset // Base != nullptr && Index != nullptr && Scale != 0 && Offset != 0
// Index*Scale // Base == nullptr && Index != nullptr && Scale > 1 && Offset == 0
// Index*Scale + Offset // Base == nullptr && Index != nullptr && Scale > 1 && Offset != 0
// Offset // Base == nullptr && Index == nullptr && Scale == 0 && Offset != 0
//
// So, for example:
// 1. Base + Index is legal with Scale==1
// 2. If Index is null, Scale should be zero (or uninitialized / unused)
// 3. If Scale==1, then we should have "Base" instead of "Index*Scale", and "Base + Offset" instead of
// "Index*Scale + Offset".
// First operand is base address/pointer
bool HasBase() const
{
return gtOp1 != nullptr;
}
GenTree*& Base()
{
return gtOp1;
}
void SetBase(GenTree* base)
{
gtOp1 = base;
}
// Second operand is scaled index value
bool HasIndex() const
{
return gtOp2 != nullptr;
}
GenTree*& Index()
{
return gtOp2;
}
void SetIndex(GenTree* index)
{
gtOp2 = index;
}
unsigned GetScale() const
{
return gtScale;
}
void SetScale(unsigned scale)
{
gtScale = scale;
}
int Offset()
{
return static_cast<int>(gtOffset);
}
void SetOffset(int offset)
{
gtOffset = offset;
}
unsigned gtScale; // The scale factor
private:
ssize_t gtOffset; // The offset to add
public:
GenTreeAddrMode(var_types type, GenTree* base, GenTree* index, unsigned scale, ssize_t offset)
: GenTreeOp(GT_LEA, type, base, index)
{
assert(base != nullptr || index != nullptr);
gtScale = scale;
gtOffset = offset;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeAddrMode() : GenTreeOp()
{
}
#endif
};
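// Illustrative construction sketch (editorial; the placement-new allocation idiom and the operand nodes
// are assumed, not shown here): encoding "base + index*4 + 16" matches the "Base + Index*Scale + Offset"
// pattern documented above:
//
//     GenTreeAddrMode* lea =
//         new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, baseNode, indexNode, /* scale */ 4, /* offset */ 16);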
// Indir is just an op, no additional data, but some additional abstractions
struct GenTreeIndir : public GenTreeOp
{
// The address for the indirection.
GenTree*& Addr()
{
return gtOp1;
}
void SetAddr(GenTree* addr)
{
assert(addr != nullptr);
assert(addr->TypeIs(TYP_I_IMPL, TYP_BYREF));
gtOp1 = addr;
}
// These methods provide an interface to the indirection node's address expression
// (its base, index, scale and offset components).
bool HasBase();
bool HasIndex();
GenTree* Base();
GenTree* Index();
unsigned Scale();
ssize_t Offset();
GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data)
{
}
// True if this indirection is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_IND_VOLATILE) != 0;
}
// True if this indirection is an unaligned memory operation.
bool IsUnaligned() const
{
return (gtFlags & GTF_IND_UNALIGNED) != 0;
}
#if DEBUGGABLE_GENTREE
// Used only for GenTree::GetVtableForOper()
GenTreeIndir() : GenTreeOp()
{
}
#else
// Used by XARCH codegen to construct temporary trees to pass to the emitter.
GenTreeIndir() : GenTreeOp(GT_NOP, TYP_UNDEF)
{
}
#endif
};
// gtBlk -- 'block' (GT_BLK, GT_STORE_BLK).
//
// This is the base type for all of the nodes that represent block or struct
// values.
// Since it can be a store, it includes gtBlkOpKind to specify the type of
// code generation that will be used for the block operation.
struct GenTreeBlk : public GenTreeIndir
{
private:
ClassLayout* m_layout;
public:
ClassLayout* GetLayout() const
{
return m_layout;
}
void SetLayout(ClassLayout* layout)
{
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
m_layout = layout;
}
// The data to be stored (null for GT_BLK)
GenTree*& Data()
{
return gtOp2;
}
void SetData(GenTree* dataNode)
{
gtOp2 = dataNode;
}
// The size of the buffer to be copied.
unsigned Size() const
{
assert((m_layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
return (m_layout != nullptr) ? m_layout->GetSize() : 0;
}
// Instruction selection: the code sequence we will use at codegen time
// to encode this operation.
enum
{
BlkOpKindInvalid,
#ifndef TARGET_X86
BlkOpKindHelper,
#endif
#ifdef TARGET_XARCH
BlkOpKindRepInstr,
#endif
BlkOpKindUnroll,
} gtBlkOpKind;
#ifndef JIT32_GCENCODER
bool gtBlkOpGcUnsafe;
#endif
#ifdef TARGET_XARCH
bool IsOnHeapAndContainsReferences()
{
return (m_layout != nullptr) && m_layout->HasGCPtr() && !Addr()->OperIsLocalAddr();
}
#endif
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, nullptr)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
}
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, data)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
gtFlags |= (data->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeBlk() : GenTreeIndir()
{
}
#endif // DEBUGGABLE_GENTREE
};
// gtObj -- 'object' (GT_OBJ).
//
// This node is used for block values that may have GC pointers.
struct GenTreeObj : public GenTreeBlk
{
void Init()
{
// By default, an OBJ is assumed to be a global reference, unless it is local.
GenTreeLclVarCommon* lcl = Addr()->IsLocalAddrExpr();
if ((lcl == nullptr) || ((lcl->gtFlags & GTF_GLOB_EFFECT) != 0))
{
gtFlags |= GTF_GLOB_REF;
}
noway_assert(GetLayout()->GetClassHandle() != NO_CLASS_HANDLE);
}
GenTreeObj(var_types type, GenTree* addr, ClassLayout* layout) : GenTreeBlk(GT_OBJ, type, addr, layout)
{
Init();
}
GenTreeObj(var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeBlk(GT_STORE_OBJ, type, addr, data, layout)
{
Init();
}
#if DEBUGGABLE_GENTREE
GenTreeObj() : GenTreeBlk()
{
}
#endif
};
// GenTreeStoreDynBlk -- 'dynamic block store' (GT_STORE_DYN_BLK).
//
// This node is used to represent stores that have a dynamic size - the "cpblk" and "initblk"
// IL instructions are implemented with it. Note that such stores assume the input has no GC
// pointers in it, and as such do not ever use write barriers.
//
// The "Data()" member of this node will either be a "dummy" IND(struct) node, for "cpblk", or
// the zero constant/INIT_VAL for "initblk".
//
struct GenTreeStoreDynBlk : public GenTreeBlk
{
public:
GenTree* gtDynamicSize;
GenTreeStoreDynBlk(GenTree* dstAddr, GenTree* data, GenTree* dynamicSize)
: GenTreeBlk(GT_STORE_DYN_BLK, TYP_VOID, dstAddr, data, nullptr), gtDynamicSize(dynamicSize)
{
// Conservatively the 'dstAddr' could be null or point into the global heap.
// Likewise, this is a store and so must be marked with the GTF_ASG flag.
gtFlags |= (GTF_ASG | GTF_EXCEPT | GTF_GLOB_REF);
gtFlags |= (dynamicSize->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeStoreDynBlk() : GenTreeBlk()
{
}
#endif // DEBUGGABLE_GENTREE
};
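// Illustrative shapes (editorial, for orientation), matching the note on Data() above:
//
//     STORE_DYN_BLK(dstAddr, IND<struct>(srcAddr), size)   // "cpblk"   - copy 'size' bytes
//     STORE_DYN_BLK(dstAddr, <zero / INIT_VAL>,    size)   // "initblk" - initialize 'size' bytes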
// Read-modify-write status of a RMW memory op rooted at a storeInd
enum RMWStatus
{
STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
// Default status unless modified by IsRMWMemOpRootedAtStoreInd()
// One of these denote storeind is a RMW memory operation.
STOREIND_RMW_DST_IS_OP1, // StoreInd is known to be a RMW memory op and dst candidate is op1
STOREIND_RMW_DST_IS_OP2, // StoreInd is known to be a RMW memory op and dst candidate is op2
// One of these denote the reason for storeind is marked as non-RMW operation
STOREIND_RMW_UNSUPPORTED_ADDR, // Addr mode is not yet supported for RMW memory
STOREIND_RMW_UNSUPPORTED_OPER, // Operation is not supported for RMW memory
STOREIND_RMW_UNSUPPORTED_TYPE, // Type is not supported for RMW memory
STOREIND_RMW_INDIR_UNEQUAL // Indir to read value is not equivalent to indir that writes the value
};
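// Illustrative sketch of the pattern this classification describes: on xarch, a tree of the shape
//
//     STOREIND(addr, ADD(IND(addr), CNS_INT 1))
//
// can be emitted as a single "add [mem], 1" instruction. Lowering would mark such a STOREIND as
// STOREIND_RMW_DST_IS_OP1, since the IND that re-reads the store address is op1 of the ADD; if the
// read indirection did not match the write indirection, the node would instead be marked
// STOREIND_RMW_INDIR_UNEQUAL.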
#ifdef DEBUG
inline const char* RMWStatusDescription(RMWStatus status)
{
switch (status)
{
case STOREIND_RMW_STATUS_UNKNOWN:
return "RMW status unknown";
case STOREIND_RMW_DST_IS_OP1:
return "dst candidate is op1";
case STOREIND_RMW_DST_IS_OP2:
return "dst candidate is op2";
case STOREIND_RMW_UNSUPPORTED_ADDR:
return "address mode is not supported";
case STOREIND_RMW_UNSUPPORTED_OPER:
return "oper is not supported";
case STOREIND_RMW_UNSUPPORTED_TYPE:
return "type is not supported";
case STOREIND_RMW_INDIR_UNEQUAL:
return "read indir is not equivalent to write indir";
default:
unreached();
}
}
#endif
// StoreInd is just a BinOp, with additional RMW status
struct GenTreeStoreInd : public GenTreeIndir
{
#if !CPU_LOAD_STORE_ARCH
// The below flag is set and used during lowering
RMWStatus gtRMWStatus;
bool IsRMWStatusUnknown()
{
return gtRMWStatus == STOREIND_RMW_STATUS_UNKNOWN;
}
bool IsNonRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_UNSUPPORTED_ADDR || gtRMWStatus == STOREIND_RMW_UNSUPPORTED_OPER ||
gtRMWStatus == STOREIND_RMW_UNSUPPORTED_TYPE || gtRMWStatus == STOREIND_RMW_INDIR_UNEQUAL;
}
bool IsRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1 || gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
bool IsRMWDstOp1()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1;
}
bool IsRMWDstOp2()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
#endif //! CPU_LOAD_STORE_ARCH
RMWStatus GetRMWStatus()
{
#if !CPU_LOAD_STORE_ARCH
return gtRMWStatus;
#else
return STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatusDefault()
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatus(RMWStatus status)
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = status;
#endif
}
GenTree*& Data()
{
return gtOp2;
}
GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data)
{
SetRMWStatusDefault();
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeStoreInd() : GenTreeIndir()
{
SetRMWStatusDefault();
}
#endif
};
/* gtRetExp -- Place holder for the return expression from an inline candidate (GT_RET_EXPR) */
struct GenTreeRetExpr : public GenTree
{
GenTree* gtInlineCandidate;
BasicBlockFlags bbFlags;
CORINFO_CLASS_HANDLE gtRetClsHnd;
GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type)
{
}
#if DEBUGGABLE_GENTREE
GenTreeRetExpr() : GenTree()
{
}
#endif
};
// In LIR there are no longer statements so debug information is inserted linearly using these nodes.
struct GenTreeILOffset : public GenTree
{
DebugInfo gtStmtDI; // debug info
#ifdef DEBUG
IL_OFFSET gtStmtLastILoffs; // instr offset at end of stmt
#endif
GenTreeILOffset(const DebugInfo& di DEBUGARG(IL_OFFSET lastOffset = BAD_IL_OFFSET))
: GenTree(GT_IL_OFFSET, TYP_VOID)
, gtStmtDI(di)
#ifdef DEBUG
, gtStmtLastILoffs(lastOffset)
#endif
{
}
#if DEBUGGABLE_GENTREE
GenTreeILOffset() : GenTree(GT_IL_OFFSET, TYP_VOID)
{
}
#endif
};
// GenTreeList: adapter class for forward iteration of the execution order GenTree linked list
// using range-based `for`, normally used via Statement::TreeList(), e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
class GenTreeList
{
GenTree* m_trees;
// Forward iterator for the execution order GenTree linked list (using `gtNext` pointer).
//
class iterator
{
GenTree* m_tree;
public:
iterator(GenTree* tree) : m_tree(tree)
{
}
GenTree* operator*() const
{
return m_tree;
}
iterator& operator++()
{
m_tree = m_tree->gtNext;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_tree != i.m_tree;
}
};
public:
GenTreeList(GenTree* trees) : m_trees(trees)
{
}
iterator begin() const
{
return iterator(m_trees);
}
iterator end() const
{
return iterator(nullptr);
}
};
// We use the following format when printing the Statement number: Statement->GetID()
// This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int)
#define FMT_STMT "STMT%05u"
struct Statement
{
public:
Statement(GenTree* expr DEBUGARG(unsigned stmtID))
: m_rootNode(expr)
, m_treeList(nullptr)
, m_next(nullptr)
, m_prev(nullptr)
#ifdef DEBUG
, m_lastILOffset(BAD_IL_OFFSET)
, m_stmtID(stmtID)
#endif
{
}
GenTree* GetRootNode() const
{
return m_rootNode;
}
GenTree** GetRootNodePointer()
{
return &m_rootNode;
}
void SetRootNode(GenTree* treeRoot)
{
m_rootNode = treeRoot;
}
GenTree* GetTreeList() const
{
return m_treeList;
}
void SetTreeList(GenTree* treeHead)
{
m_treeList = treeHead;
}
// TreeList: convenience method for enabling range-based `for` iteration over the
// execution order of the GenTree linked list, e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
GenTreeList TreeList() const
{
return GenTreeList(GetTreeList());
}
const DebugInfo& GetDebugInfo() const
{
return m_debugInfo;
}
void SetDebugInfo(const DebugInfo& di)
{
m_debugInfo = di;
di.Validate();
}
#ifdef DEBUG
IL_OFFSET GetLastILOffset() const
{
return m_lastILOffset;
}
void SetLastILOffset(IL_OFFSET lastILOffset)
{
m_lastILOffset = lastILOffset;
}
unsigned GetID() const
{
return m_stmtID;
}
#endif // DEBUG
Statement* GetNextStmt() const
{
return m_next;
}
void SetNextStmt(Statement* nextStmt)
{
m_next = nextStmt;
}
Statement* GetPrevStmt() const
{
return m_prev;
}
void SetPrevStmt(Statement* prevStmt)
{
m_prev = prevStmt;
}
bool IsPhiDefnStmt() const
{
return m_rootNode->IsPhiDefn();
}
unsigned char GetCostSz() const
{
return m_rootNode->GetCostSz();
}
unsigned char GetCostEx() const
{
return m_rootNode->GetCostEx();
}
private:
// The root of the expression tree.
// Note: It will be the last node in evaluation order.
GenTree* m_rootNode;
// The tree list head (for forward walks in evaluation order).
// The value is `nullptr` until we have set the sequencing of the nodes.
GenTree* m_treeList;
// The statement nodes are doubly-linked. The first statement node in a block points
// to the last node in the block via its `m_prev` link. Note that the last statement node
// does not point to the first: it has `m_next == nullptr`; that is, the list is not fully circular.
Statement* m_next;
Statement* m_prev;
DebugInfo m_debugInfo;
#ifdef DEBUG
IL_OFFSET m_lastILOffset; // The instr offset at the end of this statement.
unsigned m_stmtID;
#endif
};
// StatementList: adapter class for forward iteration of the statement linked list using range-based `for`,
// normally used via BasicBlock::Statements(), e.g.:
// for (Statement* const stmt : block->Statements()) ...
// or:
// for (Statement* const stmt : block->NonPhiStatements()) ...
//
class StatementList
{
Statement* m_stmts;
// Forward iterator for the statement linked list.
//
class iterator
{
Statement* m_stmt;
public:
iterator(Statement* stmt) : m_stmt(stmt)
{
}
Statement* operator*() const
{
return m_stmt;
}
iterator& operator++()
{
m_stmt = m_stmt->GetNextStmt();
return *this;
}
bool operator!=(const iterator& i) const
{
return m_stmt != i.m_stmt;
}
};
public:
StatementList(Statement* stmts) : m_stmts(stmts)
{
}
iterator begin() const
{
return iterator(m_stmts);
}
iterator end() const
{
return iterator(nullptr);
}
};
/* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
pointers) must be flagged as 'large' in GenTree::InitNodeSize().
*/
/* AsClsVar() -- 'static data member' (GT_CLS_VAR) */
struct GenTreeClsVar : public GenTree
{
CORINFO_FIELD_HANDLE gtClsVarHnd;
FieldSeqNode* gtFieldSeq;
GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(GT_CLS_VAR, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
gtFlags |= GTF_GLOB_REF;
}
GenTreeClsVar(genTreeOps oper, var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(oper, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
assert((oper == GT_CLS_VAR) || (oper == GT_CLS_VAR_ADDR));
gtFlags |= GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeClsVar() : GenTree()
{
}
#endif
};
/* gtArgPlace -- 'register argument placeholder' (GT_ARGPLACE) */
struct GenTreeArgPlace : public GenTree
{
CORINFO_CLASS_HANDLE gtArgPlaceClsHnd; // Needed when we have a TYP_STRUCT argument
GenTreeArgPlace(var_types type, CORINFO_CLASS_HANDLE clsHnd) : GenTree(GT_ARGPLACE, type), gtArgPlaceClsHnd(clsHnd)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArgPlace() : GenTree()
{
}
#endif
};
/* gtPhiArg -- phi node rhs argument, var = phi(phiarg, phiarg, phiarg...); GT_PHI_ARG */
struct GenTreePhiArg : public GenTreeLclVarCommon
{
BasicBlock* gtPredBB;
GenTreePhiArg(var_types type, unsigned lclNum, unsigned ssaNum, BasicBlock* block)
: GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block)
{
SetSsaNum(ssaNum);
}
#if DEBUGGABLE_GENTREE
GenTreePhiArg() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtPutArgStk -- Argument passed on stack (GT_PUTARG_STK) */
struct GenTreePutArgStk : public GenTreeUnOp
{
private:
unsigned m_byteOffset;
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned m_byteSize; // The number of bytes that this argument is occupying on the stack with padding.
#endif
public:
#if defined(DEBUG_ARG_SLOTS)
unsigned gtSlotNum; // Slot number of the argument to be passed on stack
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
#endif
#endif
#if defined(UNIX_X86_ABI)
unsigned gtPadAlign; // Number of padding slots for stack alignment
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
GenTreeCall* gtCall; // the call node to which this argument belongs
#endif
#if FEATURE_FASTTAILCALL
bool gtPutInIncomingArgArea; // Whether this arg needs to be placed in incoming arg area.
// By default this is false and will be placed in out-going arg area.
// Fast tail calls set this to true.
// In future if we need to add more such bool fields consider bit fields.
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
// TODO-Throughput: The following information should be obtained from the child
// block node.
enum class Kind : __int8{
Invalid, RepInstr, PartialRepInstr, Unroll, Push, PushAllSlots,
};
Kind gtPutArgStkKind;
#endif
GenTreePutArgStk(genTreeOps oper,
var_types type,
GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
GenTreeCall* callNode,
bool putInIncomingArgArea)
: GenTreeUnOp(oper, type, op1 DEBUGARG(/*largeNode*/ false))
, m_byteOffset(stackByteOffset)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, m_byteSize(stackByteSize)
#endif
#if defined(DEBUG_ARG_SLOTS)
, gtSlotNum(slotNum)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtNumSlots(numSlots)
#endif
#endif
#if defined(UNIX_X86_ABI)
, gtPadAlign(0)
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
, gtCall(callNode)
#endif
#if FEATURE_FASTTAILCALL
, gtPutInIncomingArgArea(putInIncomingArgArea)
#endif // FEATURE_FASTTAILCALL
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtPutArgStkKind(Kind::Invalid)
#endif
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset == slotNum * TARGET_POINTER_SIZE);
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
DEBUG_ARG_SLOTS_ASSERT(m_byteSize == gtNumSlots * TARGET_POINTER_SIZE);
#endif
}
GenTree*& Data()
{
return gtOp1;
}
#if FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return gtPutInIncomingArgArea;
}
#else // !FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
unsigned getArgOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == gtSlotNum);
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset % TARGET_POINTER_SIZE == 0);
return m_byteOffset;
}
#if defined(UNIX_X86_ABI)
unsigned getArgPadding() const
{
return gtPadAlign;
}
void setArgPadding(unsigned padAlign)
{
gtPadAlign = padAlign;
}
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const
{
return m_byteSize;
}
// Return true if this is a PutArgStk of a SIMD12 struct.
// This is needed because such values are re-typed to SIMD16, and the type of PutArgStk is VOID.
unsigned isSIMD12() const
{
return (varTypeIsSIMD(gtOp1) && (GetStackByteSize() == 12));
}
bool isPushKind() const
{
return (gtPutArgStkKind == Kind::Push) || (gtPutArgStkKind == Kind::PushAllSlots);
}
#else // !FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const;
#endif // !FEATURE_PUT_STRUCT_ARG_STK
#if DEBUGGABLE_GENTREE
GenTreePutArgStk() : GenTreeUnOp()
{
}
#endif
};
#if FEATURE_ARG_SPLIT
// Represent the struct argument: split value in register(s) and stack
struct GenTreePutArgSplit : public GenTreePutArgStk
{
unsigned gtNumRegs;
GenTreePutArgSplit(GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
unsigned numRegs,
GenTreeCall* callNode,
bool putIncomingArgArea)
: GenTreePutArgStk(GT_PUTARG_SPLIT,
TYP_STRUCT,
op1,
stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
numSlots,
#endif
#endif
callNode,
putIncomingArgArea)
, gtNumRegs(numRegs)
{
ClearOtherRegs();
ClearOtherRegFlags();
}
// Type required to support multi-reg struct arg.
var_types m_regType[MAX_REG_ARG];
// First reg of struct is always given by GetRegNum().
// gtOtherRegs holds the other reg numbers of struct.
regNumberSmall gtOtherRegs[MAX_REG_ARG - 1];
MultiRegSpillFlags gtSpillFlags;
//---------------------------------------------------------------------------
// GetRegNumByIdx: get ith register allocated to this struct argument.
//
// Arguments:
    //     idx - index of the register
//
// Return Value:
// Return regNumber of ith register of this struct argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
return GetRegNum();
}
return (regNumber)gtOtherRegs[idx - 1];
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set ith register of this struct argument
//
// Arguments:
// reg - reg number
    //     idx - index of the register
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
SetRegNum(reg);
}
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
for (unsigned i = 0; i < MAX_REG_ARG - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
var_types GetRegType(unsigned index) const
{
assert(index < gtNumRegs);
var_types result = m_regType[index];
return result;
}
//-------------------------------------------------------------------
    // ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreePutArgSplit() : GenTreePutArgStk()
{
}
#endif
};
#endif // FEATURE_ARG_SPLIT
// Represents GT_COPY or GT_RELOAD node
//
// As it turns out, these are only needed on targets that happen to have multi-reg returns.
// However, they are actually needed on any target that has any multi-reg ops. It is just
// coincidence that those are the same (and there isn't a FEATURE_MULTIREG_OPS).
//
struct GenTreeCopyOrReload : public GenTreeUnOp
{
#if FEATURE_MULTIREG_RET
// State required to support copy/reload of a multi-reg call node.
// The first register is always given by GetRegNum().
//
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
#endif
//----------------------------------------------------------
// ClearOtherRegs: set gtOtherRegs to REG_NA.
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//-----------------------------------------------------------
// GetRegNumByIdx: Get regNumber of ith position.
//
// Arguments:
// idx - register position.
//
// Return Value:
// Returns regNumber assigned to ith position.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//-----------------------------------------------------------
// SetRegNumByIdx: Set the regNumber for ith position.
//
// Arguments:
// reg - reg number
// idx - register position.
//
// Return Value:
// None.
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
else
{
unreached();
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given copy/reload node to this
// node.
//
// Arguments:
// from - GenTree node from which to copy multi-reg state
//
// Return Value:
// None
//
// TODO-ARM: Implement this routine for Arm64 and Arm32
// TODO-X86: Implement this routine for x86
void CopyOtherRegs(GenTreeCopyOrReload* from)
{
assert(OperGet() == from->OperGet());
#ifdef UNIX_AMD64_ABI
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = from->gtOtherRegs[i];
}
#endif
}
unsigned GetRegCount() const
{
#if FEATURE_MULTIREG_RET
// We need to return the highest index for which we have a valid register.
// Note that the gtOtherRegs array is off by one (the 0th register is GetRegNum()).
// If there's no valid register in gtOtherRegs, GetRegNum() must be valid.
// Note that for most nodes, the set of valid registers must be contiguous,
// but for COPY or RELOAD there is only a valid register for the register positions
// that must be copied or reloaded.
//
for (unsigned i = MAX_RET_REG_COUNT; i > 1; i--)
{
if (gtOtherRegs[i - 2] != REG_NA)
{
return i;
}
}
#endif
// We should never have a COPY or RELOAD with no valid registers.
assert(GetRegNum() != REG_NA);
return 1;
}
GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1)
{
assert(type != TYP_STRUCT || op1->IsMultiRegNode());
SetRegNum(REG_NA);
ClearOtherRegs();
}
#if DEBUGGABLE_GENTREE
GenTreeCopyOrReload() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_ALLOCOBJ node
struct GenTreeAllocObj final : public GenTreeUnOp
{
unsigned int gtNewHelper; // Value returned by ICorJitInfo::getNewHelper
bool gtHelperHasSideEffects;
CORINFO_CLASS_HANDLE gtAllocObjClsHnd;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeAllocObj(
var_types type, unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, GenTree* op)
: GenTreeUnOp(GT_ALLOCOBJ, type, op DEBUGARG(/*largeNode*/ TRUE))
, // This node in most cases will be changed to a call node
gtNewHelper(helper)
, gtHelperHasSideEffects(helperHasSideEffects)
, gtAllocObjClsHnd(clsHnd)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeAllocObj() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_RUNTIMELOOKUP node
struct GenTreeRuntimeLookup final : public GenTreeUnOp
{
CORINFO_GENERIC_HANDLE gtHnd;
CorInfoGenericHandleType gtHndType;
GenTreeRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree)
: GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)), gtHnd(hnd), gtHndType(hndTyp)
{
assert(hnd != nullptr);
}
#if DEBUGGABLE_GENTREE
GenTreeRuntimeLookup() : GenTreeUnOp()
{
}
#endif
// Return reference to the actual tree that does the lookup
GenTree*& Lookup()
{
return gtOp1;
}
bool IsClassHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_CLASS;
}
bool IsMethodHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_METHOD;
}
bool IsFieldHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_FIELD;
}
// Note these operations describe the handle that is input to the
// lookup, not the handle produced by the lookup.
CORINFO_CLASS_HANDLE GetClassHandle() const
{
assert(IsClassHandle());
return (CORINFO_CLASS_HANDLE)gtHnd;
}
CORINFO_METHOD_HANDLE GetMethodHandle() const
{
assert(IsMethodHandle());
return (CORINFO_METHOD_HANDLE)gtHnd;
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
        assert(IsFieldHandle());
return (CORINFO_FIELD_HANDLE)gtHnd;
}
};
// Represents the condition of a GT_JCC or GT_SETCC node.
struct GenCondition
{
// clang-format off
enum Code : unsigned char
{
OperMask = 7,
Unsigned = 8,
Unordered = Unsigned,
Float = 16,
// 0 would be the encoding of "signed EQ" but since equality is sign insensitive
// we'll use 0 as invalid/uninitialized condition code. This will also leave 1
// as a spare code.
NONE = 0,
SLT = 2,
SLE = 3,
SGE = 4,
SGT = 5,
S = 6,
NS = 7,
EQ = Unsigned | 0, // = 8
NE = Unsigned | 1, // = 9
ULT = Unsigned | SLT, // = 10
ULE = Unsigned | SLE, // = 11
UGE = Unsigned | SGE, // = 12
UGT = Unsigned | SGT, // = 13
C = Unsigned | S, // = 14
NC = Unsigned | NS, // = 15
FEQ = Float | 0, // = 16
FNE = Float | 1, // = 17
FLT = Float | SLT, // = 18
FLE = Float | SLE, // = 19
FGE = Float | SGE, // = 20
FGT = Float | SGT, // = 21
O = Float | S, // = 22
NO = Float | NS, // = 23
FEQU = Unordered | FEQ, // = 24
FNEU = Unordered | FNE, // = 25
FLTU = Unordered | FLT, // = 26
FLEU = Unordered | FLE, // = 27
FGEU = Unordered | FGE, // = 28
FGTU = Unordered | FGT, // = 29
P = Unordered | O, // = 30
NP = Unordered | NO, // = 31
};
// clang-format on
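    // As a sketch of how the encoding composes: SLT is 2, so ULT = Unsigned | SLT = 10,
    // FLT = Float | SLT = 18 and FLTU = Unordered | FLT = 26, matching the values annotated
    // above. IsFloat()/IsUnordered() below test those bits directly (after excluding the
    // flag-only codes), whereas IsUnsigned() checks the ULT..UGT range so that EQ/NE, which
    // also carry the Unsigned bit, are not reported as unsigned compares.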
private:
Code m_code;
public:
Code GetCode() const
{
return m_code;
}
bool IsFlag() const
{
return (m_code & OperMask) >= S;
}
bool IsUnsigned() const
{
return (ULT <= m_code) && (m_code <= UGT);
}
bool IsFloat() const
{
return !IsFlag() && (m_code & Float) != 0;
}
bool IsUnordered() const
{
return !IsFlag() && (m_code & (Float | Unordered)) == (Float | Unordered);
}
bool Is(Code cond) const
{
return m_code == cond;
}
template <typename... TRest>
bool Is(Code c, TRest... rest) const
{
return Is(c) || Is(rest...);
}
// Indicate whether the condition should be swapped in order to avoid generating
// multiple branches. This happens for certain floating point conditions on XARCH,
// see GenConditionDesc and its associated mapping table for more details.
bool PreferSwap() const
{
#ifdef TARGET_XARCH
return Is(GenCondition::FLT, GenCondition::FLE, GenCondition::FGTU, GenCondition::FGEU);
#else
return false;
#endif
}
const char* Name() const
{
// clang-format off
static const char* names[]
{
"NONE", "???", "SLT", "SLE", "SGE", "SGT", "S", "NS",
"UEQ", "UNE", "ULT", "ULE", "UGE", "UGT", "C", "NC",
"FEQ", "FNE", "FLT", "FLE", "FGE", "FGT", "O", "NO",
"FEQU", "FNEU", "FLTU", "FLEU", "FGEU", "FGTU", "P", "NP"
};
// clang-format on
assert(m_code < ArrLen(names));
return names[m_code];
}
GenCondition() : m_code()
{
}
GenCondition(Code cond) : m_code(cond)
{
}
static_assert((GT_NE - GT_EQ) == (NE & ~Unsigned), "bad relop");
static_assert((GT_LT - GT_EQ) == SLT, "bad relop");
static_assert((GT_LE - GT_EQ) == SLE, "bad relop");
static_assert((GT_GE - GT_EQ) == SGE, "bad relop");
static_assert((GT_GT - GT_EQ) == SGT, "bad relop");
static_assert((GT_TEST_NE - GT_TEST_EQ) == (NE & ~Unsigned), "bad relop");
static GenCondition FromRelop(GenTree* relop)
{
assert(relop->OperIsCompare());
if (varTypeIsFloating(relop->gtGetOp1()))
{
return FromFloatRelop(relop);
}
else
{
return FromIntegralRelop(relop);
}
}
static GenCondition FromFloatRelop(GenTree* relop)
{
assert(varTypeIsFloating(relop->gtGetOp1()) && varTypeIsFloating(relop->gtGetOp2()));
return FromFloatRelop(relop->OperGet(), (relop->gtFlags & GTF_RELOP_NAN_UN) != 0);
}
static GenCondition FromFloatRelop(genTreeOps oper, bool isUnordered)
{
assert(GenTree::OperIsCompare(oper));
unsigned code = oper - GT_EQ;
assert(code <= SGT);
code |= Float;
if (isUnordered)
{
code |= Unordered;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition FromIntegralRelop(GenTree* relop)
{
assert(!varTypeIsFloating(relop->gtGetOp1()) && !varTypeIsFloating(relop->gtGetOp2()));
return FromIntegralRelop(relop->OperGet(), relop->IsUnsigned());
}
static GenCondition FromIntegralRelop(genTreeOps oper, bool isUnsigned)
{
assert(GenTree::OperIsCompare(oper));
// GT_TEST_EQ/NE are special, they need to be mapped as GT_EQ/NE
unsigned code = oper - ((oper >= GT_TEST_EQ) ? GT_TEST_EQ : GT_EQ);
if (isUnsigned || (code <= 1)) // EQ/NE are treated as unsigned
{
code |= Unsigned;
}
return GenCondition(static_cast<Code>(code));
}
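    // For example (a sketch using the factory above):
    //
    //     GenCondition c1 = GenCondition::FromIntegralRelop(GT_LT, /* isUnsigned */ true);  // ULT
    //     GenCondition c2 = GenCondition::FromIntegralRelop(GT_EQ, /* isUnsigned */ false); // EQ
    //
    // GT_EQ lands on the unsigned row regardless of "isUnsigned" because equality is
    // sign-insensitive (the "code <= 1" check above).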
static GenCondition Reverse(GenCondition condition)
{
// clang-format off
static const Code reverse[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGE, SGT, SLT, SLE, NS, S,
NE, EQ, UGE, UGT, ULT, ULE, NC, C,
FNEU, FEQU, FGEU, FGTU, FLTU, FLEU, NO, O,
            FNE, FEQ, FGE, FGT, FLT, FLE, NP, P
};
// clang-format on
assert(condition.m_code < ArrLen(reverse));
return GenCondition(reverse[condition.m_code]);
}
static GenCondition Swap(GenCondition condition)
{
// clang-format off
static const Code swap[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGT, SGE, SLE, SLT, S, NS,
EQ, NE, UGT, UGE, ULE, ULT, C, NC,
FEQ, FNE, FGT, FGE, FLE, FLT, O, NO,
FEQU, FNEU, FGTU, FGEU, FLEU, FLTU, P, NP
};
// clang-format on
assert(condition.m_code < ArrLen(swap));
return GenCondition(swap[condition.m_code]);
}
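    // For example (a sketch): Reverse(GenCondition(ULT)) yields UGE, the condition to use when
    // the sense of a branch is inverted, while Swap(GenCondition(ULT)) yields UGT, the condition
    // to use after the compare's operands have been swapped.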
};
// Represents a GT_JCC or GT_SETCC node.
struct GenTreeCC final : public GenTree
{
GenCondition gtCondition;
GenTreeCC(genTreeOps oper, GenCondition condition, var_types type = TYP_VOID)
: GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition)
{
assert(OperIs(GT_JCC, GT_SETCC));
}
#if DEBUGGABLE_GENTREE
GenTreeCC() : GenTree()
{
}
#endif // DEBUGGABLE_GENTREE
};
//------------------------------------------------------------------------
// Deferred inline functions of GenTree -- these need the subtypes above to
// be defined already.
//------------------------------------------------------------------------
inline bool GenTree::OperIsBlkOp()
{
return ((gtOper == GT_ASG) && varTypeIsStruct(AsOp()->gtOp1)) || OperIsStoreBlk();
}
inline bool GenTree::OperIsInitBlkOp()
{
if (!OperIsBlkOp())
{
return false;
}
GenTree* src;
if (gtOper == GT_ASG)
{
src = gtGetOp2();
}
else
{
src = AsBlk()->Data()->gtSkipReloadOrCopy();
}
return src->OperIsInitVal() || src->OperIsConst();
}
inline bool GenTree::OperIsCopyBlkOp()
{
return OperIsBlkOp() && !OperIsInitBlkOp();
}
//------------------------------------------------------------------------
// IsFPZero: Checks whether this is a floating point constant with value 0.0
//
// Return Value:
//    Returns true iff the tree is a GT_CNS_DBL with a value of 0.0.
inline bool GenTree::IsFPZero() const
{
if ((gtOper == GT_CNS_DBL) && (AsDblCon()->gtDconVal == 0.0))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// IsIntegralConst: Checks whether this is a constant node with the given value
//
// Arguments:
// constVal - the value of interest
//
// Return Value:
// Returns true iff the tree is an integral constant opcode, with
// the given value.
//
// Notes:
//    Like gtIconVal, the argument is of type ssize_t, so this cannot check for
//    long constants in a target-independent way.
inline bool GenTree::IsIntegralConst(ssize_t constVal) const
{
if ((gtOper == GT_CNS_INT) && (AsIntConCommon()->IconValue() == constVal))
{
return true;
}
if ((gtOper == GT_CNS_LNG) && (AsIntConCommon()->LngValue() == constVal))
{
return true;
}
return false;
}
//-------------------------------------------------------------------
// IsIntegralConstVector: returns true if this is a SIMD vector
// with all its elements equal to an integral constant.
//
// Arguments:
// constVal - const value of vector element
//
// Returns:
// True if this represents an integral const SIMD vector.
//
inline bool GenTree::IsIntegralConstVector(ssize_t constVal) const
{
#ifdef FEATURE_SIMD
// SIMDIntrinsicInit intrinsic with a const value as initializer
// represents a const vector.
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit) &&
AsSIMD()->Op(1)->IsIntegralConst(constVal))
{
assert(varTypeIsIntegral(AsSIMD()->GetSimdBaseType()));
assert(AsSIMD()->GetOperandCount() == 1);
return true;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
if (!varTypeIsIntegral(node->GetSimdBaseType()))
{
// Can't be an integral constant
return false;
}
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
if ((node->GetOperandCount() == 0) && (constVal == 0))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
else if ((node->GetOperandCount() == 1) && node->Op(1)->IsIntegralConst(constVal))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_Create) || (intrinsicId == NI_Vector256_Create);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_Create) || (intrinsicId == NI_Vector128_Create);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
//-------------------------------------------------------------------
// IsSIMDZero: returns true if this is a SIMD vector
// with all its elements equal to zero.
//
// Returns:
//    True if this represents a SIMD vector with all its elements equal to zero.
//
inline bool GenTree::IsSIMDZero() const
{
#ifdef FEATURE_SIMD
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit))
{
return (AsSIMD()->Op(1)->IsIntegralConst(0) || AsSIMD()->Op(1)->IsFPZero());
}
#endif
return false;
}
//-------------------------------------------------------------------
// IsFloatPositiveZero: returns true if this is exactly a const float value of positive zero (+0.0)
//
// Returns:
// True if this represents a const floating-point value of exactly positive zero (+0.0).
// Will return false if the value is negative zero (-0.0).
//
inline bool GenTree::IsFloatPositiveZero() const
{
if (IsCnsFltOrDbl())
{
        // This implementation is almost identical to IsCnsNonZeroFltOrDbl, but it checks
        // the bit pattern directly; using !IsCnsNonZeroFltOrDbl here would also return
        // true for nodes that are not floating-point constants at all.
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue == 0;
}
return false;
}
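// Note (an explanatory sketch): +0.0 has an all-zero IEEE-754 bit pattern, while -0.0 sets only
// the sign bit, so comparing the raw 64-bit pattern against zero distinguishes the two even
// though "+0.0 == -0.0" evaluates to true as a floating-point comparison.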
//-------------------------------------------------------------------
// IsVectorZero: returns true if this node is a HWIntrinsic that is Vector*_get_Zero.
//
// Returns:
// True if this represents a HWIntrinsic node that is Vector*_get_Zero.
//
// TODO: We already have IsSIMDZero() and IsIntegralConstVector(0),
// however, IsSIMDZero() does not cover hardware intrinsics, and IsIntegralConstVector(0) does not cover floating
// point. In order to not risk adverse behaviour by modifying those, this function 'IsVectorZero' was introduced.
// At some point, it makes sense to normalize this logic to be a single function call rather than have several
// separate ones; preferably this one.
inline bool GenTree::IsVectorZero() const
{
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
inline bool GenTree::IsBoxedValue()
{
assert(gtOper != GT_BOX || AsBox()->BoxOp() != nullptr);
return (gtOper == GT_BOX) && (gtFlags & GTF_BOX_VALUE);
}
#ifdef DEBUG
//------------------------------------------------------------------------
// IsValidCallArgument: Given a GenTree node that represents an argument,
//                      enforce (or don't enforce) the following invariant.
//
// Arguments:
// instance method for a GenTree node
//
// Return values:
// true: the GenTree node is accepted as a valid argument
//    false: the GenTree node is not accepted as a valid argument
//
// Notes:
// For targets that don't support arguments as a list of fields, we do not support GT_FIELD_LIST.
//
// Currently for AMD64 UNIX we allow a limited case where a GT_FIELD_LIST is
// allowed but every element must be a GT_LCL_FLD.
//
// For targets that allow for multi-reg args (and this includes the current ARM64 target),
// or that allow for passing promoted structs, we allow a GT_FIELD_LIST of arbitrary nodes.
// These would typically start out as GT_LCL_VARs or GT_LCL_FLDS or GT_INDs,
// but could be changed into constants or GT_COMMA trees by the later
// optimization phases.
inline bool GenTree::IsValidCallArgument()
{
if (OperIs(GT_FIELD_LIST))
{
#if !FEATURE_MULTIREG_ARGS && !FEATURE_PUT_STRUCT_ARG_STK
return false;
#else // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
// We allow this GT_FIELD_LIST as an argument
return true;
#endif // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
}
// We don't have either kind of list, so it satisfies the invariant.
return true;
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp1() const
{
return AsOp()->gtOp1;
}
#ifdef DEBUG
/* static */ inline bool GenTree::RequiresNonNullOp2(genTreeOps oper)
{
switch (oper)
{
case GT_ADD:
case GT_SUB:
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
case GT_INDEX:
case GT_ASG:
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_COMMA:
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
return true;
default:
return false;
}
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp2() const
{
assert(OperIsBinary());
GenTree* op2 = AsOp()->gtOp2;
// Only allow null op2 if the node type allows it, e.g. GT_LEA.
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtGetOp2IfPresent() const
{
/* AsOp()->gtOp2 is only valid for GTK_BINOP nodes. */
GenTree* op2 = OperIsBinary() ? AsOp()->gtOp2 : nullptr;
// This documents the genTreeOps for which AsOp()->gtOp2 cannot be nullptr.
// This helps prefix in its analysis of code which calls gtGetOp2()
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */)
{
GenTree* effectiveVal = this;
for (;;)
{
assert(!effectiveVal->OperIs(GT_PUTARG_TYPE));
if (effectiveVal->gtOper == GT_COMMA)
{
effectiveVal = effectiveVal->AsOp()->gtGetOp2();
}
else if (!commaOnly && (effectiveVal->gtOper == GT_NOP) && (effectiveVal->AsOp()->gtOp1 != nullptr))
{
effectiveVal = effectiveVal->AsOp()->gtOp1;
}
else
{
return effectiveVal;
}
}
}
//-------------------------------------------------------------------------
// gtCommaAssignVal - find value being assigned to a comma-wrapped assignment
//
// Returns:
// tree representing value being assigned if this tree represents a
// comma-wrapped local definition and use.
//
//    the original tree, if not.
//
inline GenTree* GenTree::gtCommaAssignVal()
{
GenTree* result = this;
if (OperIs(GT_COMMA))
{
GenTree* commaOp1 = AsOp()->gtOp1;
GenTree* commaOp2 = AsOp()->gtOp2;
if (commaOp2->OperIs(GT_LCL_VAR) && commaOp1->OperIs(GT_ASG))
{
GenTree* asgOp1 = commaOp1->AsOp()->gtOp1;
GenTree* asgOp2 = commaOp1->AsOp()->gtOp2;
if (asgOp1->OperIs(GT_LCL_VAR) && (asgOp1->AsLclVar()->GetLclNum() == commaOp2->AsLclVar()->GetLclNum()))
{
result = asgOp2;
}
}
}
return result;
}
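// For example (an illustrative sketch of the shape gtCommaAssignVal recognizes):
//
//     COMMA(ASG(LCL_VAR V01, <value>), LCL_VAR V01)
//
// returns <value>, since the comma merely defines V01 and then uses it; for any other shape the
// original tree is returned unchanged.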
//-------------------------------------------------------------------------
// gtSkipPutArgType - skip PUTARG_TYPE if it is presented.
//
// Returns:
// the original tree or its child if it was a PUTARG_TYPE.
//
// Notes:
// PUTARG_TYPE should be skipped when we are doing transformations
// that are not affected by ABI, for example: inlining, implicit byref morphing.
//
inline GenTree* GenTree::gtSkipPutArgType()
{
if (OperIs(GT_PUTARG_TYPE))
{
GenTree* res = AsUnOp()->gtGetOp1();
assert(!res->OperIs(GT_PUTARG_TYPE));
return res;
}
return this;
}
inline GenTree* GenTree::gtSkipReloadOrCopy()
{
// There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
if (gtOper == GT_RELOAD || gtOper == GT_COPY)
{
assert(gtGetOp1()->OperGet() != GT_RELOAD && gtGetOp1()->OperGet() != GT_COPY);
return gtGetOp1();
}
return this;
}
//-----------------------------------------------------------------------------------
// IsMultiRegCall: whether this is a call node returning its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register returning call
inline bool GenTree::IsMultiRegCall() const
{
if (this->IsCall())
{
return AsCall()->HasMultiRegRetVal();
}
return false;
}
inline bool GenTree::IsMultiRegLclVar() const
{
if (OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
return AsLclVar()->IsMultiReg();
}
return false;
}
//-----------------------------------------------------------------------------------
// GetRegByIndex: Get a specific register, based on regIndex, that is produced
// by this node.
//
// Arguments:
// regIndex - which register to return (must be 0 for non-multireg nodes)
//
// Return Value:
// The register, if any, assigned to this index for this node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline regNumber GenTree::GetRegByIndex(int regIndex)
{
if (regIndex == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegNumByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegNumByIdx(regIndex);
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegNumByIdx(regIndex);
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegNumByIdx(regIndex);
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIs(GT_HWINTRINSIC))
{
assert(regIndex == 1);
// TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers.
return AsHWIntrinsic()->GetOtherReg();
}
#endif // FEATURE_HW_INTRINSICS
if (OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
return AsLclVar()->GetRegNumByIdx(regIndex);
}
    assert(!"Invalid regIndex for GetRegByIndex");
return REG_NA;
}
//-----------------------------------------------------------------------------------
// GetRegTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// regIndex - which register type to return
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg node that is *not* a copy or reload (which must retrieve the
// type from its source), and 'regIndex' must be a valid index for this node.
//
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline var_types GenTree::GetRegTypeByIndex(int regIndex)
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->AsCall()->GetReturnTypeDesc()->GetReturnRegType(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegType(regIndex);
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegType(regIndex);
}
#endif
#endif // FEATURE_MULTIREG_RET
if (OperIsHWIntrinsic())
{
assert(TypeGet() == TYP_STRUCT);
#ifdef TARGET_ARM64
if (AsHWIntrinsic()->GetSimdSize() == 16)
{
return TYP_SIMD16;
}
else
{
assert(AsHWIntrinsic()->GetSimdSize() == 8);
return TYP_SIMD8;
}
#elif defined(TARGET_XARCH)
// At this time, the only multi-reg HW intrinsics all return the type of their
// arguments. If this changes, we will need a way to record or determine this.
return gtGetOp1()->TypeGet();
#endif
}
if (OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
if (TypeGet() == TYP_LONG)
{
return TYP_INT;
}
assert(TypeGet() == TYP_STRUCT);
assert((gtFlags & GTF_VAR_MULTIREG) != 0);
// The register type for a multireg lclVar requires looking at the LclVarDsc,
// which requires a Compiler instance. The caller must use the GetFieldTypeByIndex
// on GenTreeLclVar.
assert(!"GetRegTypeByIndex for LclVar");
}
assert(!"Invalid node type for GetRegTypeByIndex");
return TYP_UNDEF;
}
//-----------------------------------------------------------------------------------
// GetRegSpillFlagByIdx: Get a specific register's spill flags, based on regIndex,
// for this multi-reg node.
//
// Arguments:
// regIndex - which register's spill flags to return
//
// Return Value:
// The spill flags (GTF_SPILL GTF_SPILLED) for this register.
//
// Notes:
// This must be a multireg node and 'regIndex' must be a valid index for this node.
// This method returns the GTF "equivalent" flags based on the packed flags on the multireg node.
//
inline GenTreeFlags GenTree::GetRegSpillFlagByIdx(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegSpillFlagByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegSpillFlagByIdx(regIndex);
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegSpillFlagByIdx(regIndex);
}
#endif
#endif // FEATURE_MULTIREG_RET
if (OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
return AsLclVar()->GetRegSpillFlagByIdx(regIndex);
}
assert(!"Invalid node type for GetRegSpillFlagByIdx");
return GTF_EMPTY;
}
//-----------------------------------------------------------------------------------
// GetLastUseBit: Get the last use bit for regIndex
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// The bit to set, clear or query for the last-use of the regIndex'th value.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline GenTreeFlags GenTree::GetLastUseBit(int regIndex)
{
assert(regIndex < 4);
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
static_assert_no_msg((1 << MULTIREG_LAST_USE_SHIFT) == GTF_VAR_MULTIREG_DEATH0);
return (GenTreeFlags)(1 << (MULTIREG_LAST_USE_SHIFT + regIndex));
}
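// As a sketch of the bit layout: GetLastUseBit(regIndex) is GTF_VAR_MULTIREG_DEATH0 shifted left
// by regIndex (per the static_assert above), so for regIndex 2 the flag manipulated by
// SetLastUse(2) / IsLastUse(2) below is GTF_VAR_MULTIREG_DEATH0 << 2.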
//-----------------------------------------------------------------------------------
// IsLastUse: Determine whether this node is a last use of the regIndex'th value
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// true iff this is a last use.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::IsLastUse(int regIndex)
{
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
return (gtFlags & GetLastUseBit(regIndex)) != 0;
}
//-----------------------------------------------------------------------------------
// HasLastUse: Determine whether this node is a last use of any value
//
// Return Value:
// true iff this has any last uses (i.e. at any index).
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::HasLastUse()
{
return (gtFlags & (GTF_VAR_DEATH_MASK)) != 0;
}
//-----------------------------------------------------------------------------------
// SetLastUse: Set the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::SetLastUse(int regIndex)
{
gtFlags |= GetLastUseBit(regIndex);
}
//-----------------------------------------------------------------------------------
// ClearLastUse: Clear the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::ClearLastUse(int regIndex)
{
gtFlags &= ~GetLastUseBit(regIndex);
}
//-------------------------------------------------------------------------
// IsCopyOrReload: whether this is a GT_COPY or GT_RELOAD node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload node.
inline bool GenTree::IsCopyOrReload() const
{
return (gtOper == GT_COPY || gtOper == GT_RELOAD);
}
//-----------------------------------------------------------------------------------
// IsCopyOrReloadOfMultiRegCall: whether this is a GT_COPY or GT_RELOAD of a multi-reg
// call node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload of multi-reg call node.
inline bool GenTree::IsCopyOrReloadOfMultiRegCall() const
{
if (IsCopyOrReload())
{
return gtGetOp1()->IsMultiRegCall();
}
return false;
}
inline bool GenTree::IsCnsIntOrI() const
{
return (gtOper == GT_CNS_INT);
}
inline bool GenTree::IsIntegralConst() const
{
#ifdef TARGET_64BIT
return IsCnsIntOrI();
#else // !TARGET_64BIT
return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG));
#endif // !TARGET_64BIT
}
// Is this node an integer constant that fits in a 32-bit signed integer (INT32)
inline bool GenTree::IsIntCnsFitsInI32()
{
#ifdef TARGET_64BIT
return IsCnsIntOrI() && AsIntCon()->FitsInI32();
#else // !TARGET_64BIT
return IsCnsIntOrI();
#endif // !TARGET_64BIT
}
inline bool GenTree::IsCnsFltOrDbl() const
{
return OperGet() == GT_CNS_DBL;
}
inline bool GenTree::IsCnsNonZeroFltOrDbl() const
{
if (OperGet() == GT_CNS_DBL)
{
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue != 0;
}
return false;
}
inline bool GenTree::IsHelperCall()
{
return OperGet() == GT_CALL && AsCall()->gtCallType == CT_HELPER;
}
inline var_types GenTree::CastFromType()
{
return this->AsCast()->CastOp()->TypeGet();
}
inline var_types& GenTree::CastToType()
{
return this->AsCast()->gtCastType;
}
inline bool GenTree::isUsedFromSpillTemp() const
{
// If spilled and no reg at use, then it is used from the spill temp location rather than being reloaded.
if (((gtFlags & GTF_SPILLED) != 0) && ((gtFlags & GTF_NOREG_AT_USE) != 0))
{
return true;
}
return false;
}
/*****************************************************************************/
#ifndef HOST_64BIT
#include <poppack.h>
#endif
/*****************************************************************************/
const size_t TREE_NODE_SZ_SMALL = sizeof(GenTreeLclFld);
const size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
enum varRefKinds
{
VR_INVARIANT = 0x00, // an invariant value
VR_NONE = 0x00,
VR_IND_REF = 0x01, // an object reference
VR_IND_SCL = 0x02, // a non-object reference
VR_GLB_VAR = 0x04, // a global (clsVar)
};
/*****************************************************************************/
#endif // !GENTREE_H
/*****************************************************************************/
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XX This is the node in the semantic tree graph. It represents the operation XX
XX corresponding to the node, and other information during code-gen. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _GENTREE_H_
#define _GENTREE_H_
/*****************************************************************************/
#include "vartype.h" // For "var_types"
#include "target.h" // For "regNumber"
#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
#include "valuenumtype.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "simd.h"
#include "namedintrinsiclist.h"
#include "layout.h"
#include "debuginfo.h"
// Debugging GenTree is much easier if we add a magic virtual function to make the debugger able to figure out what type
// it's got. This is enabled by default in DEBUG. To enable it in RET builds (temporarily!), you need to change the
// build to define DEBUGGABLE_GENTREE=1, as well as pass /OPT:NOICF to the linker (or else all the vtables get merged,
// making the debugging value supplied by them useless).
#ifndef DEBUGGABLE_GENTREE
#ifdef DEBUG
#define DEBUGGABLE_GENTREE 1
#else // !DEBUG
#define DEBUGGABLE_GENTREE 0
#endif // !DEBUG
#endif // !DEBUGGABLE_GENTREE
// The SpecialCodeKind enum is used to indicate the type of special (unique)
// target block that will be targeted by an instruction.
// These are used by:
// GenTreeBoundsChk nodes (SCK_RNGCHK_FAIL, SCK_ARG_EXCPN, SCK_ARG_RNG_EXCPN)
// - these nodes have a field (gtThrowKind) to indicate which kind
// GenTreeOps nodes, for which codegen will generate the branch
// - it will use the appropriate kind based on the opcode, though it's not
// clear why SCK_OVERFLOW == SCK_ARITH_EXCPN
//
enum SpecialCodeKind
{
SCK_NONE,
SCK_RNGCHK_FAIL, // target when range check fails
SCK_DIV_BY_ZERO, // target for divide by zero (Not used on X86/X64)
SCK_ARITH_EXCPN, // target on arithmetic exception
SCK_OVERFLOW = SCK_ARITH_EXCPN, // target on overflow
SCK_ARG_EXCPN, // target on ArgumentException (currently used only for SIMD intrinsics)
SCK_ARG_RNG_EXCPN, // target on ArgumentOutOfRangeException (currently used only for SIMD intrinsics)
SCK_COUNT
};
/*****************************************************************************/
enum genTreeOps : BYTE
{
#define GTNODE(en, st, cm, ok) GT_##en,
#include "gtlist.h"
GT_COUNT,
#ifdef TARGET_64BIT
// GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
    // For the 64-bit targets we will only use GT_CNS_INT, as it can represent all the possible sizes
GT_CNS_NATIVELONG = GT_CNS_INT,
#else
// For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
// In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
GT_CNS_NATIVELONG = GT_CNS_LNG,
#endif
};
// The following enum defines a set of bit flags that can be used
// to classify expression tree nodes.
//
enum GenTreeOperKind
{
GTK_SPECIAL = 0x00, // special operator
GTK_LEAF = 0x01, // leaf operator
GTK_UNOP = 0x02, // unary operator
GTK_BINOP = 0x04, // binary operator
GTK_KINDMASK = (GTK_SPECIAL | GTK_LEAF | GTK_UNOP | GTK_BINOP), // operator kind mask
GTK_SMPOP = (GTK_UNOP | GTK_BINOP),
GTK_COMMUTE = 0x08, // commutative operator
    GTK_EXOP = 0x10, // Indicates an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
                     // by adding non-node fields to the unary or binary operator.
GTK_NOVALUE = 0x20, // node does not produce a value
GTK_MASK = 0xFF
};
// The following enum defines a set of bit flags that describe opers for the purposes
// of DEBUG-only checks. This is separate from the above "GenTreeOperKind"s to avoid
// making the table for those larger in Release builds. However, it resides in the same
// "namespace" and so all values here must be distinct from those in "GenTreeOperKind".
//
enum GenTreeDebugOperKind
{
DBK_FIRST_FLAG = GTK_MASK + 1,
DBK_NOTHIR = DBK_FIRST_FLAG, // This oper is not supported in HIR (before rationalization).
DBK_NOTLIR = DBK_FIRST_FLAG << 1, // This oper is not supported in LIR (after rationalization).
DBK_NOCONTAIN = DBK_FIRST_FLAG << 2, // This oper produces a value, but may not be contained.
DBK_MASK = ~GTK_MASK
};
/*****************************************************************************/
enum gtCallTypes : BYTE
{
CT_USER_FUNC, // User function
CT_HELPER, // Jit-helper
CT_INDIRECT, // Indirect call
CT_COUNT // fake entry (must be last)
};
#ifdef DEBUG
/*****************************************************************************
*
* TargetHandleTypes are used to determine the type of handle present inside GenTreeIntCon node.
* The values are such that they don't overlap with helper's or user function's handle.
*/
enum TargetHandleType : BYTE
{
THT_Unknown = 2,
THT_GSCookieCheck = 4,
THT_SetGSCookie = 6,
THT_IntializeArrayIntrinsics = 8
};
#endif
/*****************************************************************************/
struct BasicBlock;
enum BasicBlockFlags : unsigned __int64;
struct InlineCandidateInfo;
struct GuardedDevirtualizationCandidateInfo;
struct ClassProfileCandidateInfo;
struct LateDevirtualizationInfo;
typedef unsigned short AssertionIndex;
static const AssertionIndex NO_ASSERTION_INDEX = 0;
//------------------------------------------------------------------------
// GetAssertionIndex: return 1-based AssertionIndex from 0-based int index.
//
// Arguments:
// index - 0-based index
// Return Value:
// 1-based AssertionIndex.
inline AssertionIndex GetAssertionIndex(unsigned index)
{
return (AssertionIndex)(index + 1);
}
class AssertionInfo
{
// true if the assertion holds on the bbNext edge instead of the bbJumpDest edge (for GT_JTRUE nodes)
unsigned short m_isNextEdgeAssertion : 1;
// 1-based index of the assertion
unsigned short m_assertionIndex : 15;
AssertionInfo(bool isNextEdgeAssertion, AssertionIndex assertionIndex)
: m_isNextEdgeAssertion(isNextEdgeAssertion), m_assertionIndex(assertionIndex)
{
assert(m_assertionIndex == assertionIndex);
}
public:
AssertionInfo() : AssertionInfo(false, 0)
{
}
AssertionInfo(AssertionIndex assertionIndex) : AssertionInfo(false, assertionIndex)
{
}
static AssertionInfo ForNextEdge(AssertionIndex assertionIndex)
{
// Ignore the edge information if there's no assertion
bool isNextEdge = (assertionIndex != NO_ASSERTION_INDEX);
return AssertionInfo(isNextEdge, assertionIndex);
}
void Clear()
{
m_isNextEdgeAssertion = 0;
m_assertionIndex = NO_ASSERTION_INDEX;
}
bool HasAssertion() const
{
return m_assertionIndex != NO_ASSERTION_INDEX;
}
AssertionIndex GetAssertionIndex() const
{
return m_assertionIndex;
}
bool IsNextEdgeAssertion() const
{
return m_isNextEdgeAssertion;
}
};
/*****************************************************************************/
// GT_FIELD nodes will be lowered into more "code-gen-able" representations, like
// GT_IND's of addresses, or GT_LCL_FLD nodes. We'd like to preserve the more abstract
// information, and will therefore annotate such lowered nodes with FieldSeq's. A FieldSeq
// represents a (possibly) empty sequence of fields. The fields are in the order
// in which they are dereferenced. The first field may be an object field or a struct field;
// all subsequent fields must be struct fields.
struct FieldSeqNode
{
CORINFO_FIELD_HANDLE m_fieldHnd;
FieldSeqNode* m_next;
FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next) : m_fieldHnd(fieldHnd), m_next(next)
{
}
// returns true when this is the pseudo #FirstElem field sequence
bool IsFirstElemFieldSeq();
// returns true when this is the pseudo #ConstantIndex field sequence
bool IsConstantIndexFieldSeq();
    // returns true when this is the pseudo #FirstElem field sequence or the pseudo #ConstantIndex field sequence
bool IsPseudoField() const;
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(!IsPseudoField() && (m_fieldHnd != nullptr));
return m_fieldHnd;
}
FieldSeqNode* GetTail()
{
FieldSeqNode* tail = this;
while (tail->m_next != nullptr)
{
tail = tail->m_next;
}
return tail;
}
    // Make sure this provides methods that allow it to be used as a KeyFuncs type in JitHashTable.
static int GetHashCode(FieldSeqNode fsn)
{
return static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_fieldHnd)) ^
static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
}
static bool Equals(const FieldSeqNode& fsn1, const FieldSeqNode& fsn2)
{
return fsn1.m_fieldHnd == fsn2.m_fieldHnd && fsn1.m_next == fsn2.m_next;
}
};
// This class canonicalizes field sequences.
class FieldSeqStore
{
typedef JitHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*> FieldSeqNodeCanonMap;
CompAllocator m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
static FieldSeqNode s_notAField; // No value, just exists to provide an address.
// Dummy variables to provide the addresses for the "pseudo field handle" statics below.
static int FirstElemPseudoFieldStruct;
static int ConstantIndexPseudoFieldStruct;
public:
FieldSeqStore(CompAllocator alloc);
// Returns the (canonical in the store) singleton field sequence for the given handle.
FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd);
// This is a special distinguished FieldSeqNode indicating that a constant does *not*
// represent a valid field sequence. This is "infectious", in the sense that appending it
// (on either side) to any field sequence yields the "NotAField()" sequence.
static FieldSeqNode* NotAField()
{
return &s_notAField;
}
// Returns the (canonical in the store) field sequence representing the concatenation of
// the sequences represented by "a" and "b". Assumes that "a" and "b" are canonical; that is,
// they are the results of CreateSingleton, NotAField, or Append calls. If either of the arguments
// are the "NotAField" value, so is the result.
FieldSeqNode* Append(FieldSeqNode* a, FieldSeqNode* b);
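// Illustrative usage (not part of the original header), assuming "store" is a FieldSeqStore and "f1"/"f2"
// are CORINFO_FIELD_HANDLEs:
//
//   FieldSeqNode* ab  = store.Append(store.CreateSingleton(f1), store.CreateSingleton(f2)); // [f1, f2]
//   FieldSeqNode* bad = store.Append(ab, FieldSeqStore::NotAField());                       // NotAField()
//
// NotAField() absorbs on either side, and equal sequences come back as the same canonical pointer.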
// We have a few "pseudo" field handles:
// This treats the constant offset of the first element of something as if it were a field.
// Works for method table offsets of boxed structs, or first elem offset of arrays/strings.
static CORINFO_FIELD_HANDLE FirstElemPseudoField;
// If there is a constant index, we make a pseudo field to correspond to the constant added to
// offset of the indexed field. This keeps the field sequence structure "normalized", especially in the
// case where the element type is a struct, so we might add a further struct field offset.
static CORINFO_FIELD_HANDLE ConstantIndexPseudoField;
static bool IsPseudoField(CORINFO_FIELD_HANDLE hnd)
{
return hnd == FirstElemPseudoField || hnd == ConstantIndexPseudoField;
}
};
class GenTreeUseEdgeIterator;
class GenTreeOperandIterator;
struct Statement;
/*****************************************************************************/
// Forward declarations of the subtypes
#define GTSTRUCT_0(fn, en) struct GenTree##fn;
#define GTSTRUCT_1(fn, en) struct GenTree##fn;
#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn;
#define GTSTRUCT_N(fn, ...) struct GenTree##fn;
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
/*****************************************************************************/
// Don't format the GenTreeFlags declaration
// clang-format off
//------------------------------------------------------------------------
// GenTreeFlags: a bitmask of flags for GenTree stored in gtFlags
//
enum GenTreeFlags : unsigned int
{
GTF_EMPTY = 0,
//---------------------------------------------------------------------
// The first set of flags can be used with a large set of nodes, and
// thus they must all have distinct values. That is, one can test any
// expression node for one of these flags.
//---------------------------------------------------------------------
GTF_ASG = 0x00000001, // sub-expression contains an assignment
GTF_CALL = 0x00000002, // sub-expression contains a func. call
GTF_EXCEPT = 0x00000004, // sub-expression might throw an exception
GTF_GLOB_REF = 0x00000008, // sub-expression uses global variable(s)
GTF_ORDER_SIDEEFF = 0x00000010, // sub-expression has a re-ordering side effect
// If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
// otherwise the C# (run csc /o-) code:
// var v = side_eff_operation
// with no use of `v` will drop your tree on the floor.
GTF_PERSISTENT_SIDE_EFFECTS = GTF_ASG | GTF_CALL,
GTF_SIDE_EFFECT = GTF_PERSISTENT_SIDE_EFFECTS | GTF_EXCEPT,
GTF_GLOB_EFFECT = GTF_SIDE_EFFECT | GTF_GLOB_REF,
GTF_ALL_EFFECT = GTF_GLOB_EFFECT | GTF_ORDER_SIDEEFF,
GTF_REVERSE_OPS = 0x00000020, // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
GTF_CONTAINED = 0x00000040, // This node is contained (executed as part of its parent)
GTF_SPILLED = 0x00000080, // the value has been spilled
GTF_NOREG_AT_USE = 0x00000100, // tree node is in memory at the point of use
GTF_SET_FLAGS = 0x00000200, // Requires that codegen for this node set the flags. Use gtSetFlags() to check this flag.
GTF_USE_FLAGS = 0x00000400, // Indicates that this node uses the flags bits.
GTF_MAKE_CSE = 0x00000800, // Hoisted expression: try hard to make this into CSE (see optPerformHoistExpr)
GTF_DONT_CSE = 0x00001000, // Don't bother CSE'ing this expr
GTF_COLON_COND = 0x00002000, // This node is conditionally executed (part of ? :)
GTF_NODE_MASK = GTF_COLON_COND,
GTF_BOOLEAN = 0x00004000, // value is known to be 0/1
GTF_UNSIGNED = 0x00008000, // With GT_CAST: the source operand is an unsigned type
// With operators: the specified node is an unsigned operator
GTF_LATE_ARG = 0x00010000, // The specified node is evaluated to a temp in the arg list, and this temp is added to gtCallLateArgs.
GTF_SPILL = 0x00020000, // Needs to be spilled here
// The extra flag GTF_IS_IN_CSE is used to tell the consumer of the side effect flags
// that we are calling in the context of performing a CSE, thus we
// should allow the run-once side effects of running a class constructor.
//
// The only requirement of this flag is that it not overlap any of the
// side-effect flags. The actual bit used is otherwise arbitrary.
GTF_IS_IN_CSE = GTF_BOOLEAN,
GTF_COMMON_MASK = 0x0003FFFF, // mask of all the flags above
GTF_REUSE_REG_VAL = 0x00800000, // This is set by the register allocator on nodes whose value already exists in the
// register assigned to this node, so the code generator does not have to generate
// code to produce the value. It is currently used only on constant nodes.
// It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
// it is not needed for lclVars and is highly unlikely to be useful for indir nodes.
//---------------------------------------------------------------------
// The following flags can be used only with a small set of nodes, and
// thus their values need not be distinct (other than within the set
// that goes with a particular node/nodes, of course). That is, one can
// only test for one of these flags if the 'gtOper' value is tested as
// well to make sure it's the right operator for the particular flag.
//---------------------------------------------------------------------
// NB: GTF_VAR_* and GTF_REG_* share the same namespace of flags.
// These flags are also used by GT_LCL_FLD, and the last-use (DEATH) flags are also used by GenTreeCopyOrReload.
GTF_VAR_DEF = 0x80000000, // GT_LCL_VAR -- this is a definition
GTF_VAR_USEASG = 0x40000000, // GT_LCL_VAR -- this is a partial definition, a use of the previous definition is implied
// A partial definition usually occurs when a struct field is assigned to (s.f = ...) or
// when a scalar typed variable is assigned to via a narrow store (*((byte*)&i) = ...).
// Last-use bits.
// Note that a node marked GTF_VAR_MULTIREG can only be a pure definition of all the fields, or a pure use of all the fields,
// so we don't need the equivalent of GTF_VAR_USEASG.
GTF_VAR_MULTIREG_DEATH0 = 0x04000000, // GT_LCL_VAR -- The last-use bit for a lclVar (the first register if it is multireg).
GTF_VAR_DEATH = GTF_VAR_MULTIREG_DEATH0,
GTF_VAR_MULTIREG_DEATH1 = 0x08000000, // GT_LCL_VAR -- The last-use bit for the second register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH2 = 0x10000000, // GT_LCL_VAR -- The last-use bit for the third register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH3 = 0x20000000, // GT_LCL_VAR -- The last-use bit for the fourth register of a multireg lclVar.
GTF_VAR_DEATH_MASK = GTF_VAR_MULTIREG_DEATH0 | GTF_VAR_MULTIREG_DEATH1 | GTF_VAR_MULTIREG_DEATH2 | GTF_VAR_MULTIREG_DEATH3,
// This is the amount we have to shift, plus the regIndex, to get the last use bit we want.
#define MULTIREG_LAST_USE_SHIFT 26
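// Illustrative note (not part of the original header): the last-use bit for register index i is
// (1 << (MULTIREG_LAST_USE_SHIFT + i)); for example, i == 1 gives 1 << 27 == 0x08000000, which is
// GTF_VAR_MULTIREG_DEATH1 above.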
GTF_VAR_MULTIREG = 0x02000000, // This is a struct or (on 32-bit platforms) long variable that is used or defined
// to/from a multireg source or destination (e.g. a call arg or return, or an op
// that returns its result in multiple registers such as a long multiply).
GTF_LIVENESS_MASK = GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_DEATH_MASK,
    GTF_VAR_CAST            = 0x01000000, // GT_LCL_VAR -- has been explicitly cast (variable node may not be of the type of the local)
    GTF_VAR_ITERATOR        = 0x00800000, // GT_LCL_VAR -- this is an iterator reference in the loop condition
GTF_VAR_CLONED = 0x00400000, // GT_LCL_VAR -- this node has been cloned or is a clone
GTF_VAR_CONTEXT = 0x00200000, // GT_LCL_VAR -- this node is part of a runtime lookup
GTF_VAR_FOLDED_IND = 0x00100000, // GT_LCL_VAR -- this node was folded from *(typ*)&lclVar expression tree in fgMorphSmpOp()
// where 'typ' is a small type and 'lclVar' corresponds to a normalized-on-store local variable.
// This flag identifies such nodes in order to make sure that fgDoNormalizeOnStore() is called
// on their parents in post-order morph.
// Relevant for inlining optimizations (see fgInlinePrependStatements)
GTF_VAR_ARR_INDEX = 0x00000020, // The variable is part of (the index portion of) an array index expression.
// Shares a value with GTF_REVERSE_OPS, which is meaningless for local var.
// For additional flags for GT_CALL node see GTF_CALL_M_*
GTF_CALL_UNMANAGED = 0x80000000, // GT_CALL -- direct call to unmanaged code
GTF_CALL_INLINE_CANDIDATE = 0x40000000, // GT_CALL -- this call has been marked as an inline candidate
GTF_CALL_VIRT_KIND_MASK = 0x30000000, // GT_CALL -- mask of the below call kinds
GTF_CALL_NONVIRT = 0x00000000, // GT_CALL -- a non virtual call
GTF_CALL_VIRT_STUB = 0x10000000, // GT_CALL -- a stub-dispatch virtual call
GTF_CALL_VIRT_VTABLE = 0x20000000, // GT_CALL -- a vtable-based virtual call
GTF_CALL_NULLCHECK = 0x08000000, // GT_CALL -- must check instance pointer for null
GTF_CALL_POP_ARGS = 0x04000000, // GT_CALL -- caller pop arguments?
GTF_CALL_HOISTABLE = 0x02000000, // GT_CALL -- call is hoistable
GTF_MEMORYBARRIER_LOAD = 0x40000000, // GT_MEMORYBARRIER -- Load barrier
GTF_FLD_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_FLD_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- field access requires preceding class/static init helper
GTF_INX_RNGCHK = 0x80000000, // GT_INDEX/GT_INDEX_ADDR -- the array reference should be range-checked.
GTF_INX_STRING_LAYOUT = 0x40000000, // GT_INDEX -- this uses the special string array layout
GTF_INX_NOFAULT = 0x20000000, // GT_INDEX -- the INDEX does not throw an exception (morph to GTF_IND_NONFAULTING)
GTF_IND_TGT_NOT_HEAP = 0x80000000, // GT_IND -- the target is not on the heap
    GTF_IND_VOLATILE        = 0x40000000, // GT_IND -- the load or store must use volatile semantics (this is a nop on X86)
GTF_IND_NONFAULTING = 0x20000000, // Operations for which OperIsIndir() is true -- An indir that cannot fault.
// Same as GTF_ARRLEN_NONFAULTING.
GTF_IND_TGTANYWHERE = 0x10000000, // GT_IND -- the target could be anywhere
GTF_IND_TLS_REF = 0x08000000, // GT_IND -- the target is accessed via TLS
GTF_IND_ASG_LHS = 0x04000000, // GT_IND -- this GT_IND node is (the effective val) of the LHS of an
// assignment; don't evaluate it independently.
GTF_IND_REQ_ADDR_IN_REG = GTF_IND_ASG_LHS, // GT_IND -- requires its addr operand to be evaluated
// into a register. This flag is useful in cases where it
// is required to generate register indirect addressing mode.
// One such case is virtual stub calls on xarch. This is only
// valid in the backend, where GTF_IND_ASG_LHS is not necessary
// (all such indirections will be lowered to GT_STOREIND).
GTF_IND_UNALIGNED = 0x02000000, // GT_IND -- the load or store is unaligned (we assume worst case
// alignment of 1 byte)
GTF_IND_INVARIANT = 0x01000000, // GT_IND -- the target is invariant (a prejit indirection)
GTF_IND_ARR_INDEX = 0x00800000, // GT_IND -- the indirection represents an (SZ) array index
GTF_IND_NONNULL = 0x00400000, // GT_IND -- the indirection never returns null (zero)
GTF_IND_FLAGS = GTF_IND_VOLATILE | GTF_IND_TGTANYWHERE | GTF_IND_NONFAULTING | GTF_IND_TLS_REF | \
GTF_IND_UNALIGNED | GTF_IND_INVARIANT | GTF_IND_NONNULL | GTF_IND_ARR_INDEX | GTF_IND_TGT_NOT_HEAP,
GTF_CLS_VAR_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_CLS_VAR_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_FLD_INITCLASS
GTF_CLS_VAR_ASG_LHS = 0x04000000, // GT_CLS_VAR -- this GT_CLS_VAR node is (the effective val) of the LHS
// of an assignment; don't evaluate it independently.
GTF_ADDRMODE_NO_CSE = 0x80000000, // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex
// addressing mode
GTF_MUL_64RSLT = 0x40000000, // GT_MUL -- produce 64-bit result
GTF_RELOP_NAN_UN = 0x80000000, // GT_<relop> -- Is branch taken if ops are NaN?
GTF_RELOP_JMP_USED = 0x40000000, // GT_<relop> -- result of compare used for jump or ?:
GTF_RELOP_ZTT = 0x08000000, // GT_<relop> -- Loop test cloned for converting while-loops into do-while
// with explicit "loop test" in the header block.
GTF_RELOP_SJUMP_OPT = 0x04000000, // GT_<relop> -- Swap signed jl/jge with js/jns during emitter, reuses flags
// from previous instruction.
GTF_JCMP_EQ = 0x80000000, // GTF_JCMP_EQ -- Branch on equal rather than not equal
GTF_JCMP_TST = 0x40000000, // GTF_JCMP_TST -- Use bit test instruction rather than compare against zero instruction
GTF_RET_MERGED = 0x80000000, // GT_RETURN -- This is a return generated during epilog merging.
GTF_QMARK_CAST_INSTOF = 0x80000000, // GT_QMARK -- Is this a top (not nested) level qmark created for
// castclass or instanceof?
GTF_BOX_VALUE = 0x80000000, // GT_BOX -- "box" is on a value type
GTF_ICON_HDL_MASK = 0xFF000000, // Bits used by handle types below
GTF_ICON_SCOPE_HDL = 0x01000000, // GT_CNS_INT -- constant is a scope handle
GTF_ICON_CLASS_HDL = 0x02000000, // GT_CNS_INT -- constant is a class handle
GTF_ICON_METHOD_HDL = 0x03000000, // GT_CNS_INT -- constant is a method handle
GTF_ICON_FIELD_HDL = 0x04000000, // GT_CNS_INT -- constant is a field handle
GTF_ICON_STATIC_HDL = 0x05000000, // GT_CNS_INT -- constant is a handle to static data
GTF_ICON_STR_HDL = 0x06000000, // GT_CNS_INT -- constant is a string handle
GTF_ICON_CONST_PTR = 0x07000000, // GT_CNS_INT -- constant is a pointer to immutable data, (e.g. IAT_PPVALUE)
GTF_ICON_GLOBAL_PTR = 0x08000000, // GT_CNS_INT -- constant is a pointer to mutable data (e.g. from the VM state)
GTF_ICON_VARG_HDL = 0x09000000, // GT_CNS_INT -- constant is a var arg cookie handle
GTF_ICON_PINVKI_HDL = 0x0A000000, // GT_CNS_INT -- constant is a pinvoke calli handle
GTF_ICON_TOKEN_HDL = 0x0B000000, // GT_CNS_INT -- constant is a token handle (other than class, method or field)
GTF_ICON_TLS_HDL = 0x0C000000, // GT_CNS_INT -- constant is a TLS ref with offset
GTF_ICON_FTN_ADDR = 0x0D000000, // GT_CNS_INT -- constant is a function address
GTF_ICON_CIDMID_HDL = 0x0E000000, // GT_CNS_INT -- constant is a class ID or a module ID
GTF_ICON_BBC_PTR = 0x0F000000, // GT_CNS_INT -- constant is a basic block count pointer
GTF_ICON_STATIC_BOX_PTR = 0x10000000, // GT_CNS_INT -- constant is an address of the box for a STATIC_IN_HEAP field
// GTF_ICON_REUSE_REG_VAL = 0x00800000 // GT_CNS_INT -- GTF_REUSE_REG_VAL, defined above
GTF_ICON_FIELD_OFF = 0x00400000, // GT_CNS_INT -- constant is a field offset
GTF_ICON_SIMD_COUNT = 0x00200000, // GT_CNS_INT -- constant is Vector<T>.Count
GTF_ICON_INITCLASS = 0x00100000, // GT_CNS_INT -- Constant is used to access a static that requires preceding
// class/static init helper. In some cases, the constant is
// the address of the static field itself, and in other cases
// there's an extra layer of indirection and it is the address
// of the cell that the runtime will fill in with the address
// of the static field; in both of those cases, the constant
// is what gets flagged.
GTF_BLK_VOLATILE = GTF_IND_VOLATILE, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is a volatile block operation
GTF_BLK_UNALIGNED = GTF_IND_UNALIGNED, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is an unaligned block operation
GTF_OVERFLOW = 0x10000000, // Supported for: GT_ADD, GT_SUB, GT_MUL and GT_CAST.
// Requires an overflow check. Use gtOverflow(Ex)() to check this flag.
GTF_DIV_BY_CNS_OPT = 0x80000000, // GT_DIV -- Uses the division by constant optimization to compute this division
GTF_CHK_INDEX_INBND = 0x80000000, // GT_BOUNDS_CHECK -- have proved this check is always in-bounds
GTF_ARRLEN_ARR_IDX = 0x80000000, // GT_ARR_LENGTH -- Length which feeds into an array index expression
GTF_ARRLEN_NONFAULTING = 0x20000000, // GT_ARR_LENGTH -- An array length operation that cannot fault. Same as GT_IND_NONFAULTING.
GTF_SIMDASHW_OP = 0x80000000, // GT_HWINTRINSIC -- Indicates that the structHandle should be gotten from gtGetStructHandleForSIMD
// rather than from gtGetStructHandleForHWSIMD.
// Flag used by assertion prop to indicate that a type is a TYP_LONG
#ifdef TARGET_64BIT
GTF_ASSERTION_PROP_LONG = 0x00000001,
#endif // TARGET_64BIT
};
inline constexpr GenTreeFlags operator ~(GenTreeFlags a)
{
return (GenTreeFlags)(~(unsigned int)a);
}
inline constexpr GenTreeFlags operator |(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeFlags operator &(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator |=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeFlags& operator &=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator ^=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a ^ (unsigned int)b);
}
// Can any side-effects be observed externally, say by a caller method?
// For assignments, only assignments to global memory can be observed
// externally, whereas simple assignments to local variables can not.
//
// Be careful when using this inside a "try" protected region as the
// order of assignments to local variables would need to be preserved
// wrt side effects if the variables are alive on entry to the
// "catch/finally" region. In such cases, even assignments to locals
// will have to be restricted.
#define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
(((flags) & (GTF_CALL | GTF_EXCEPT)) || (((flags) & (GTF_ASG | GTF_GLOB_REF)) == (GTF_ASG | GTF_GLOB_REF)))
#if defined(DEBUG)
//------------------------------------------------------------------------
// GenTreeDebugFlags: a bitmask of debug-only flags for GenTree stored in gtDebugFlags
//
enum GenTreeDebugFlags : unsigned int
{
GTF_DEBUG_NONE = 0x00000000, // No debug flags.
GTF_DEBUG_NODE_MORPHED = 0x00000001, // the node has been morphed (in the global morphing phase)
GTF_DEBUG_NODE_SMALL = 0x00000002,
GTF_DEBUG_NODE_LARGE = 0x00000004,
GTF_DEBUG_NODE_CG_PRODUCED = 0x00000008, // genProduceReg has been called on this node
GTF_DEBUG_NODE_CG_CONSUMED = 0x00000010, // genConsumeReg has been called on this node
GTF_DEBUG_NODE_LSRA_ADDED = 0x00000020, // This node was added by LSRA
GTF_DEBUG_NODE_MASK = 0x0000003F, // These flags are all node (rather than operation) properties.
GTF_DEBUG_VAR_CSE_REF = 0x00800000, // GT_LCL_VAR -- This is a CSE LCL_VAR node
};
inline constexpr GenTreeDebugFlags operator ~(GenTreeDebugFlags a)
{
return (GenTreeDebugFlags)(~(unsigned int)a);
}
inline constexpr GenTreeDebugFlags operator |(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeDebugFlags operator &(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeDebugFlags& operator |=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeDebugFlags& operator &=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
#endif // defined(DEBUG)
// clang-format on
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper)
{
return (firstOper + 1) == secondOper;
}
template <typename... Opers>
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper, Opers... otherOpers)
{
return OpersAreContiguous(firstOper, secondOper) && OpersAreContiguous(secondOper, otherOpers...);
}
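// Illustrative note (not part of the original header): the variadic overload folds pairwise, so
// OpersAreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR) is true only if each oper's value is
// exactly one greater than its predecessor's; range checks such as OperIsConst() below rely on this via
// static_assert_no_msg.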
#ifndef HOST_64BIT
#include <pshpack4.h>
#endif
struct GenTree
{
// We use GTSTRUCT_0 only for the category of simple ops.
#define GTSTRUCT_0(fn, en) \
GenTree##fn* As##fn() \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_N(fn, ...) \
GenTree##fn* As##fn() \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en)
#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2)
#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3)
#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4)
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
genTreeOps gtOper; // enum subtype BYTE
var_types gtType; // enum subtype BYTE
genTreeOps OperGet() const
{
return gtOper;
}
var_types TypeGet() const
{
return gtType;
}
#ifdef DEBUG
genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
#endif
#define NO_CSE (0)
#define IS_CSE_INDEX(x) ((x) != 0)
#define IS_CSE_USE(x) ((x) > 0)
#define IS_CSE_DEF(x) ((x) < 0)
#define GET_CSE_INDEX(x) (((x) > 0) ? x : -(x))
#define TO_CSE_DEF(x) (-(x))
signed char gtCSEnum; // 0 or the CSE index (negated if def)
// valid only for CSE expressions
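// Illustrative note (not part of the original header), per the macros above: gtCSEnum == 0 means the node
// is not part of a CSE, gtCSEnum == 3 marks a use of CSE candidate #3, and gtCSEnum == -3 marks its
// definition; GET_CSE_INDEX recovers 3 in both cases.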
unsigned char gtLIRFlags; // Used for nodes that are in LIR. See LIR::Flags in lir.h for the various flags.
AssertionInfo gtAssertionInfo;
bool GeneratesAssertion() const
{
return gtAssertionInfo.HasAssertion();
}
void ClearAssertion()
{
gtAssertionInfo.Clear();
}
AssertionInfo GetAssertionInfo() const
{
return gtAssertionInfo;
}
void SetAssertionInfo(AssertionInfo info)
{
gtAssertionInfo = info;
}
//
// Cost metrics on the node. Don't allow direct access to the variable for setting.
//
public:
#ifdef DEBUG
// You are not allowed to read the cost values before they have been set in gtSetEvalOrder().
// Keep track of whether the costs have been initialized, and assert if they are read before being initialized.
// Obviously, this information does need to be initialized when a node is created.
// This is public so the dumpers can see it.
bool gtCostsInitialized;
#endif // DEBUG
#define MAX_COST UCHAR_MAX
#define IND_COST_EX 3 // execution cost for an indirection
unsigned char GetCostEx() const
{
assert(gtCostsInitialized);
return _gtCostEx;
}
unsigned char GetCostSz() const
{
assert(gtCostsInitialized);
return _gtCostSz;
}
// Set the costs. They are always both set at the same time.
// Don't use the "put" property: force calling this function, to make it more obvious in the few places
// that set the values.
// Note that costs are only set in gtSetEvalOrder() and its callees.
void SetCosts(unsigned costEx, unsigned costSz)
{
assert(costEx != (unsigned)-1); // looks bogus
assert(costSz != (unsigned)-1); // looks bogus
INDEBUG(gtCostsInitialized = true;)
_gtCostEx = (costEx > MAX_COST) ? MAX_COST : (unsigned char)costEx;
_gtCostSz = (costSz > MAX_COST) ? MAX_COST : (unsigned char)costSz;
}
// Optimized copy function, to avoid the SetCosts() function comparisons, and make it more clear that a node copy is
// happening.
void CopyCosts(const GenTree* const tree)
{
// If the 'tree' costs aren't initialized, we'll hit an assert below.
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->GetCostEx();
_gtCostSz = tree->GetCostSz();
}
// Same as CopyCosts, but avoids asserts if the costs we are copying have not been initialized.
// This is because the importer, for example, clones nodes, before these costs have been initialized.
// Note that we directly access the 'tree' costs, not going through the accessor functions (either
// directly or through the properties).
void CopyRawCosts(const GenTree* const tree)
{
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->_gtCostEx;
_gtCostSz = tree->_gtCostSz;
}
private:
unsigned char _gtCostEx; // estimate of expression execution cost
unsigned char _gtCostSz; // estimate of expression code size cost
//
// Register or register pair number of the node.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
public:
enum genRegTag
{
GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum
GT_REGTAG_REG // _gtRegNum has been assigned
};
genRegTag GetRegTag() const
{
assert(gtRegTag == GT_REGTAG_NONE || gtRegTag == GT_REGTAG_REG);
return gtRegTag;
}
private:
genRegTag gtRegTag; // What is in _gtRegNum?
#endif // DEBUG
private:
// This stores the register assigned to the node. If a register is not assigned, _gtRegNum is set to REG_NA.
regNumberSmall _gtRegNum;
// Count of operands. Used *only* by GenTreeMultiOp, exists solely due to padding constraints.
friend struct GenTreeMultiOp;
uint8_t m_operandCount;
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool canBeContained() const;
#endif
// for codegen purposes, is this node a subnode of its parent
bool isContained() const;
bool isContainedIndir() const;
bool isIndirAddrMode();
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
bool isIndir() const;
bool isContainedIntOrIImmed() const
{
return isContained() && IsCnsIntOrI() && !isUsedFromSpillTemp();
}
bool isContainedFltOrDblImmed() const
{
return isContained() && (OperGet() == GT_CNS_DBL);
}
bool isLclField() const
{
return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD;
}
bool isUsedFromSpillTemp() const;
// Indicates whether it is a memory op.
// Right now it includes Indir and LclField ops.
bool isMemoryOp() const
{
return isIndir() || isLclField();
}
bool isUsedFromMemory() const
{
return ((isContained() && (isMemoryOp() || (OperGet() == GT_LCL_VAR) || (OperGet() == GT_CNS_DBL))) ||
isUsedFromSpillTemp());
}
bool isLclVarUsedFromMemory() const
{
return (OperGet() == GT_LCL_VAR) && (isContained() || isUsedFromSpillTemp());
}
bool isLclFldUsedFromMemory() const
{
return isLclField() && (isContained() || isUsedFromSpillTemp());
}
bool isUsedFromReg() const
{
return !isContained() && !isUsedFromSpillTemp();
}
regNumber GetRegNum() const
{
assert((gtRegTag == GT_REGTAG_REG) || (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case,
// and fix everyplace that reads undefined
// values
regNumber reg = (regNumber)_gtRegNum;
assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads
// undefined values
(reg >= REG_FIRST && reg <= REG_COUNT));
return reg;
}
void SetRegNum(regNumber reg)
{
assert(reg >= REG_FIRST && reg <= REG_COUNT);
_gtRegNum = (regNumberSmall)reg;
INDEBUG(gtRegTag = GT_REGTAG_REG;)
assert(_gtRegNum == reg);
}
void ClearRegNum()
{
_gtRegNum = REG_NA;
INDEBUG(gtRegTag = GT_REGTAG_NONE;)
}
// Copy the _gtRegNum/gtRegTag fields
void CopyReg(GenTree* from);
bool gtHasReg(Compiler* comp) const;
int GetRegisterDstCount(Compiler* compiler) const;
regMaskTP gtGetRegMask() const;
GenTreeFlags gtFlags;
#if defined(DEBUG)
GenTreeDebugFlags gtDebugFlags;
#endif // defined(DEBUG)
ValueNumPair gtVNPair;
regMaskSmall gtRsvdRegs; // set of fixed trashed registers
unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const;
regNumber GetSingleTempReg(regMaskTP mask = (regMaskTP)-1);
regNumber ExtractTempReg(regMaskTP mask = (regMaskTP)-1);
void SetVNsFromNode(GenTree* tree)
{
gtVNPair = tree->gtVNPair;
}
ValueNum GetVN(ValueNumKind vnk) const
{
if (vnk == VNK_Liberal)
{
return gtVNPair.GetLiberal();
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.GetConservative();
}
}
void SetVN(ValueNumKind vnk, ValueNum vn)
{
if (vnk == VNK_Liberal)
{
return gtVNPair.SetLiberal(vn);
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.SetConservative(vn);
}
}
void SetVNs(ValueNumPair vnp)
{
gtVNPair = vnp;
}
void ClearVN()
{
gtVNPair = ValueNumPair(); // Initializes both elements to "NoVN".
}
GenTree* gtNext;
GenTree* gtPrev;
#ifdef DEBUG
unsigned gtTreeID;
unsigned gtSeqNum; // liveness traversal order within the current statement
int gtUseNum; // use-ordered traversal within the function
#endif
static const unsigned char gtOperKindTable[];
static unsigned OperKind(unsigned gtOper)
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
unsigned OperKind() const
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
static bool IsExOp(unsigned opKind)
{
return (opKind & GTK_EXOP) != 0;
}
bool IsValue() const
{
if ((OperKind(gtOper) & GTK_NOVALUE) != 0)
{
return false;
}
if (gtType == TYP_VOID)
{
// These are the only operators which can produce either VOID or non-VOID results.
assert(OperIs(GT_NOP, GT_CALL, GT_COMMA) || OperIsCompare() || OperIsLong() || OperIsSIMD() ||
OperIsHWIntrinsic());
return false;
}
return true;
}
// LIR flags
// These helper methods, along with the flag values they manipulate, are defined in lir.h
//
// UnusedValue indicates that, although this node produces a value, it is unused.
inline void SetUnusedValue();
inline void ClearUnusedValue();
inline bool IsUnusedValue() const;
// RegOptional indicates that codegen can still generate code even if it isn't allocated a register.
inline bool IsRegOptional() const;
inline void SetRegOptional();
inline void ClearRegOptional();
#ifdef DEBUG
void dumpLIRFlags();
#endif
bool TypeIs(var_types type) const
{
return gtType == type;
}
template <typename... T>
bool TypeIs(var_types type, T... rest) const
{
return TypeIs(type) || TypeIs(rest...);
}
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper)
{
return operCompare == oper;
}
template <typename... T>
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper, T... rest)
{
return StaticOperIs(operCompare, oper) || StaticOperIs(operCompare, rest...);
}
bool OperIs(genTreeOps oper) const
{
return OperGet() == oper;
}
template <typename... T>
bool OperIs(genTreeOps oper, T... rest) const
{
return OperIs(oper) || OperIs(rest...);
}
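// Illustrative usage (not part of the original header): "node->OperIs(GT_ADD, GT_SUB, GT_NEG)" expands
// to OperIs(GT_ADD) || OperIs(GT_SUB) || OperIs(GT_NEG); TypeIs() above expands the same way over
// var_types, so multi-way checks read as a single call.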
static bool OperIsConst(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR));
return (GT_CNS_INT <= gtOper) && (gtOper <= GT_CNS_STR);
}
bool OperIsConst() const
{
return OperIsConst(gtOper);
}
static bool OperIsLeaf(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
bool OperIsLeaf() const
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
static bool OperIsLocal(genTreeOps gtOper)
{
static_assert_no_msg(
OpersAreContiguous(GT_PHI_ARG, GT_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
return (GT_PHI_ARG <= gtOper) && (gtOper <= GT_STORE_LCL_FLD);
}
static bool OperIsLocalAddr(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR_ADDR || gtOper == GT_LCL_FLD_ADDR);
}
static bool OperIsLocalField(genTreeOps gtOper)
{
return (gtOper == GT_LCL_FLD || gtOper == GT_LCL_FLD_ADDR || gtOper == GT_STORE_LCL_FLD);
}
inline bool OperIsLocalField() const
{
return OperIsLocalField(gtOper);
}
static bool OperIsScalarLocal(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR || gtOper == GT_STORE_LCL_VAR);
}
static bool OperIsNonPhiLocal(genTreeOps gtOper)
{
return OperIsLocal(gtOper) && (gtOper != GT_PHI_ARG);
}
static bool OperIsLocalRead(genTreeOps gtOper)
{
return (OperIsLocal(gtOper) && !OperIsLocalStore(gtOper));
}
static bool OperIsLocalStore(genTreeOps gtOper)
{
return (gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD);
}
static bool OperIsAddrMode(genTreeOps gtOper)
{
return (gtOper == GT_LEA);
}
static bool OperIsInitVal(genTreeOps gtOper)
{
return (gtOper == GT_INIT_VAL);
}
bool OperIsInitVal() const
{
return OperIsInitVal(OperGet());
}
bool IsConstInitVal() const
{
return (gtOper == GT_CNS_INT) || (OperIsInitVal() && (gtGetOp1()->gtOper == GT_CNS_INT));
}
bool OperIsBlkOp();
bool OperIsCopyBlkOp();
bool OperIsInitBlkOp();
static bool OperIsBlk(genTreeOps gtOper)
{
return (gtOper == GT_BLK) || (gtOper == GT_OBJ) || OperIsStoreBlk(gtOper);
}
bool OperIsBlk() const
{
return OperIsBlk(OperGet());
}
static bool OperIsStoreBlk(genTreeOps gtOper)
{
return StaticOperIs(gtOper, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK);
}
bool OperIsStoreBlk() const
{
return OperIsStoreBlk(OperGet());
}
bool OperIsPutArgSplit() const
{
#if FEATURE_ARG_SPLIT
assert((gtOper != GT_PUTARG_SPLIT) || compFeatureArgSplit());
return gtOper == GT_PUTARG_SPLIT;
#else // !FEATURE_ARG_SPLIT
return false;
#endif
}
bool OperIsPutArgStk() const
{
return gtOper == GT_PUTARG_STK;
}
bool OperIsPutArgStkOrSplit() const
{
return OperIsPutArgStk() || OperIsPutArgSplit();
}
bool OperIsPutArgReg() const
{
return gtOper == GT_PUTARG_REG;
}
bool OperIsPutArg() const
{
return OperIsPutArgStk() || OperIsPutArgReg() || OperIsPutArgSplit();
}
bool OperIsFieldList() const
{
return OperIs(GT_FIELD_LIST);
}
bool OperIsMultiRegOp() const
{
#if !defined(TARGET_64BIT)
if (OperIs(GT_MUL_LONG))
{
return true;
}
#if defined(TARGET_ARM)
if (OperIs(GT_PUTARG_REG, GT_BITCAST))
{
return true;
}
#endif // TARGET_ARM
#endif // TARGET_64BIT
return false;
}
bool OperIsAddrMode() const
{
return OperIsAddrMode(OperGet());
}
bool OperIsLocal() const
{
return OperIsLocal(OperGet());
}
bool OperIsLocalAddr() const
{
return OperIsLocalAddr(OperGet());
}
bool OperIsScalarLocal() const
{
return OperIsScalarLocal(OperGet());
}
bool OperIsNonPhiLocal() const
{
return OperIsNonPhiLocal(OperGet());
}
bool OperIsLocalStore() const
{
return OperIsLocalStore(OperGet());
}
bool OperIsLocalRead() const
{
return OperIsLocalRead(OperGet());
}
static bool OperIsCompare(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE));
return (GT_EQ <= gtOper) && (gtOper <= GT_TEST_NE);
}
bool OperIsCompare() const
{
return OperIsCompare(OperGet());
}
static bool OperIsShift(genTreeOps gtOper)
{
return (gtOper == GT_LSH) || (gtOper == GT_RSH) || (gtOper == GT_RSZ);
}
bool OperIsShift() const
{
return OperIsShift(OperGet());
}
static bool OperIsShiftLong(genTreeOps gtOper)
{
#ifdef TARGET_64BIT
return false;
#else
return (gtOper == GT_LSH_HI) || (gtOper == GT_RSH_LO);
#endif
}
bool OperIsShiftLong() const
{
return OperIsShiftLong(OperGet());
}
static bool OperIsRotate(genTreeOps gtOper)
{
return (gtOper == GT_ROL) || (gtOper == GT_ROR);
}
bool OperIsRotate() const
{
return OperIsRotate(OperGet());
}
static bool OperIsShiftOrRotate(genTreeOps gtOper)
{
return OperIsShift(gtOper) || OperIsRotate(gtOper) || OperIsShiftLong(gtOper);
}
bool OperIsShiftOrRotate() const
{
return OperIsShiftOrRotate(OperGet());
}
static bool OperIsMul(genTreeOps gtOper)
{
return (gtOper == GT_MUL) || (gtOper == GT_MULHI)
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
|| (gtOper == GT_MUL_LONG)
#endif
;
}
bool OperIsMul() const
{
return OperIsMul(gtOper);
}
bool OperIsArithmetic() const
{
genTreeOps op = OperGet();
return op == GT_ADD || op == GT_SUB || op == GT_MUL || op == GT_DIV || op == GT_MOD
|| op == GT_UDIV || op == GT_UMOD
|| op == GT_OR || op == GT_XOR || op == GT_AND
|| OperIsShiftOrRotate(op);
}
#ifdef TARGET_XARCH
static bool OperIsRMWMemOp(genTreeOps gtOper)
{
// Returns true if the binary op is one of the supported operations for RMW of memory.
return (gtOper == GT_ADD || gtOper == GT_SUB || gtOper == GT_AND || gtOper == GT_OR || gtOper == GT_XOR ||
gtOper == GT_NOT || gtOper == GT_NEG || OperIsShiftOrRotate(gtOper));
}
bool OperIsRMWMemOp() const
{
// Returns true if the binary op is one of the supported operations for RMW of memory.
return OperIsRMWMemOp(gtOper);
}
#endif // TARGET_XARCH
static bool OperIsUnary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_UNOP) != 0;
}
bool OperIsUnary() const
{
return OperIsUnary(gtOper);
}
static bool OperIsBinary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_BINOP) != 0;
}
bool OperIsBinary() const
{
return OperIsBinary(gtOper);
}
static bool OperIsSimple(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_SMPOP) != 0;
}
static bool OperIsSpecial(genTreeOps gtOper)
{
return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
}
bool OperIsSimple() const
{
return OperIsSimple(gtOper);
}
#ifdef FEATURE_SIMD
bool isCommutativeSIMDIntrinsic();
#else // !FEATURE_SIMD
bool isCommutativeSIMDIntrinsic()
{
return false;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool isCommutativeHWIntrinsic() const;
bool isContainableHWIntrinsic() const;
bool isRMWHWIntrinsic(Compiler* comp);
#else
bool isCommutativeHWIntrinsic() const
{
return false;
}
bool isContainableHWIntrinsic() const
{
return false;
}
bool isRMWHWIntrinsic(Compiler* comp)
{
return false;
}
#endif // FEATURE_HW_INTRINSICS
static bool OperIsCommutative(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_COMMUTE) != 0;
}
bool OperIsCommutative()
{
return OperIsCommutative(gtOper) || (OperIsSIMD(gtOper) && isCommutativeSIMDIntrinsic()) ||
(OperIsHWIntrinsic(gtOper) && isCommutativeHWIntrinsic());
}
static bool OperMayOverflow(genTreeOps gtOper)
{
return ((gtOper == GT_ADD) || (gtOper == GT_SUB) || (gtOper == GT_MUL) || (gtOper == GT_CAST)
#if !defined(TARGET_64BIT)
|| (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI)
#endif
);
}
bool OperMayOverflow() const
{
return OperMayOverflow(gtOper);
}
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
static bool OperIsIndir(genTreeOps gtOper)
{
return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK || OperIsBlk(gtOper);
}
static bool OperIsIndirOrArrLength(genTreeOps gtOper)
{
return OperIsIndir(gtOper) || (gtOper == GT_ARR_LENGTH);
}
bool OperIsIndir() const
{
return OperIsIndir(gtOper);
}
bool OperIsIndirOrArrLength() const
{
return OperIsIndirOrArrLength(gtOper);
}
bool OperIsImplicitIndir() const;
static bool OperIsAtomicOp(genTreeOps gtOper)
{
switch (gtOper)
{
case GT_XADD:
case GT_XORR:
case GT_XAND:
case GT_XCHG:
case GT_LOCKADD:
case GT_CMPXCHG:
return true;
default:
return false;
}
}
bool OperIsAtomicOp() const
{
return OperIsAtomicOp(gtOper);
}
bool OperIsStore() const
{
return OperIsStore(gtOper);
}
static bool OperIsStore(genTreeOps gtOper)
{
return (gtOper == GT_STOREIND || gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD ||
OperIsStoreBlk(gtOper) || OperIsAtomicOp(gtOper));
}
static bool OperIsMultiOp(genTreeOps gtOper)
{
return OperIsSIMD(gtOper) || OperIsHWIntrinsic(gtOper);
}
bool OperIsMultiOp() const
{
return OperIsMultiOp(OperGet());
}
// This is here for cleaner FEATURE_SIMD #ifdefs.
static bool OperIsSIMD(genTreeOps gtOper)
{
#ifdef FEATURE_SIMD
return gtOper == GT_SIMD;
#else // !FEATURE_SIMD
return false;
#endif // !FEATURE_SIMD
}
bool OperIsSIMD() const
{
return OperIsSIMD(gtOper);
}
static bool OperIsHWIntrinsic(genTreeOps gtOper)
{
#ifdef FEATURE_HW_INTRINSICS
return gtOper == GT_HWINTRINSIC;
#else
return false;
#endif // FEATURE_HW_INTRINSICS
}
bool OperIsHWIntrinsic() const
{
return OperIsHWIntrinsic(gtOper);
}
bool OperIsSimdOrHWintrinsic() const
{
return OperIsSIMD() || OperIsHWIntrinsic();
}
// This is here for cleaner GT_LONG #ifdefs.
static bool OperIsLong(genTreeOps gtOper)
{
#if defined(TARGET_64BIT)
return false;
#else
return gtOper == GT_LONG;
#endif
}
bool OperIsLong() const
{
return OperIsLong(gtOper);
}
bool OperIsConditionalJump() const
{
return (gtOper == GT_JTRUE) || (gtOper == GT_JCMP) || (gtOper == GT_JCC);
}
#ifdef DEBUG
static const GenTreeDebugOperKind gtDebugOperKindTable[];
static GenTreeDebugOperKind DebugOperKind(genTreeOps oper)
{
assert(oper < GT_COUNT);
return gtDebugOperKindTable[oper];
}
GenTreeDebugOperKind DebugOperKind() const
{
return DebugOperKind(OperGet());
}
bool NullOp1Legal() const
{
assert(OperIsSimple());
switch (gtOper)
{
case GT_LEA:
case GT_RETFILT:
case GT_NOP:
case GT_FIELD:
return true;
case GT_RETURN:
return gtType == TYP_VOID;
default:
return false;
}
}
bool NullOp2Legal() const
{
assert(OperIsSimple(gtOper) || OperIsBlk(gtOper));
if (!OperIsBinary(gtOper))
{
return true;
}
switch (gtOper)
{
case GT_INTRINSIC:
case GT_LEA:
#if defined(TARGET_ARM)
case GT_PUTARG_REG:
#endif // defined(TARGET_ARM)
return true;
default:
return false;
}
}
bool OperIsLIR() const
{
if (OperIs(GT_NOP))
{
// NOPs may only be present in LIR if they do not produce a value.
return IsNothingNode();
}
return (DebugOperKind() & DBK_NOTLIR) == 0;
}
bool OperSupportsReverseOpEvalOrder(Compiler* comp) const;
static bool RequiresNonNullOp2(genTreeOps oper);
bool IsValidCallArgument();
#endif // DEBUG
inline bool IsFPZero() const;
inline bool IsIntegralConst(ssize_t constVal) const;
inline bool IsIntegralConstVector(ssize_t constVal) const;
inline bool IsSIMDZero() const;
inline bool IsFloatPositiveZero() const;
inline bool IsVectorZero() const;
inline bool IsBoxedValue();
inline GenTree* gtGetOp1() const;
// Directly return op2. Asserts the node is binary. Might return nullptr if the binary node allows
// a nullptr op2, such as GT_LEA. This is more efficient than gtGetOp2IfPresent() if you know what
// node type you have.
inline GenTree* gtGetOp2() const;
// The returned pointer might be nullptr if the node is not binary, or if non-null op2 is not required.
inline GenTree* gtGetOp2IfPresent() const;
bool TryGetUse(GenTree* operand, GenTree*** pUse);
bool TryGetUse(GenTree* operand)
{
GenTree** unusedUse = nullptr;
return TryGetUse(operand, &unusedUse);
}
private:
bool TryGetUseBinOp(GenTree* operand, GenTree*** pUse);
public:
GenTree* gtGetParent(GenTree*** pUse);
void ReplaceOperand(GenTree** useEdge, GenTree* replacement);
inline GenTree* gtEffectiveVal(bool commaOnly = false);
inline GenTree* gtCommaAssignVal();
// Tunnel through any GT_RET_EXPRs
GenTree* gtRetExprVal(BasicBlockFlags* pbbFlags = nullptr);
inline GenTree* gtSkipPutArgType();
// Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
inline GenTree* gtSkipReloadOrCopy();
// Returns true if it is a call node returning its value in more than one register
inline bool IsMultiRegCall() const;
// Returns true if it is a struct lclVar node residing in multiple registers.
inline bool IsMultiRegLclVar() const;
// Returns true if it is a node returning its value in more than one register
bool IsMultiRegNode() const;
// Returns the number of registers defined by a multireg node.
unsigned GetMultiRegCount(Compiler* comp) const;
// Returns the regIndex'th register defined by a possibly-multireg node.
regNumber GetRegByIndex(int regIndex) const;
// Returns the type of the regIndex'th register defined by a multi-reg node.
var_types GetRegTypeByIndex(int regIndex) const;
// Returns the GTF flag equivalent for the regIndex'th register of a multi-reg node.
GenTreeFlags GetRegSpillFlagByIdx(int regIndex) const;
// Last-use information for either GenTreeLclVar or GenTreeCopyOrReload nodes.
private:
GenTreeFlags GetLastUseBit(int regIndex) const;
public:
bool IsLastUse(int regIndex) const;
bool HasLastUse() const;
void SetLastUse(int regIndex);
void ClearLastUse(int regIndex);
// Returns true if it is a GT_COPY or GT_RELOAD node
inline bool IsCopyOrReload() const;
// Returns true if it is a GT_COPY or GT_RELOAD of a multi-reg call node
inline bool IsCopyOrReloadOfMultiRegCall() const;
bool OperRequiresAsgFlag();
bool OperRequiresCallFlag(Compiler* comp);
bool OperMayThrow(Compiler* comp);
unsigned GetScaleIndexMul();
unsigned GetScaleIndexShf();
unsigned GetScaledIndex();
public:
static unsigned char s_gtNodeSizes[];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
static unsigned char s_gtTrueSizes[];
#endif
#if COUNT_AST_OPERS
static unsigned s_gtNodeCounts[];
#endif
static void InitNodeSize();
size_t GetNodeSize() const;
bool IsNodeProperlySized() const;
void ReplaceWith(GenTree* src, Compiler* comp);
static genTreeOps ReverseRelop(genTreeOps relop);
static genTreeOps SwapRelop(genTreeOps relop);
//---------------------------------------------------------------------
static bool Compare(GenTree* op1, GenTree* op2, bool swapOK = false);
//---------------------------------------------------------------------
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* OpName(genTreeOps op);
#endif
#if MEASURE_NODE_SIZE
static const char* OpStructName(genTreeOps op);
#endif
//---------------------------------------------------------------------
bool IsNothingNode() const;
void gtBashToNOP();
// Value number update action enumeration
enum ValueNumberUpdate
{
CLEAR_VN, // Clear value number
PRESERVE_VN // Preserve value number
};
void SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN); // set gtOper
void SetOperResetFlags(genTreeOps oper); // set gtOper and reset flags
// set gtOper and only keep GTF_COMMON_MASK flags
void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
void ChangeOperUnchecked(genTreeOps oper);
void SetOperRaw(genTreeOps oper);
void ChangeType(var_types newType)
{
var_types oldType = gtType;
gtType = newType;
GenTree* node = this;
while (node->gtOper == GT_COMMA)
{
node = node->gtGetOp2();
if (node->gtType != newType)
{
assert(node->gtType == oldType);
node->gtType = newType;
}
}
}
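// Illustrative note (not part of the original header): because a GT_COMMA's value is its op2, calling
// ChangeType on COMMA(sideEffect, COMMA(sideEffect2, value)) retypes the outer comma, the inner comma and
// "value" itself, keeping the whole comma chain consistently typed.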
template <typename T>
void BashToConst(T value, var_types type = TYP_UNDEF);
void BashToZeroConst(var_types type);
#if NODEBASH_STATS
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew);
static void ReportOperBashing(FILE* fp);
#else
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{ /* do nothing */
}
static void ReportOperBashing(FILE* fp)
{ /* do nothing */
}
#endif
bool IsLocal() const
{
return OperIsLocal(OperGet());
}
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR.
bool IsPartialLclFld(Compiler* comp);
// Returns "true" iff "this" defines a local variable. Requires "comp" to be the
// current compilation. If returns "true", sets "*pLclVarTree" to the
// tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to
// true or false, depending on whether the assignment writes to the entirety of the local
// variable, or just a portion of it.
bool DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr);
bool IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset = nullptr);
// Simpler variant of the above which just returns the local node if this is an expression that
// yields an address into a local
GenTreeLclVarCommon* IsLocalAddrExpr();
// Determine if this tree represents the value of an entire implicit byref parameter,
// and if so return the tree for the parameter.
GenTreeLclVar* IsImplicitByrefParameterValue(Compiler* compiler);
// Determine if this is a LclVarCommon node and return some additional info about it in the
// two out parameters.
bool IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq);
// Determine whether this is an assignment tree of the form X = X (op) Y,
// where Y is an arbitrary tree, and X is a lclVar.
unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps* updateOper);
bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of an array (the child of a GT_IND labeled with GTF_IND_ARR_INDEX).
// Sets "pArr" to the node representing the array (either an array object pointer, or perhaps a byref to the some
// element).
// Sets "*pArrayType" to the class handle for the array type.
// Sets "*inxVN" to the value number inferred for the array index.
// Sets "*pFldSeq" to the sequence, if any, of struct fields used to index into the array element.
void ParseArrayAddress(
Compiler* comp, struct ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq);
// Helper method for the above.
void ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq);
// Requires "this" to be a GT_IND. Requires the outermost caller to set "*pFldSeq" to nullptr.
// Returns true if it is an array index expression, or access to a (sequence of) struct field(s)
// within a struct array element. If it returns true, sets *arrayInfo to the array information, and sets *pFldSeq
// to the sequence of struct field accesses.
bool ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of a (possible) array element (or struct field within that).
// If it is, sets "*arrayInfo" to the array access info, "*pFldSeq" to the sequence of struct fields
// accessed within the array element, and returns true. If not, returns "false".
bool ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be an int expression. If it is a sequence of one or more integer constants added together,
// returns true and sets "*pFldSeq" to the sequence of fields with which those constants are annotated.
bool ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq);
// Labels "*this" as an array index expression: label all constants and variables that could contribute, as part of
// an affine expression, to the value of the index.
void LabelIndex(Compiler* comp, bool isConst = true);
// Assumes that "this" occurs in a context where it is being dereferenced as the LHS of an assignment-like
// statement (assignment, initblk, or copyblk). The "width" should be the number of bytes copied by the
// operation. Returns "true" if "this" is an address of (or within)
// a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null,
// sets "*pIsEntire" to true if this assignment writes the full width of the local.
bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire);
// These are only used for dumping.
// The GetRegNum() is only valid in LIR, but the dumping methods are not easily
// modified to check this.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool InReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? true : false;
}
regNumber GetReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? GetRegNum() : REG_NA;
}
#endif
static bool IsContained(unsigned flags)
{
return ((flags & GTF_CONTAINED) != 0);
}
void SetContained()
{
assert(IsValue());
gtFlags |= GTF_CONTAINED;
assert(isContained());
}
void ClearContained()
{
assert(IsValue());
gtFlags &= ~GTF_CONTAINED;
ClearRegOptional();
}
bool CanCSE() const
{
return ((gtFlags & GTF_DONT_CSE) == 0);
}
void SetDoNotCSE()
{
gtFlags |= GTF_DONT_CSE;
}
void ClearDoNotCSE()
{
gtFlags &= ~GTF_DONT_CSE;
}
bool IsReverseOp() const
{
return (gtFlags & GTF_REVERSE_OPS) ? true : false;
}
void SetReverseOp()
{
gtFlags |= GTF_REVERSE_OPS;
}
void ClearReverseOp()
{
gtFlags &= ~GTF_REVERSE_OPS;
}
bool IsUnsigned() const
{
return ((gtFlags & GTF_UNSIGNED) != 0);
}
void SetUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST, GT_LE, GT_LT, GT_GT, GT_GE) || OperIsMul());
gtFlags |= GTF_UNSIGNED;
}
void ClearUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST) || OperIsMul());
gtFlags &= ~GTF_UNSIGNED;
}
void SetOverflow()
{
assert(OperMayOverflow());
gtFlags |= GTF_OVERFLOW;
}
void ClearOverflow()
{
assert(OperMayOverflow());
gtFlags &= ~GTF_OVERFLOW;
}
bool Is64RsltMul() const
{
return (gtFlags & GTF_MUL_64RSLT) != 0;
}
void Set64RsltMul()
{
gtFlags |= GTF_MUL_64RSLT;
}
void Clear64RsltMul()
{
gtFlags &= ~GTF_MUL_64RSLT;
}
void SetAllEffectsFlags(GenTree* source)
{
SetAllEffectsFlags(source->gtFlags & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource, GenTree* thirdSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags | thirdSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTreeFlags sourceFlags)
{
assert((sourceFlags & ~GTF_ALL_EFFECT) == 0);
gtFlags &= ~GTF_ALL_EFFECT;
gtFlags |= sourceFlags;
}
inline bool IsCnsIntOrI() const;
inline bool IsIntegralConst() const;
inline bool IsIntCnsFitsInI32(); // Constant fits in INT32
inline bool IsCnsFltOrDbl() const;
inline bool IsCnsNonZeroFltOrDbl() const;
bool IsIconHandle() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK) ? true : false;
}
bool IsIconHandle(GenTreeFlags handleType) const
{
assert(gtOper == GT_CNS_INT);
assert((handleType & GTF_ICON_HDL_MASK) != 0); // check that handleType is one of the valid GTF_ICON_* values
assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
return (gtFlags & GTF_ICON_HDL_MASK) == handleType;
}
// Return just the part of the flags corresponding to the GTF_ICON_*_HDL flag. For example,
// GTF_ICON_SCOPE_HDL. The tree node must be a const int, but it might not be a handle, in which
// case we'll return zero.
GenTreeFlags GetIconHandleFlag() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK);
}
// Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
void ClearIconHandleMask()
{
assert(gtOper == GT_CNS_INT);
gtFlags &= ~GTF_ICON_HDL_MASK;
}
// Return true if the two GT_CNS_INT trees have the same handle flag (GTF_ICON_*_HDL).
static bool SameIconHandleFlag(GenTree* t1, GenTree* t2)
{
return t1->GetIconHandleFlag() == t2->GetIconHandleFlag();
}
bool IsArgPlaceHolderNode() const
{
return OperGet() == GT_ARGPLACE;
}
bool IsCall() const
{
return OperGet() == GT_CALL;
}
inline bool IsHelperCall();
bool gtOverflow() const;
bool gtOverflowEx() const;
bool gtSetFlags() const;
bool gtRequestSetFlags();
#ifdef DEBUG
static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags);
#endif
// cast operations
inline var_types CastFromType();
inline var_types& CastToType();
// Returns "true" iff "this" is a phi-related node (i.e. a GT_PHI_ARG, GT_PHI, or a PhiDefn).
bool IsPhiNode();
// Returns "true" iff "*this" is an assignment (GT_ASG) tree that defines an SSA name (lcl = phi(...));
bool IsPhiDefn();
// Returns "true" iff "*this" is a statement containing an assignment that defines an SSA name (lcl = phi(...));
// Because of the fact that we hid the assignment operator of "BitSet" (in DEBUG),
// we can't synthesize an assignment operator.
// TODO-Cleanup: Could change this w/o liveset on tree nodes
// (This is also necessary for the VTable trick.)
GenTree()
{
}
// Returns an iterator that will produce the use edge to each operand of this node. Differs
// from the sequence of nodes produced by a loop over `GetChild` in its handling of call, phi,
// and block op nodes.
GenTreeUseEdgeIterator UseEdgesBegin();
GenTreeUseEdgeIterator UseEdgesEnd();
IteratorPair<GenTreeUseEdgeIterator> UseEdges();
// Returns an iterator that will produce each operand of this node, in execution order.
GenTreeOperandIterator OperandsBegin();
GenTreeOperandIterator OperandsEnd();
// Returns a range that will produce the operands of this node in execution order.
IteratorPair<GenTreeOperandIterator> Operands();
enum class VisitResult
{
Abort = false,
Continue = true
};
// Visits each operand of this node. The operand must be either a lambda, function, or functor with the signature
// `GenTree::VisitResult VisitorFunction(GenTree* operand)`. Here is a simple example:
//
// unsigned operandCount = 0;
    //     node->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult
// {
// operandCount++;
// return GenTree::VisitResult::Continue;
// });
//
    // This function is generally more efficient than the operand iterator and should be preferred over that API for
    // hot code, as it affords better opportunities for inlining and achieves shorter dynamic path lengths when
// deciding how operands need to be accessed.
//
// Note that this function does not respect `GTF_REVERSE_OPS`. This is always safe in LIR, but may be dangerous
// in HIR if for some reason you need to visit operands in the order in which they will execute.
template <typename TVisitor>
void VisitOperands(TVisitor visitor);
private:
template <typename TVisitor>
void VisitBinOpOperands(TVisitor visitor);
public:
bool Precedes(GenTree* other);
bool IsInvariant() const;
bool IsNeverNegative(Compiler* comp) const;
bool IsReuseRegVal() const
{
// This can be extended to non-constant nodes, but not to local or indir nodes.
if (IsInvariant() && ((gtFlags & GTF_REUSE_REG_VAL) != 0))
{
return true;
}
return false;
}
void SetReuseRegVal()
{
assert(IsInvariant());
gtFlags |= GTF_REUSE_REG_VAL;
}
void ResetReuseRegVal()
{
assert(IsInvariant());
gtFlags &= ~GTF_REUSE_REG_VAL;
}
void SetIndirExceptionFlags(Compiler* comp);
#if MEASURE_NODE_SIZE
static void DumpNodeSizes(FILE* fp);
#endif
#ifdef DEBUG
private:
GenTree& operator=(const GenTree& gt)
{
assert(!"Don't copy");
return *this;
}
#endif // DEBUG
#if DEBUGGABLE_GENTREE
// In DEBUG builds, add a dummy virtual method, to give the debugger run-time type information.
virtual void DummyVirt()
{
}
typedef void* VtablePtr;
VtablePtr GetVtableForOper(genTreeOps oper);
void SetVtableForOper(genTreeOps oper);
static VtablePtr s_vtablesForOpers[GT_COUNT];
static VtablePtr s_vtableForOp;
#endif // DEBUGGABLE_GENTREE
public:
inline void* operator new(size_t sz, class Compiler*, genTreeOps oper);
inline GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false));
};
// Represents a GT_PHI node - a variable sized list of GT_PHI_ARG nodes.
// All PHI_ARG nodes must represent uses of the same local variable and
// the PHI node's type must be the same as the local variable's type.
//
// The PHI node does not represent a definition by itself, it is always
// the RHS of a GT_ASG node. The LHS of the ASG node is always a GT_LCL_VAR
// node, that is a definition for the same local variable referenced by
// all the used PHI_ARG nodes:
//
// ASG(LCL_VAR(lcl7), PHI(PHI_ARG(lcl7), PHI_ARG(lcl7), PHI_ARG(lcl7)))
//
// PHI nodes are also present in LIR, where GT_STORE_LCL_VAR replaces the
// ASG node.
//
// The order of the PHI_ARG uses is not currently relevant and it may be
// the same or not as the order of the predecessor blocks.
//
struct GenTreePhi final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node->OperIs(GT_PHI_ARG));
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node->OperIs(GT_PHI_ARG));
return m_node;
}
void SetNode(GenTree* node)
{
assert(node->OperIs(GT_PHI_ARG));
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtUses;
GenTreePhi(var_types type) : GenTree(GT_PHI, type), gtUses(nullptr)
{
}
UseList Uses()
{
return UseList(gtUses);
}
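    // Example (illustrative sketch, not part of the original header; 'phi' is a hypothetical GenTreePhi*
    // and the AsPhiArg()/GetSsaNum() accessors are assumed to be available on the argument nodes):
    //
    //     for (GenTreePhi::Use& use : phi->Uses())
    //     {
    //         GenTreePhiArg* arg = use.GetNode()->AsPhiArg();
    //         // ... inspect arg->GetLclNum() and arg->GetSsaNum() ...
    //     }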
//--------------------------------------------------------------------------
// Equals: Checks if 2 PHI nodes are equal.
//
// Arguments:
// phi1 - The first PHI node
// phi2 - The second PHI node
//
// Return Value:
// true if the 2 PHI nodes have the same type, number of uses, and the
// uses are equal.
//
// Notes:
// The order of uses must be the same for equality, even if the
// order is not usually relevant and is not guaranteed to reflect
// a particular order of the predecessor blocks.
//
static bool Equals(GenTreePhi* phi1, GenTreePhi* phi2)
{
if (phi1->TypeGet() != phi2->TypeGet())
{
return false;
}
GenTreePhi::UseIterator i1 = phi1->Uses().begin();
GenTreePhi::UseIterator end1 = phi1->Uses().end();
GenTreePhi::UseIterator i2 = phi2->Uses().begin();
GenTreePhi::UseIterator end2 = phi2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
#if DEBUGGABLE_GENTREE
GenTreePhi() : GenTree()
{
}
#endif
};
// Represents a list of fields constituting a struct, when it is passed as an argument.
//
struct GenTreeFieldList : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
uint16_t m_offset;
var_types m_type;
public:
Use(GenTree* node, unsigned offset, var_types type)
: m_node(node), m_next(nullptr), m_offset(static_cast<uint16_t>(offset)), m_type(type)
{
// We can save space on 32 bit hosts by storing the offset as uint16_t. Struct promotion
// only accepts structs which are much smaller than that - 128 bytes = max 4 fields * max
// SIMD vector size (32 bytes).
assert(offset <= UINT16_MAX);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
unsigned GetOffset() const
{
return m_offset;
}
var_types GetType() const
{
return m_type;
}
void SetType(var_types type)
{
m_type = type;
}
};
class UseIterator
{
Use* use;
public:
UseIterator(Use* use) : use(use)
{
}
Use& operator*()
{
return *use;
}
Use* operator->()
{
return use;
}
void operator++()
{
use = use->GetNext();
}
bool operator==(const UseIterator& other)
{
return use == other.use;
}
bool operator!=(const UseIterator& other)
{
return use != other.use;
}
};
class UseList
{
Use* m_head;
Use* m_tail;
public:
UseList() : m_head(nullptr), m_tail(nullptr)
{
}
Use* GetHead() const
{
return m_head;
}
UseIterator begin() const
{
return m_head;
}
UseIterator end() const
{
return nullptr;
}
void AddUse(Use* newUse)
{
assert(newUse->GetNext() == nullptr);
if (m_head == nullptr)
{
m_head = newUse;
}
else
{
m_tail->SetNext(newUse);
}
m_tail = newUse;
}
void InsertUse(Use* insertAfter, Use* newUse)
{
assert(newUse->GetNext() == nullptr);
newUse->SetNext(insertAfter->GetNext());
insertAfter->SetNext(newUse);
if (m_tail == insertAfter)
{
m_tail = newUse;
}
}
void Reverse()
{
m_tail = m_head;
m_head = nullptr;
for (Use *next, *use = m_tail; use != nullptr; use = next)
{
next = use->GetNext();
use->SetNext(m_head);
m_head = use;
}
}
bool IsSorted() const
{
unsigned offset = 0;
for (GenTreeFieldList::Use& use : *this)
{
if (use.GetOffset() < offset)
{
return false;
}
offset = use.GetOffset();
}
return true;
}
};
private:
UseList m_uses;
public:
GenTreeFieldList() : GenTree(GT_FIELD_LIST, TYP_STRUCT)
{
SetContained();
}
UseList& Uses()
{
return m_uses;
}
// Add a new field use to the end of the use list and update side effect flags.
void AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Add a new field use to the end of the use list without updating side effect flags.
void AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use and update side effect flags.
void InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use without updating side effect flags.
void InsertFieldLIR(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
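    // Example (illustrative sketch, not part of the original header): splitting a TYP_LONG argument into
    // two TYP_INT fields. 'compiler' is assumed to be the current Compiler* and 'loHalf'/'hiHalf' are
    // hypothetical operand trees.
    //
    //     GenTreeFieldList* fieldList = new (compiler, GT_FIELD_LIST) GenTreeFieldList();
    //     fieldList->AddField(compiler, loHalf, 0, TYP_INT);
    //     fieldList->AddField(compiler, hiHalf, 4, TYP_INT);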
//--------------------------------------------------------------------------
// Equals: Check if 2 FIELD_LIST nodes are equal.
//
// Arguments:
// list1 - The first FIELD_LIST node
// list2 - The second FIELD_LIST node
//
// Return Value:
// true if the 2 FIELD_LIST nodes have the same type, number of uses, and the
// uses are equal.
//
static bool Equals(GenTreeFieldList* list1, GenTreeFieldList* list2)
{
assert(list1->TypeGet() == TYP_STRUCT);
assert(list2->TypeGet() == TYP_STRUCT);
UseIterator i1 = list1->Uses().begin();
UseIterator end1 = list1->Uses().end();
UseIterator i2 = list2->Uses().begin();
UseIterator end2 = list2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()) || (i1->GetOffset() != i2->GetOffset()) ||
(i1->GetType() != i2->GetType()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
};
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: an iterator that will produce each use edge of a GenTree node in the order in which
// they are used.
//
// Operand iteration is common enough in the back end of the compiler that the implementation of this type has
// traded some simplicity for speed:
// - As much work as is reasonable is done in the constructor rather than during operand iteration
// - Node-specific functionality is handled by a small class of "advance" functions called by operator++
// rather than making operator++ itself handle all nodes
// - Some specialization has been performed for specific node types/shapes (e.g. the advance function for
// binary nodes is specialized based on whether or not the node has the GTF_REVERSE_OPS flag set)
//
// Valid values of this type may be obtained by calling `GenTree::UseEdgesBegin` and `GenTree::UseEdgesEnd`.
//
class GenTreeUseEdgeIterator final
{
friend class GenTreeOperandIterator;
friend GenTreeUseEdgeIterator GenTree::UseEdgesBegin();
friend GenTreeUseEdgeIterator GenTree::UseEdgesEnd();
enum
{
CALL_INSTANCE = 0,
CALL_ARGS = 1,
CALL_LATE_ARGS = 2,
CALL_CONTROL_EXPR = 3,
CALL_COOKIE = 4,
CALL_ADDRESS = 5,
CALL_TERMINAL = 6,
};
typedef void (GenTreeUseEdgeIterator::*AdvanceFn)();
AdvanceFn m_advance;
GenTree* m_node;
GenTree** m_edge;
// Pointer sized state storage, GenTreePhi::Use* or GenTreeCall::Use*
// or the exclusive end/beginning of GenTreeMultiOp's operand array.
void* m_statePtr;
// Integer sized state storage, usually the operand index for non-list based nodes.
int m_state;
GenTreeUseEdgeIterator(GenTree* node);
// Advance functions for special nodes
void AdvanceCmpXchg();
void AdvanceArrElem();
void AdvanceArrOffset();
void AdvanceStoreDynBlk();
void AdvanceFieldList();
void AdvancePhi();
template <bool ReverseOperands>
void AdvanceBinOp();
void SetEntryStateForBinOp();
// The advance function for call nodes
template <int state>
void AdvanceCall();
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void AdvanceMultiOp();
void AdvanceReversedMultiOp();
void SetEntryStateForMultiOp();
#endif
void Terminate();
public:
GenTreeUseEdgeIterator();
inline GenTree** operator*()
{
assert(m_state != -1);
return m_edge;
}
inline GenTree** operator->()
{
assert(m_state != -1);
return m_edge;
}
inline bool operator==(const GenTreeUseEdgeIterator& other) const
{
if (m_state == -1 || other.m_state == -1)
{
return m_state == other.m_state;
}
return (m_node == other.m_node) && (m_edge == other.m_edge) && (m_statePtr == other.m_statePtr) &&
(m_state == other.m_state);
}
inline bool operator!=(const GenTreeUseEdgeIterator& other) const
{
return !(operator==(other));
}
GenTreeUseEdgeIterator& operator++();
};
//------------------------------------------------------------------------
// GenTreeOperandIterator: an iterator that will produce each operand of a
// GenTree node in the order in which they are
// used. This uses `GenTreeUseEdgeIterator` under
// the covers.
//
// Note: valid values of this type may be obtained by calling
// `GenTree::OperandsBegin` and `GenTree::OperandsEnd`.
class GenTreeOperandIterator final
{
friend GenTreeOperandIterator GenTree::OperandsBegin();
friend GenTreeOperandIterator GenTree::OperandsEnd();
GenTreeUseEdgeIterator m_useEdges;
GenTreeOperandIterator(GenTree* node) : m_useEdges(node)
{
}
public:
GenTreeOperandIterator() : m_useEdges()
{
}
inline GenTree* operator*()
{
return *(*m_useEdges);
}
inline GenTree* operator->()
{
return *(*m_useEdges);
}
inline bool operator==(const GenTreeOperandIterator& other) const
{
return m_useEdges == other.m_useEdges;
}
inline bool operator!=(const GenTreeOperandIterator& other) const
{
return !(operator==(other));
}
inline GenTreeOperandIterator& operator++()
{
++m_useEdges;
return *this;
}
};
/*****************************************************************************/
// In the current design, we never instantiate GenTreeUnOp: it exists only to be
// used as a base class. For unary operators, we instantiate GenTreeOp, with a NULL second
// argument. We check that this is true dynamically. We could tighten this and get static
// checking, but that would entail accessing the first child of a unary operator via something
// like gtUnOp.gtOp1 instead of AsOp()->gtOp1.
struct GenTreeUnOp : public GenTree
{
GenTree* gtOp1;
protected:
GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr)
{
}
GenTreeUnOp(genTreeOps oper, var_types type, GenTree* op1 DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1)
{
assert(op1 != nullptr || NullOp1Legal());
if (op1 != nullptr)
{ // Propagate effects flags from child.
gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
}
}
#if DEBUGGABLE_GENTREE
GenTreeUnOp() : GenTree(), gtOp1(nullptr)
{
}
#endif
};
struct GenTreeOp : public GenTreeUnOp
{
GenTree* gtOp2;
GenTreeOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2)
{
// comparisons are always integral types
assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
// Binary operators, with a few exceptions, require a non-nullptr
// second argument.
assert(op2 != nullptr || NullOp2Legal());
// Unary operators, on the other hand, require a null second argument.
assert(!OperIsUnary(oper) || op2 == nullptr);
// Propagate effects flags from child. (UnOp handled this for first child.)
if (op2 != nullptr)
{
gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
}
}
// A small set of types are unary operators with optional arguments. We use
// this constructor to build those.
GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr)
{
// Unary operators with optional arguments:
assert(oper == GT_NOP || oper == GT_RETURN || oper == GT_RETFILT || OperIsBlk(oper));
}
// returns true if we will use the division by constant optimization for this node.
bool UsesDivideByConstOptimized(Compiler* comp);
// checks if we will use the division by constant optimization this node
// then sets the flag GTF_DIV_BY_CNS_OPT and GTF_DONT_CSE on the constant
void CheckDivideByConstOptimized(Compiler* comp);
// True if this node is marked as using the division by constant optimization
bool MarkedDivideByConstOptimized() const
{
return (gtFlags & GTF_DIV_BY_CNS_OPT) != 0;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
bool IsValidLongMul();
#endif
#if !defined(TARGET_64BIT) && defined(DEBUG)
void DebugCheckLongMul();
#endif
#if DEBUGGABLE_GENTREE
GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr)
{
}
#endif
// True if this relop is marked for a transform during the emitter
// phase, e.g., jge => jns
bool MarkedForSignJumpOpt() const
{
return (gtFlags & GTF_RELOP_SJUMP_OPT) != 0;
}
};
struct GenTreeVal : public GenTree
{
size_t gtVal1;
GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val)
{
}
#if DEBUGGABLE_GENTREE
GenTreeVal() : GenTree()
{
}
#endif
};
struct GenTreeIntConCommon : public GenTree
{
inline INT64 LngValue() const;
inline void SetLngValue(INT64 val);
inline ssize_t IconValue() const;
inline void SetIconValue(ssize_t val);
inline INT64 IntegralValue() const;
inline void SetIntegralValue(int64_t value);
template <typename T>
inline void SetValueTruncating(T value);
GenTreeIntConCommon(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode))
{
}
bool FitsInI8() // IconValue() fits into 8-bit signed storage
{
return FitsInI8(IconValue());
}
static bool FitsInI8(ssize_t val) // Constant fits into 8-bit signed storage
{
return (int8_t)val == val;
}
bool FitsInI32() // IconValue() fits into 32-bit signed storage
{
return FitsInI32(IconValue());
}
static bool FitsInI32(ssize_t val) // Constant fits into 32-bit signed storage
{
#ifdef TARGET_64BIT
return (int32_t)val == val;
#else
return true;
#endif
}
bool ImmedValNeedsReloc(Compiler* comp);
bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
#ifdef TARGET_XARCH
bool FitsInAddrBase(Compiler* comp);
bool AddrNeedsReloc(Compiler* comp);
#endif
#if DEBUGGABLE_GENTREE
GenTreeIntConCommon() : GenTree()
{
}
#endif
};
// node representing a read from a physical register
struct GenTreePhysReg : public GenTree
{
// physregs need a field beyond GetRegNum() because
// GetRegNum() indicates the destination (and can be changed)
// whereas reg indicates the source
regNumber gtSrcReg;
GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
{
}
#if DEBUGGABLE_GENTREE
GenTreePhysReg() : GenTree()
{
}
#endif
};
/* gtIntCon -- integer constant (GT_CNS_INT) */
struct GenTreeIntCon : public GenTreeIntConCommon
{
/*
* This is the GT_CNS_INT struct definition.
     * It's used to hold both int constants and pointer handle constants.
     * For the 64-bit targets we only use GT_CNS_INT, as it can represent all the possible sizes
* For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
* In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
*/
ssize_t gtIconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeLngCon below.
/* The InitializeArray intrinsic needs to go back to the newarray statement
to find the class handle of the array so that we can get its size. However,
in ngen mode, the handle in that statement does not correspond to the compile
time handle (rather it lets you get a handle at run-time). In that case, we also
need to store a compile time handle, which goes in this gtCompileTimeHandle field.
*/
ssize_t gtCompileTimeHandle;
// TODO-Cleanup: It's not clear what characterizes the cases where the field
// above is used. It may be that its uses and those of the "gtFieldSeq" field below
// are mutually exclusive, and they could be put in a union. Or else we should separate
// this type into three subtypes.
// If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
// sequence of fields.
FieldSeqNode* gtFieldSeq;
#ifdef DEBUG
// If the value represents target address, holds the method handle to that target which is used
// to fetch target method name and display in the disassembled code.
size_t gtTargetHandle = 0;
#endif
GenTreeIntCon(var_types type, ssize_t value DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(FieldSeqStore::NotAField())
{
}
GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(fields)
{
assert(fields != nullptr);
}
void FixupInitBlkValue(var_types asgType);
#if DEBUGGABLE_GENTREE
GenTreeIntCon() : GenTreeIntConCommon()
{
}
#endif
};
/* gtLngCon -- long constant (GT_CNS_LNG) */
struct GenTreeLngCon : public GenTreeIntConCommon
{
INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
INT32 LoVal()
{
return (INT32)(gtLconVal & 0xffffffff);
}
INT32 HiVal()
{
return (INT32)(gtLconVal >> 32);
}
GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
{
SetLngValue(val);
}
#if DEBUGGABLE_GENTREE
GenTreeLngCon() : GenTreeIntConCommon()
{
}
#endif
};
inline INT64 GenTreeIntConCommon::LngValue() const
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
return AsLngCon()->gtLconVal;
#else
return IconValue();
#endif
}
inline void GenTreeIntConCommon::SetLngValue(INT64 val)
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
AsLngCon()->gtLconVal = val;
#else
// Compile time asserts that these two fields overlap and have the same offsets: gtIconVal and gtLconVal
C_ASSERT(offsetof(GenTreeLngCon, gtLconVal) == offsetof(GenTreeIntCon, gtIconVal));
C_ASSERT(sizeof(AsLngCon()->gtLconVal) == sizeof(AsIntCon()->gtIconVal));
SetIconValue(ssize_t(val));
#endif
}
inline ssize_t GenTreeIntConCommon::IconValue() const
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
return AsIntCon()->gtIconVal;
}
inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
AsIntCon()->gtIconVal = val;
}
inline INT64 GenTreeIntConCommon::IntegralValue() const
{
#ifdef TARGET_64BIT
return LngValue();
#else
return gtOper == GT_CNS_LNG ? LngValue() : (INT64)IconValue();
#endif // TARGET_64BIT
}
inline void GenTreeIntConCommon::SetIntegralValue(int64_t value)
{
#ifdef TARGET_64BIT
SetIconValue(value);
#else
if (OperIs(GT_CNS_LNG))
{
SetLngValue(value);
}
else
{
assert(FitsIn<int32_t>(value));
SetIconValue(static_cast<int32_t>(value));
}
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// SetValueTruncating: Set the value, truncating to TYP_INT if necessary.
//
// The function will truncate the supplied value to a 32 bit signed
// integer if the node's type is not TYP_LONG, otherwise setting it
// as-is. Note that this function intentionally does not check for
// small types (such nodes are created in lowering) for TP reasons.
//
// This function is intended to be used where its truncating behavior is
// desirable. One example is folding of ADD(CNS_INT, CNS_INT) performed in
// wider integers, which is typical when compiling on 64 bit hosts, as
// most arithmetic is done in ssize_t's aka int64_t's in that case, while
// the node itself can be of a narrower type.
//
// Arguments:
// value - Value to set, truncating to TYP_INT if the node is not of TYP_LONG
//
// Notes:
// This function is templated so that it works well with compiler warnings of
// the form "Operation may overflow before being assigned to a wider type", in
// case "value" is of type ssize_t, which is common.
//
template <typename T>
inline void GenTreeIntConCommon::SetValueTruncating(T value)
{
static_assert_no_msg((std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value));
if (TypeIs(TYP_LONG))
{
SetLngValue(value);
}
else
{
SetIconValue(static_cast<int32_t>(value));
}
}
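// Example (illustrative sketch, not part of the original header): folding the sum of two integer
// constants into 'tree', where the host-width sum may be wider than the node's type. 'tree', 'op1'
// and 'op2' are hypothetical GT_CNS_INT nodes.
//
//     ssize_t folded = op1->AsIntConCommon()->IconValue() + op2->AsIntConCommon()->IconValue();
//     tree->AsIntConCommon()->SetValueTruncating(folded);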
/* gtDblCon -- double constant (GT_CNS_DBL) */
struct GenTreeDblCon : public GenTree
{
double gtDconVal;
bool isBitwiseEqual(GenTreeDblCon* other)
{
        unsigned __int64 bits      = *(unsigned __int64*)(&gtDconVal);
unsigned __int64 otherBits = *(unsigned __int64*)(&(other->gtDconVal));
return (bits == otherBits);
}
GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type), gtDconVal(val)
{
assert(varTypeIsFloating(type));
}
#if DEBUGGABLE_GENTREE
GenTreeDblCon() : GenTree()
{
}
#endif
};
/* gtStrCon -- string constant (GT_CNS_STR) */
#define EMPTY_STRING_SCON (unsigned)-1
struct GenTreeStrCon : public GenTree
{
unsigned gtSconCPX;
CORINFO_MODULE_HANDLE gtScpHnd;
// Returns true if this GT_CNS_STR was imported for String.Empty field
bool IsStringEmptyField()
{
return gtSconCPX == EMPTY_STRING_SCON && gtScpHnd == nullptr;
}
// Because this node can come from an inlined method we need to
// have the scope handle, since it will become a helper call.
GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false))
: GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod)
{
}
#if DEBUGGABLE_GENTREE
GenTreeStrCon() : GenTree()
{
}
#endif
};
// Common supertype of LCL_VAR, LCL_FLD, REG_VAR, PHI_ARG
// This inherits from UnOp because lclvar stores are Unops
struct GenTreeLclVarCommon : public GenTreeUnOp
{
private:
unsigned _gtLclNum; // The local number. An index into the Compiler::lvaTable array.
unsigned _gtSsaNum; // The SSA number.
public:
GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode))
{
SetLclNum(lclNum);
}
unsigned GetLclNum() const
{
return _gtLclNum;
}
void SetLclNum(unsigned lclNum)
{
_gtLclNum = lclNum;
_gtSsaNum = SsaConfig::RESERVED_SSA_NUM;
}
uint16_t GetLclOffs() const;
unsigned GetSsaNum() const
{
return _gtSsaNum;
}
void SetSsaNum(unsigned ssaNum)
{
_gtSsaNum = ssaNum;
}
bool HasSsaName()
{
return (GetSsaNum() != SsaConfig::RESERVED_SSA_NUM);
}
#if DEBUGGABLE_GENTREE
GenTreeLclVarCommon() : GenTreeUnOp()
{
}
#endif
};
//------------------------------------------------------------------------
// MultiRegSpillFlags
//
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flags of each register
// are stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
//
typedef unsigned char MultiRegSpillFlags;
static const unsigned PACKED_GTF_SPILL = 1;
static const unsigned PACKED_GTF_SPILLED = 2;
//----------------------------------------------------------------------
// GetMultiRegSpillFlagsByIdx: get spill flag associated with the return register
// specified by its index.
//
// Arguments:
// idx - Position or index of the return register
//
// Return Value:
// Returns GTF_* flags associated with the register. Only GTF_SPILL and GTF_SPILLED are considered.
//
inline GenTreeFlags GetMultiRegSpillFlagsByIdx(MultiRegSpillFlags flags, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
unsigned bits = flags >> (idx * 2); // It doesn't matter that we possibly leave other high bits here.
GenTreeFlags spillFlags = GTF_EMPTY;
if (bits & PACKED_GTF_SPILL)
{
spillFlags |= GTF_SPILL;
}
if (bits & PACKED_GTF_SPILLED)
{
spillFlags |= GTF_SPILLED;
}
return spillFlags;
}
//----------------------------------------------------------------------
// SetMultiRegSpillFlagsByIdx: set spill flags for the register specified by its index.
//
// Arguments:
// oldFlags - The current value of the MultiRegSpillFlags for a node.
// flagsToSet - GTF_* flags. Only GTF_SPILL and GTF_SPILLED are allowed.
// Note that these are the flags used on non-multireg nodes,
// and this method adds the appropriate flags to the
// incoming MultiRegSpillFlags and returns it.
// idx - Position or index of the register
//
// Return Value:
// The new value for the node's MultiRegSpillFlags.
//
inline MultiRegSpillFlags SetMultiRegSpillFlagsByIdx(MultiRegSpillFlags oldFlags, GenTreeFlags flagsToSet, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
MultiRegSpillFlags newFlags = oldFlags;
unsigned bits = 0;
if (flagsToSet & GTF_SPILL)
{
bits |= PACKED_GTF_SPILL;
}
if (flagsToSet & GTF_SPILLED)
{
bits |= PACKED_GTF_SPILLED;
}
const unsigned char packedFlags = PACKED_GTF_SPILL | PACKED_GTF_SPILLED;
// Clear anything that was already there by masking out the bits before 'or'ing in what we want there.
newFlags = (unsigned char)((newFlags & ~(packedFlags << (idx * 2))) | (bits << (idx * 2)));
return newFlags;
}
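// Example (illustrative sketch, not part of the original header): recording that the second result
// register of a multi-reg node was spilled, then reading the packed state back.
//
//     MultiRegSpillFlags spillState = 0;
//     spillState = SetMultiRegSpillFlagsByIdx(spillState, GTF_SPILLED, 1);
//     assert(GetMultiRegSpillFlagsByIdx(spillState, 1) == GTF_SPILLED);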
// gtLclVar -- load/store/addr of local variable
struct GenTreeLclVar : public GenTreeLclVarCommon
{
private:
regNumberSmall gtOtherReg[MAX_MULTIREG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
public:
INDEBUG(IL_OFFSET gtLclILoffs;) // instr offset of ref (only for JIT dumps)
// Multireg support
bool IsMultiReg() const
{
return ((gtFlags & GTF_VAR_MULTIREG) != 0);
}
void ClearMultiReg()
{
gtFlags &= ~GTF_VAR_MULTIREG;
}
void SetMultiReg()
{
gtFlags |= GTF_VAR_MULTIREG;
ClearOtherRegFlags();
}
regNumber GetRegNumByIdx(int regIndex) const
{
assert(regIndex < MAX_MULTIREG_COUNT);
return (regIndex == 0) ? GetRegNum() : (regNumber)gtOtherReg[regIndex - 1];
}
void SetRegNumByIdx(regNumber reg, int regIndex)
{
assert(regIndex < MAX_MULTIREG_COUNT);
if (regIndex == 0)
{
SetRegNum(reg);
}
else
{
gtOtherReg[regIndex - 1] = regNumberSmall(reg);
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
}
unsigned int GetFieldCount(Compiler* compiler) const;
var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx);
//-------------------------------------------------------------------
    // ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given LclVar node.
//
// Arguments:
    //    from - GenTreeLclVar node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeLclVar* from)
{
this->gtSpillFlags = from->gtSpillFlags;
}
GenTreeLclVar(genTreeOps oper,
var_types type,
unsigned lclNum DEBUGARG(IL_OFFSET ilOffs = BAD_IL_OFFSET) DEBUGARG(bool largeNode = false))
: GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)) DEBUGARG(gtLclILoffs(ilOffs))
{
assert(OperIsLocal(oper) || OperIsLocalAddr(oper));
}
#if DEBUGGABLE_GENTREE
GenTreeLclVar() : GenTreeLclVarCommon()
{
}
#endif
};
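// Example (illustrative sketch, not part of the original header; 'lclNode' is a hypothetical
// GenTreeLclVar* produced for a multi-reg local):
//
//     if (lclNode->IsMultiReg())
//     {
//         regNumber firstReg  = lclNode->GetRegNumByIdx(0);
//         regNumber secondReg = lclNode->GetRegNumByIdx(1);
//         // ... the promoted fields live in 'firstReg' and 'secondReg' ...
//     }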
// gtLclFld -- load/store/addr of local variable field
struct GenTreeLclFld : public GenTreeLclVarCommon
{
private:
uint16_t m_lclOffs; // offset into the variable to access
FieldSeqNode* m_fieldSeq; // This LclFld node represents some sequences of accesses.
public:
GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs)
: GenTreeLclVarCommon(oper, type, lclNum), m_lclOffs(static_cast<uint16_t>(lclOffs)), m_fieldSeq(nullptr)
{
assert(lclOffs <= UINT16_MAX);
}
uint16_t GetLclOffs() const
{
return m_lclOffs;
}
void SetLclOffs(unsigned lclOffs)
{
assert(lclOffs <= UINT16_MAX);
m_lclOffs = static_cast<uint16_t>(lclOffs);
}
FieldSeqNode* GetFieldSeq() const
{
return m_fieldSeq;
}
void SetFieldSeq(FieldSeqNode* fieldSeq)
{
m_fieldSeq = fieldSeq;
}
#ifdef TARGET_ARM
bool IsOffsetMisaligned() const;
#endif // TARGET_ARM
#if DEBUGGABLE_GENTREE
GenTreeLclFld() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtCast -- conversion to a different type (GT_CAST) */
struct GenTreeCast : public GenTreeOp
{
GenTree*& CastOp()
{
return gtOp1;
}
var_types gtCastType;
GenTreeCast(var_types type, GenTree* op, bool fromUnsigned, var_types castType DEBUGARG(bool largeNode = false))
: GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType)
{
// We do not allow casts from floating point types to be treated as from
// unsigned to avoid bugs related to wrong GTF_UNSIGNED in case the
// CastOp's type changes.
assert(!varTypeIsFloating(op) || !fromUnsigned);
gtFlags |= fromUnsigned ? GTF_UNSIGNED : GTF_EMPTY;
}
#if DEBUGGABLE_GENTREE
GenTreeCast() : GenTreeOp()
{
}
#endif
};
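// Example (illustrative sketch, not part of the original header; real code typically goes through the
// compiler's node factory helpers). 'compiler' is assumed to be the current Compiler* and 'op' a
// hypothetical TYP_INT operand being cast down to a small type:
//
//     GenTreeCast* cast = new (compiler, GT_CAST) GenTreeCast(TYP_INT, op, /* fromUnsigned */ false, TYP_SHORT);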
// GT_BOX nodes are place markers for boxed values. The "real" tree
// for most purposes is in gtBoxOp.
struct GenTreeBox : public GenTreeUnOp
{
// An expanded helper call to implement the "box" if we don't get
// rid of it any other way. Must be in same position as op1.
GenTree*& BoxOp()
{
return gtOp1;
}
// This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value
// type
Statement* gtAsgStmtWhenInlinedBoxValue;
// And this is the statement that copies from the value being boxed to the box payload
Statement* gtCopyStmtWhenInlinedBoxValue;
GenTreeBox(var_types type,
GenTree* boxOp,
Statement* asgStmtWhenInlinedBoxValue,
Statement* copyStmtWhenInlinedBoxValue)
: GenTreeUnOp(GT_BOX, type, boxOp)
, gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue)
, gtCopyStmtWhenInlinedBoxValue(copyStmtWhenInlinedBoxValue)
{
}
#if DEBUGGABLE_GENTREE
GenTreeBox() : GenTreeUnOp()
{
}
#endif
};
// GenTreeField -- data member ref (GT_FIELD)
struct GenTreeField : public GenTreeUnOp
{
CORINFO_FIELD_HANDLE gtFldHnd;
DWORD gtFldOffset;
bool gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtFieldLookup;
#endif
GenTreeField(var_types type, GenTree* obj, CORINFO_FIELD_HANDLE fldHnd, DWORD offs)
: GenTreeUnOp(GT_FIELD, type, obj), gtFldHnd(fldHnd), gtFldOffset(offs), gtFldMayOverlap(false)
{
#ifdef FEATURE_READYTORUN
gtFieldLookup.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeField() : GenTreeUnOp()
{
}
#endif
// The object this field belongs to. Will be "nullptr" for static fields.
// Note that this is an address, i. e. for struct fields it will be ADDR(STRUCT).
GenTree* GetFldObj() const
{
return gtOp1;
}
// True if this field is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_FLD_VOLATILE) != 0;
}
};
// There was quite a bit of confusion in the code base about which of gtOp1 and gtOp2 was the
// 'then' and 'else' clause of a colon node. Adding these accessors, while not enforcing anything,
// at least *allows* the programmer to be obviously correct.
// However, these conventions seem backward.
// TODO-Cleanup: If we could get these accessors used everywhere, then we could switch them.
struct GenTreeColon : public GenTreeOp
{
GenTree*& ThenNode()
{
return gtOp2;
}
GenTree*& ElseNode()
{
return gtOp1;
}
#if DEBUGGABLE_GENTREE
GenTreeColon() : GenTreeOp()
{
}
#endif
GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode)
{
}
};
// gtCall -- method call (GT_CALL)
enum class InlineObservation;
//------------------------------------------------------------------------
// GenTreeCallFlags: a bitmask of flags for GenTreeCall stored in gtCallMoreFlags.
//
// clang-format off
enum GenTreeCallFlags : unsigned int
{
GTF_CALL_M_EMPTY = 0,
GTF_CALL_M_EXPLICIT_TAILCALL = 0x00000001, // the call is "tail" prefixed and importer has performed tail call checks
GTF_CALL_M_TAILCALL = 0x00000002, // the call is a tailcall
GTF_CALL_M_VARARGS = 0x00000004, // the call uses varargs ABI
GTF_CALL_M_RETBUFFARG = 0x00000008, // call has a return buffer argument
GTF_CALL_M_DELEGATE_INV = 0x00000010, // call to Delegate.Invoke
GTF_CALL_M_NOGCCHECK = 0x00000020, // not a call for computing full interruptability and therefore no GC check is required.
GTF_CALL_M_SPECIAL_INTRINSIC = 0x00000040, // function that could be optimized as an intrinsic
// in special cases. Used to optimize fast way out in morphing
GTF_CALL_M_UNMGD_THISCALL = 0x00000080, // "this" pointer (first argument) should be enregistered (only for GTF_CALL_UNMANAGED)
GTF_CALL_M_VIRTSTUB_REL_INDIRECT = 0x00000080, // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
GTF_CALL_M_NONVIRT_SAME_THIS = 0x00000080, // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
GTF_CALL_M_FRAME_VAR_DEATH = 0x00000100, // the compLvFrameListRoot variable dies here (last use)
GTF_CALL_M_TAILCALL_VIA_JIT_HELPER = 0x00000200, // call is a tail call dispatched via tail call JIT helper.
#if FEATURE_TAILCALL_OPT
GTF_CALL_M_IMPLICIT_TAILCALL = 0x00000400, // call is an opportunistic tail call and importer has performed tail call checks
GTF_CALL_M_TAILCALL_TO_LOOP = 0x00000800, // call is a fast recursive tail call that can be converted into a loop
#endif
GTF_CALL_M_PINVOKE = 0x00001000, // call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
// A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For e.g.
// an IL Stub dynamically generated for a PInvoke declaration is flagged as
// a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
// know when these flags are set.
GTF_CALL_M_R2R_REL_INDIRECT = 0x00002000, // ready to run call is indirected through a relative address
GTF_CALL_M_DOES_NOT_RETURN = 0x00004000, // call does not return
GTF_CALL_M_WRAPPER_DELEGATE_INV = 0x00008000, // call is in wrapper delegate
GTF_CALL_M_FAT_POINTER_CHECK = 0x00010000, // CoreRT managed calli needs transformation, that checks
// special bit in calli address. If it is set, then it is necessary
// to restore real function address and load hidden argument
// as the first argument for calli. It is CoreRT replacement for instantiating
// stubs, because executable code cannot be generated at runtime.
GTF_CALL_M_HELPER_SPECIAL_DCE = 0x00020000, // this helper call can be removed if it is part of a comma and
// the comma result is unused.
GTF_CALL_M_DEVIRTUALIZED = 0x00040000, // this call was devirtualized
GTF_CALL_M_UNBOXED = 0x00080000, // this call was optimized to use the unboxed entry point
GTF_CALL_M_GUARDED_DEVIRT = 0x00100000, // this call is a candidate for guarded devirtualization
GTF_CALL_M_GUARDED_DEVIRT_CHAIN = 0x00200000, // this call is a candidate for chained guarded devirtualization
GTF_CALL_M_GUARDED = 0x00400000, // this call was transformed by guarded devirtualization
GTF_CALL_M_ALLOC_SIDE_EFFECTS = 0x00800000, // this is a call to an allocator with side effects
GTF_CALL_M_SUPPRESS_GC_TRANSITION = 0x01000000, // suppress the GC transition (i.e. during a pinvoke) but a separate GC safe point is required.
    GTF_CALL_M_EXP_RUNTIME_LOOKUP      = 0x02000000, // this call needs to be transformed into CFG for the dynamic dictionary expansion feature.
GTF_CALL_M_STRESS_TAILCALL = 0x04000000, // the call is NOT "tail" prefixed but GTF_CALL_M_EXPLICIT_TAILCALL was added because of tail call stress mode
GTF_CALL_M_EXPANDED_EARLY = 0x08000000, // the Virtual Call target address is expanded and placed in gtControlExpr in Morph rather than in Lower
    GTF_CALL_M_LATE_DEVIRT             = 0x10000000, // this call has late devirtualization info
};
inline constexpr GenTreeCallFlags operator ~(GenTreeCallFlags a)
{
return (GenTreeCallFlags)(~(unsigned int)a);
}
inline constexpr GenTreeCallFlags operator |(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeCallFlags operator &(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeCallFlags& operator |=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeCallFlags& operator &=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
// clang-format on
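// Example (illustrative sketch, not part of the original header): the operators above allow the usual
// bitmask idioms on a hypothetical 'call' node's gtCallMoreFlags field.
//
//     call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED | GTF_CALL_M_GUARDED;
//     if ((call->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0)
//     {
//         // ... the call was marked as no-return ...
//     }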
// Return type descriptor of a GT_CALL node.
// x64 Unix, Arm64, Arm32 and x86 allow a value to be returned in multiple
// registers. For such calls this struct provides the following info
// on their return type
// - type of value returned in each return register
// - ABI return register numbers in which the value is returned
// - count of return registers in which the value is returned
//
// TODO-ARM: Update this to meet the needs of Arm64 and Arm32
//
// TODO-AllArch: Right now it is used for describing multi-reg returned types.
// Eventually we would want to use it for describing even single-reg
// returned types (e.g. structs returned in single register x64/arm).
// This would allow us not to lie or normalize single struct return
// values in importer/morph.
struct ReturnTypeDesc
{
private:
var_types m_regType[MAX_RET_REG_COUNT];
bool m_isEnclosingType;
#ifdef DEBUG
bool m_inited;
#endif
public:
ReturnTypeDesc()
{
Reset();
}
// Initialize the Return Type Descriptor for a method that returns a struct type
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv);
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
// Only needed for X86 and arm32.
void InitializeLongReturnType();
// Reset type descriptor to defaults
void Reset()
{
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
m_regType[i] = TYP_UNKNOWN;
}
m_isEnclosingType = false;
#ifdef DEBUG
m_inited = false;
#endif
}
#ifdef DEBUG
// NOTE: we only use this function when writing out IR dumps. These dumps may take place before the ReturnTypeDesc
// has been initialized.
unsigned TryGetReturnRegCount() const
{
return m_inited ? GetReturnRegCount() : 0;
}
#endif // DEBUG
//--------------------------------------------------------------------------------------------
// GetReturnRegCount: Get the count of return registers in which the return value is returned.
//
// Arguments:
// None
//
// Return Value:
// Count of return registers.
// Returns 0 if the return type is not returned in registers.
unsigned GetReturnRegCount() const
{
assert(m_inited);
int regCount = 0;
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
if (m_regType[i] == TYP_UNKNOWN)
{
break;
}
// otherwise
regCount++;
}
#ifdef DEBUG
        // Any remaining elements in m_regType[] should also be TYP_UNKNOWN
for (unsigned i = regCount + 1; i < MAX_RET_REG_COUNT; ++i)
{
assert(m_regType[i] == TYP_UNKNOWN);
}
#endif
return regCount;
}
//-----------------------------------------------------------------------
// IsMultiRegRetType: check whether the type is returned in multiple
// return registers.
//
// Arguments:
// None
//
// Return Value:
// Returns true if the type is returned in multiple return registers.
// False otherwise.
// Note that we only have to examine the first two values to determine this
//
bool IsMultiRegRetType() const
{
if (MAX_RET_REG_COUNT < 2)
{
return false;
}
else
{
assert(m_inited);
return ((m_regType[0] != TYP_UNKNOWN) && (m_regType[1] != TYP_UNKNOWN));
}
}
//--------------------------------------------------------------------------
// GetReturnRegType: Get var_type of the return register specified by index.
//
// Arguments:
// index - Index of the return register.
// First return register will have an index 0 and so on.
//
// Return Value:
// var_type of the return register specified by its index.
// asserts if the index does not have a valid register return type.
var_types GetReturnRegType(unsigned index) const
{
var_types result = m_regType[index];
assert(result != TYP_UNKNOWN);
return result;
}
// True if this value is returned in integer register
// that is larger than the type itself.
bool IsEnclosingType() const
{
return m_isEnclosingType;
}
// Get i'th ABI return register
regNumber GetABIReturnReg(unsigned idx) const;
// Get reg mask of ABI return registers
regMaskTP GetABIReturnRegs() const;
};
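// Example (illustrative sketch, not part of the original header; 'call' is a hypothetical GenTreeCall*
// whose return type descriptor has already been initialized): walking the registers used for a
// multi-reg return value.
//
//     const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
//     for (unsigned i = 0; i < retTypeDesc->GetReturnRegCount(); i++)
//     {
//         var_types regType = retTypeDesc->GetReturnRegType(i);
//         regNumber reg     = retTypeDesc->GetABIReturnReg(i);
//         // ... the i'th piece of the value has type 'regType' and is returned in 'reg' ...
//     }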
class TailCallSiteInfo
{
bool m_isCallvirt : 1;
bool m_isCalli : 1;
CORINFO_SIG_INFO m_sig;
CORINFO_RESOLVED_TOKEN m_token;
public:
// Is the tailcall a callvirt instruction?
bool IsCallvirt()
{
return m_isCallvirt;
}
// Is the tailcall a calli instruction?
bool IsCalli()
{
return m_isCalli;
}
// Get the token of the callee
CORINFO_RESOLVED_TOKEN* GetToken()
{
assert(!IsCalli());
return &m_token;
}
// Get the signature of the callee
CORINFO_SIG_INFO* GetSig()
{
return &m_sig;
}
// Mark the tailcall as a calli with the given signature
void SetCalli(CORINFO_SIG_INFO* sig)
{
m_isCallvirt = false;
m_isCalli = true;
m_sig = *sig;
}
// Mark the tailcall as a callvirt with the given signature and token
void SetCallvirt(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = true;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
// Mark the tailcall as a call with the given signature and token
void SetCall(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = false;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
};
class fgArgInfo;
enum class NonStandardArgKind : unsigned
{
None,
PInvokeFrame,
PInvokeTarget,
PInvokeCookie,
WrapperDelegateCell,
ShiftLow,
ShiftHigh,
FixedRetBuffer,
VirtualStubCell,
R2RIndirectionCell,
ValidateIndirectCallTarget,
// If changing this enum also change getNonStandardArgKindName and isNonStandardArgAddedLate in fgArgInfo
};
#ifdef DEBUG
const char* getNonStandardArgKindName(NonStandardArgKind kind);
#endif
enum class CFGCallKind
{
ValidateAndCall,
Dispatch,
};
struct GenTreeCall final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node != nullptr);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node != nullptr);
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
Use* GetUse() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtCallThisArg; // The instance argument ('this' pointer)
Use* gtCallArgs; // The list of arguments in original evaluation order
Use* gtCallLateArgs; // On x86: The register arguments in an optimal order
// On ARM/x64: - also includes any outgoing arg space arguments
// - that were evaluated into a temp LclVar
fgArgInfo* fgArgInfo;
UseList Args()
{
return UseList(gtCallArgs);
}
UseList LateArgs()
{
return UseList(gtCallLateArgs);
}
#ifdef DEBUG
// Used to register callsites with the EE
CORINFO_SIG_INFO* callSig;
#endif
union {
TailCallSiteInfo* tailCallInfo;
// Only used for unmanaged calls, which cannot be tail-called
CorInfoCallConvExtension unmgdCallConv;
};
#if FEATURE_MULTIREG_RET
// State required to support multi-reg returning call nodes.
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
ReturnTypeDesc gtReturnTypeDesc;
// GetRegNum() would always be the first return reg.
// The following array holds the other reg numbers of multi-reg return.
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
#endif // FEATURE_MULTIREG_RET
//-----------------------------------------------------------------------
// GetReturnTypeDesc: get the type descriptor of return value of the call
//
// Arguments:
// None
//
// Returns
// Type descriptor of the value returned by call
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
const ReturnTypeDesc* GetReturnTypeDesc() const
{
#if FEATURE_MULTIREG_RET
        return &gtReturnTypeDesc;
#else
return nullptr;
#endif
}
void InitializeLongReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeLongReturnType();
#endif
}
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv)
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeStructReturnType(comp, retClsHnd, callConv);
#endif
}
void ResetReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.Reset();
#endif
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th return register allocated to this call node.
//
// Arguments:
// idx - index of the return register
//
// Return Value:
// Return regNumber of i'th return register of call node.
// Returns REG_NA if there is no valid return register for the given index.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th return register of this call node
//
// Arguments:
// reg - reg number
// idx - index of the return register
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
unreached();
#endif
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given call node to this node
//
// Arguments:
// fromCall - GenTreeCall node from which to copy multi-reg state
//
// Return Value:
// None
//
void CopyOtherRegs(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
this->gtOtherRegs[i] = fromCall->gtOtherRegs[i];
}
#endif
}
// Get reg mask of all the valid registers of gtOtherRegs array
regMaskTP GetOtherRegMask() const;
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
#if FEATURE_MULTIREG_RET
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
#else
assert(!"unreached");
return GTF_EMPTY;
#endif
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//-------------------------------------------------------------------
    // ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = 0;
#endif
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given call node.
//
// Arguments:
// fromCall - GenTreeCall node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
this->gtSpillFlags = fromCall->gtSpillFlags;
#endif
}
bool IsUnmanaged() const
{
return (gtFlags & GTF_CALL_UNMANAGED) != 0;
}
bool NeedsNullCheck() const
{
return (gtFlags & GTF_CALL_NULLCHECK) != 0;
}
bool CallerPop() const
{
return (gtFlags & GTF_CALL_POP_ARGS) != 0;
}
bool IsVirtual() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT;
}
bool IsVirtualStub() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB;
}
bool IsVirtualVtable() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE;
}
bool IsInlineCandidate() const
{
return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0;
}
bool IsR2ROrVirtualStubRelativeIndir()
{
#if defined(FEATURE_READYTORUN)
if (IsR2RRelativeIndir())
{
return true;
}
#endif
return IsVirtualStubRelativeIndir();
}
bool HasNonStandardAddedArgs(Compiler* compiler) const;
int GetNonStandardAddedArgCount(Compiler* compiler) const;
// Returns true if this call uses a retBuf argument and its calling convention
bool HasRetBufArg() const
{
return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) != 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
// compiler, the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument
// This method may actually have a retBuf argument
// or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
bool TreatAsHasRetBufArg(Compiler* compiler) const;
bool HasFixedRetBufArg() const
{
if (!(hasFixedRetBuffReg() && HasRetBufArg()))
{
return false;
}
#if !defined(TARGET_ARM)
return !TargetOS::IsWindows || !callConvIsInstanceMethodCallConv(GetUnmanagedCallConv());
#else
return true;
#endif
}
//-----------------------------------------------------------------------------------------
// HasMultiRegRetVal: whether the call node returns its value in multiple return registers.
//
// Arguments:
// None
//
// Return Value:
// True if the call is returning a multi-reg return value. False otherwise.
//
bool HasMultiRegRetVal() const
{
#ifdef FEATURE_MULTIREG_RET
#if defined(TARGET_X86) || defined(TARGET_ARM)
if (varTypeIsLong(gtType))
{
return true;
}
#endif
if (!varTypeIsStruct(gtType) || HasRetBufArg())
{
return false;
}
// Now it is a struct that is returned in registers.
return GetReturnTypeDesc()->IsMultiRegRetType();
#else // !FEATURE_MULTIREG_RET
return false;
#endif // !FEATURE_MULTIREG_RET
}
// Returns true if VM has flagged this method as CORINFO_FLG_PINVOKE.
bool IsPInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0;
}
    // Note that the distinction between a tail-prefixed and an implicit tail call
    // is maintained on a call node until fgMorphCall(), after which it will be
    // either a tail call (i.e. IsTailCall() is true) or a non-tail call.
bool IsTailPrefixedCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
}
// Returns true if this call didn't have an explicit tail. prefix in the IL
// but was marked as an explicit tail call because of tail call stress mode.
bool IsStressTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_STRESS_TAILCALL) != 0;
}
    // This method returning "true" implies that tail call flowgraph morphing has
// performed final checks and committed to making a tail call.
bool IsTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0;
}
    // This method returning "true" implies that the importer has performed tail call checks
    // and is providing a hint that this can be converted to a tail call.
bool CanTailCall() const
{
return IsTailPrefixedCall() || IsImplicitTailCall();
}
// Check whether this is a tailcall dispatched via JIT helper. We only use
// this mechanism on x86 as it is faster than our other more general
// tailcall mechanism.
bool IsTailCallViaJitHelper() const
{
#ifdef TARGET_X86
return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return false;
#endif
}
#if FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
#ifdef TARGET_X86
return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return IsTailCall();
#endif
}
#else // !FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
#if FEATURE_TAILCALL_OPT
// Returns true if this is marked for opportunistic tail calling.
    // That is, it can be tail called even though it is not explicitly prefixed with the "tail" prefix.
bool IsImplicitTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0;
}
bool IsTailCallConvertibleToLoop() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL_TO_LOOP) != 0;
}
#else // !FEATURE_TAILCALL_OPT
bool IsImplicitTailCall() const
{
return false;
}
bool IsTailCallConvertibleToLoop() const
{
return false;
}
#endif // !FEATURE_TAILCALL_OPT
bool NormalizesSmallTypesOnReturn()
{
return GetUnmanagedCallConv() == CorInfoCallConvExtension::Managed;
}
bool IsSameThis() const
{
return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0;
}
bool IsDelegateInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0;
}
bool IsVirtualStubRelativeIndir() const
{
return IsVirtualStub() && (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0;
}
bool IsR2RRelativeIndir() const
{
#ifdef FEATURE_READYTORUN
return (gtCallMoreFlags & GTF_CALL_M_R2R_REL_INDIRECT) != 0;
#else
return false;
#endif
}
#ifdef FEATURE_READYTORUN
void setEntryPoint(const CORINFO_CONST_LOOKUP& entryPoint)
{
gtEntryPoint = entryPoint;
if (gtEntryPoint.accessType == IAT_PVALUE)
{
gtCallMoreFlags |= GTF_CALL_M_R2R_REL_INDIRECT;
}
}
#endif // FEATURE_READYTORUN
bool IsVarargs() const
{
return (gtCallMoreFlags & GTF_CALL_M_VARARGS) != 0;
}
bool IsNoReturn() const
{
return (gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
}
bool IsFatPointerCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_FAT_POINTER_CHECK) != 0;
}
bool IsGuardedDevirtualizationCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT) != 0;
}
bool IsPure(Compiler* compiler) const;
bool HasSideEffects(Compiler* compiler, bool ignoreExceptions = false, bool ignoreCctors = false) const;
void ClearFatPointerCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_FAT_POINTER_CHECK;
}
void SetFatPointerCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_FAT_POINTER_CHECK;
}
bool IsDevirtualized() const
{
return (gtCallMoreFlags & GTF_CALL_M_DEVIRTUALIZED) != 0;
}
bool IsGuarded() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED) != 0;
}
bool IsUnboxed() const
{
return (gtCallMoreFlags & GTF_CALL_M_UNBOXED) != 0;
}
bool IsSuppressGCTransition() const
{
return (gtCallMoreFlags & GTF_CALL_M_SUPPRESS_GC_TRANSITION) != 0;
}
void ClearGuardedDevirtualizationCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_GUARDED_DEVIRT;
}
void SetGuardedDevirtualizationCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED_DEVIRT;
}
void SetIsGuarded()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED;
}
void SetExpRuntimeLookup()
{
gtCallMoreFlags |= GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
void ClearExpRuntimeLookup()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
bool IsExpRuntimeLookup() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXP_RUNTIME_LOOKUP) != 0;
}
void SetExpandedEarly()
{
gtCallMoreFlags |= GTF_CALL_M_EXPANDED_EARLY;
}
void ClearExpandedEarly()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXPANDED_EARLY;
}
bool IsExpandedEarly() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPANDED_EARLY) != 0;
}
//-----------------------------------------------------------------------------------------
// GetIndirectionCellArgKind: Get the kind of indirection cell used by this call.
//
// Arguments:
// None
//
// Return Value:
// The kind (either R2RIndirectionCell or VirtualStubCell),
// or NonStandardArgKind::None if this call does not have an indirection cell.
//
NonStandardArgKind GetIndirectionCellArgKind() const
{
if (IsVirtualStub())
{
return NonStandardArgKind::VirtualStubCell;
}
#if defined(TARGET_ARMARCH)
// For ARM architectures, we always use an indirection cell for R2R calls.
if (IsR2RRelativeIndir())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#elif defined(TARGET_XARCH)
// On XARCH the indirection cell can usually be recovered by disassembling the call site;
// only (fast) tail calls still need it passed as an argument.
if (IsR2RRelativeIndir() && IsFastTailCall())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#endif
return NonStandardArgKind::None;
}
CFGCallKind GetCFGCallKind()
{
#if defined(TARGET_AMD64)
// On x64 the dispatcher is more performant, but we cannot use it when
// we need to pass indirection cells as those go into registers that
// are clobbered by the dispatch helper.
bool mayUseDispatcher = GetIndirectionCellArgKind() == NonStandardArgKind::None;
bool shouldUseDispatcher = true;
#elif defined(TARGET_ARM64)
bool mayUseDispatcher = true;
// Branch predictors on ARM64 generally do not handle the dispatcher as
// well as on x64 hardware, so only use the validator by default.
bool shouldUseDispatcher = false;
#else
// Other platforms do not even support the dispatcher.
bool mayUseDispatcher = false;
bool shouldUseDispatcher = false;
#endif
#ifdef DEBUG
switch (JitConfig.JitCFGUseDispatcher())
{
case 0:
shouldUseDispatcher = false;
break;
case 1:
shouldUseDispatcher = true;
break;
default:
break;
}
#endif
return mayUseDispatcher && shouldUseDispatcher ? CFGCallKind::Dispatch : CFGCallKind::ValidateAndCall;
}
void ResetArgInfo();
GenTreeCallFlags gtCallMoreFlags; // in addition to gtFlags
gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration
var_types gtReturnType : 5; // exact return type
CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available
void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
union {
// only used for CALLI unmanaged calls (CT_INDIRECT)
GenTree* gtCallCookie;
// gtInlineCandidateInfo is only used when inlining methods
InlineCandidateInfo* gtInlineCandidateInfo;
GuardedDevirtualizationCandidateInfo* gtGuardedDevirtualizationCandidateInfo;
ClassProfileCandidateInfo* gtClassProfileCandidateInfo;
LateDevirtualizationInfo* gtLateDevirtualizationInfo;
CORINFO_GENERIC_HANDLE compileTimeHelperArgumentHandle; // Used to track type handle argument of dynamic helpers
void* gtDirectCallAddress; // Used to pass direct call address between lower and codegen
};
// expression evaluated after args are placed which determines the control target
GenTree* gtControlExpr;
union {
CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC or CT_HELPER
GenTree* gtCallAddr; // CT_INDIRECT
};
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// For non-inline candidates, track the first observation
// that blocks candidacy.
InlineObservation gtInlineObservation;
// IL offset of the call wrt its parent method.
IL_OFFSET gtRawILOffset;
// In DEBUG we report even non inline candidates in the inline tree in
// fgNoteNonInlineCandidate. We need to keep around the inline context for
// this as normally it's part of the candidate info.
class InlineContext* gtInlineContext;
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool IsHelperCall() const
{
return gtCallType == CT_HELPER;
}
bool IsHelperCall(CORINFO_METHOD_HANDLE callMethHnd) const
{
return IsHelperCall() && (callMethHnd == gtCallMethHnd);
}
bool IsHelperCall(Compiler* compiler, unsigned helper) const;
void ReplaceCallOperand(GenTree** operandUseEdge, GenTree* replacement);
bool AreArgsComplete() const;
CorInfoCallConvExtension GetUnmanagedCallConv() const
{
return IsUnmanaged() ? unmgdCallConv : CorInfoCallConvExtension::Managed;
}
static bool Equals(GenTreeCall* c1, GenTreeCall* c2);
GenTreeCall(var_types type) : GenTree(GT_CALL, type)
{
fgArgInfo = nullptr;
}
#if DEBUGGABLE_GENTREE
GenTreeCall() : GenTree()
{
}
#endif
};
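// GenTreeCmpXchg (GT_CMPXCHG) -- an atomic compare-and-exchange (e.g. Interlocked.CompareExchange):
// the value at gtOpLocation is compared with gtOpComparand and, if they are equal, replaced with
// gtOpValue; the node produces the original value read from the location.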
struct GenTreeCmpXchg : public GenTree
{
GenTree* gtOpLocation;
GenTree* gtOpValue;
GenTree* gtOpComparand;
GenTreeCmpXchg(var_types type, GenTree* loc, GenTree* val, GenTree* comparand)
: GenTree(GT_CMPXCHG, type), gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
{
// There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
// have global effects.
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
// Merge in flags from operands
gtFlags |= gtOpLocation->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpValue->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpComparand->gtFlags & GTF_ALL_EFFECT;
}
#if DEBUGGABLE_GENTREE
GenTreeCmpXchg() : GenTree()
{
}
#endif
};
#if !defined(TARGET_64BIT)
struct GenTreeMultiRegOp : public GenTreeOp
{
regNumber gtOtherReg;
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flag of each of the
// return register is stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
MultiRegSpillFlags gtSpillFlags;
GenTreeMultiRegOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
: GenTreeOp(oper, type, op1, op2), gtOtherReg(REG_NA)
{
ClearOtherRegFlags();
}
unsigned GetRegCount() const
{
return (TypeGet() == TYP_LONG) ? 2 : 1;
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
// idx - index of the register
//
// Return Value:
// Return regNumber of i'th register of this register argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < 2);
if (idx == 0)
{
return GetRegNum();
}
return gtOtherReg;
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
//
var_types GetRegType(unsigned index) const
{
assert(index < 2);
// The register type is usually the same as the GenTree type, since GenTreeMultiRegOp usually
// defines a single register.
// The special case is TYP_LONG, which may be a MUL_LONG or a DOUBLE arg passed as LONG,
// in which case each of the two registers is returned as TYP_INT.
var_types result = TypeGet();
if (result == TYP_LONG)
{
result = TYP_INT;
}
return result;
}
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear the per-register GTF_* spill flags stored in gtSpillFlags
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreeMultiRegOp() : GenTreeOp()
{
}
#endif
};
#endif // !defined(TARGET_64BIT)
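// GenTreeFptrVal (GT_FTN_ADDR) -- represents the address of the method identified by gtFptrMethod.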
struct GenTreeFptrVal : public GenTree
{
CORINFO_METHOD_HANDLE gtFptrMethod;
bool gtFptrDelegateTarget;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth)
: GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth), gtFptrDelegateTarget(false)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
gtEntryPoint.accessType = IAT_VALUE;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeFptrVal() : GenTree()
{
}
#endif
};
/* gtQmark */
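// A GT_QMARK node is a conditional expression: gtOp1 is the TYP_INT condition and gtOp2 is a
// GT_COLON node that holds the two result expressions (see the asserts in the constructor below).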
struct GenTreeQmark : public GenTreeOp
{
GenTreeQmark(var_types type, GenTree* cond, GenTreeColon* colon) : GenTreeOp(GT_QMARK, type, cond, colon)
{
// These must follow a specific form.
assert((cond != nullptr) && cond->TypeIs(TYP_INT));
assert((colon != nullptr) && colon->OperIs(GT_COLON));
}
#if DEBUGGABLE_GENTREE
GenTreeQmark() : GenTreeOp()
{
}
#endif
};
/* gtIntrinsic -- intrinsic (possibly-binary op [NULL op2 is allowed] with an additional field) */
struct GenTreeIntrinsic : public GenTreeOp
{
NamedIntrinsic gtIntrinsicName;
CORINFO_METHOD_HANDLE gtMethodHandle; // Method handle of the method which is treated as an intrinsic.
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeIntrinsic(var_types type, GenTree* op1, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
GenTreeIntrinsic(
var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
#if DEBUGGABLE_GENTREE
GenTreeIntrinsic() : GenTreeOp()
{
}
#endif
};
// GenTreeMultiOp - a node with a flexible count of operands stored in an array.
// The array can be an inline one, or a dynamic one, or both, with switching
// between them supported. See GenTreeJitIntrinsic for an example of a node
// utilizing GenTreeMultiOp. GTF_REVERSE_OPS is supported for GenTreeMultiOp's
// with two operands.
//
struct GenTreeMultiOp : public GenTree
{
public:
class Iterator
{
protected:
GenTree** m_use;
Iterator(GenTree** use) : m_use(use)
{
}
public:
Iterator& operator++()
{
m_use++;
return *this;
}
bool operator==(const Iterator& other) const
{
return m_use == other.m_use;
}
bool operator!=(const Iterator& other) const
{
return m_use != other.m_use;
}
};
class OperandsIterator final : public Iterator
{
public:
OperandsIterator(GenTree** use) : Iterator(use)
{
}
GenTree* operator*()
{
return *m_use;
}
};
class UseEdgesIterator final : public Iterator
{
public:
UseEdgesIterator(GenTree** use) : Iterator(use)
{
}
GenTree** operator*()
{
return m_use;
}
};
private:
GenTree** m_operands;
protected:
template <unsigned InlineOperandCount, typename... Operands>
GenTreeMultiOp(genTreeOps oper,
var_types type,
CompAllocator allocator,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode),
Operands... operands)
: GenTree(oper, type DEBUGARG(largeNode))
{
const size_t OperandCount = sizeof...(Operands);
m_operands = (OperandCount <= InlineOperandCount) ? inlineOperands : allocator.allocate<GenTree*>(OperandCount);
// "OperandCount + 1" so that it works well when OperandCount is 0.
GenTree* operandsArray[OperandCount + 1]{operands...};
InitializeOperands(operandsArray, OperandCount);
}
// Note that this constructor takes ownership of the "operands" array.
template <unsigned InlineOperandCount>
GenTreeMultiOp(genTreeOps oper,
var_types type,
GenTree** operands,
size_t operandCount,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode))
: GenTree(oper, type DEBUGARG(largeNode))
{
m_operands = (operandCount <= InlineOperandCount) ? inlineOperands : operands;
InitializeOperands(operands, operandCount);
}
public:
#if DEBUGGABLE_GENTREE
GenTreeMultiOp() : GenTree()
{
}
#endif
GenTree*& Op(size_t index)
{
size_t actualIndex = index - 1;
assert(actualIndex < m_operandCount);
assert(m_operands[actualIndex] != nullptr);
return m_operands[actualIndex];
}
GenTree* Op(size_t index) const
{
return const_cast<GenTreeMultiOp*>(this)->Op(index);
}
// Note that unlike the general "Operands" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<OperandsIterator> Operands()
{
return MakeIteratorPair(OperandsIterator(GetOperandArray()),
OperandsIterator(GetOperandArray() + GetOperandCount()));
}
// Note that unlike the general "UseEdges" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<UseEdgesIterator> UseEdges()
{
return MakeIteratorPair(UseEdgesIterator(GetOperandArray()),
UseEdgesIterator(GetOperandArray() + GetOperandCount()));
}
size_t GetOperandCount() const
{
return m_operandCount;
}
GenTree** GetOperandArray(size_t startIndex = 0) const
{
return m_operands + startIndex;
}
protected:
// Reconfigures the operand array, leaving it in a "dirty" state.
void ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount);
static bool OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2);
private:
void InitializeOperands(GenTree** operands, size_t operandCount);
void SetOperandCount(size_t newOperandCount)
{
assert(FitsIn<uint8_t>(newOperandCount));
m_operandCount = static_cast<uint8_t>(newOperandCount);
}
};
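// Usage sketch (illustrative): operands of a GenTreeMultiOp-derived node can be visited in
// operand order with the "Operands" iterator (which, unlike the general iterator, does not
// respect GTF_REVERSE_OPS), or accessed directly via the 1-based Op(i) accessor:
//
//   for (GenTree* operand : multiOp->Operands())
//   {
//       // visit "operand"
//   }
//
//   GenTree* first = multiOp->Op(1);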
// Helper class used to implement the constructor of GenTreeJitIntrinsic which
// transfers the ownership of the passed-in array to the underlying MultiOp node.
class IntrinsicNodeBuilder final
{
friend struct GenTreeJitIntrinsic;
GenTree** m_operands;
size_t m_operandCount;
GenTree* m_inlineOperands[2];
public:
IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) : m_operandCount(operandCount)
{
m_operands =
(operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands : allocator.allocate<GenTree*>(operandCount);
#ifdef DEBUG
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
}
IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) : m_operandCount(source->GetOperandCount())
{
m_operands = (m_operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands
: allocator.allocate<GenTree*>(m_operandCount);
for (size_t i = 0; i < m_operandCount; i++)
{
m_operands[i] = source->Op(i + 1);
}
}
void AddOperand(size_t index, GenTree* operand)
{
assert(index < m_operandCount);
assert(m_operands[index] == nullptr);
m_operands[index] = operand;
}
GenTree* GetOperand(size_t index) const
{
assert(index < m_operandCount);
assert(m_operands[index] != nullptr);
return m_operands[index];
}
size_t GetOperandCount() const
{
return m_operandCount;
}
private:
GenTree** GetBuiltOperands()
{
#ifdef DEBUG
for (size_t i = 0; i < m_operandCount; i++)
{
assert(m_operands[i] != nullptr);
}
#endif // DEBUG
return m_operands;
}
};
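// Usage sketch (illustrative; "allocator" stands for any CompAllocator and "ops"/"opCount"
// are assumed locals):
//
//   IntrinsicNodeBuilder builder(allocator, opCount);
//   for (size_t i = 0; i < opCount; i++)
//   {
//       builder.AddOperand(i, ops[i]);
//   }
//   // std::move(builder) is then passed to a GenTreeHWIntrinsic/GenTreeSIMD constructor,
//   // which takes ownership of the operand array.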
struct GenTreeJitIntrinsic : public GenTreeMultiOp
{
protected:
GenTree* gtInlineOperands[2];
uint16_t gtLayoutNum;
unsigned char gtAuxiliaryJitType; // For intrinsics that need another type (e.g. Avx2.Gather* or SIMD (by element))
regNumberSmall gtOtherReg; // For intrinsics that return 2 registers
unsigned char gtSimdBaseJitType; // SIMD vector base JIT type
unsigned char gtSimdSize; // SIMD vector size in bytes, use 0 for scalar intrinsics
#if defined(FEATURE_SIMD)
union {
SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
NamedIntrinsic gtHWIntrinsicId;
};
#else
NamedIntrinsic gtHWIntrinsicId;
#endif
public:
unsigned GetLayoutNum() const
{
return gtLayoutNum;
}
void SetLayoutNum(unsigned layoutNum)
{
assert(FitsIn<uint16_t>(layoutNum));
gtLayoutNum = static_cast<uint16_t>(layoutNum);
}
regNumber GetOtherReg() const
{
return (regNumber)gtOtherReg;
}
void SetOtherReg(regNumber reg)
{
gtOtherReg = (regNumberSmall)reg;
assert(gtOtherReg == reg);
}
CorInfoType GetAuxiliaryJitType() const
{
return (CorInfoType)gtAuxiliaryJitType;
}
void SetAuxiliaryJitType(CorInfoType auxiliaryJitType)
{
gtAuxiliaryJitType = (unsigned char)auxiliaryJitType;
assert(gtAuxiliaryJitType == auxiliaryJitType);
}
var_types GetAuxiliaryType() const;
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)gtSimdBaseJitType;
}
CorInfoType GetNormalizedSimdBaseJitType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
switch (simdBaseJitType)
{
case CORINFO_TYPE_NATIVEINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_LONG;
#else
return CORINFO_TYPE_INT;
#endif
}
case CORINFO_TYPE_NATIVEUINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_ULONG;
#else
return CORINFO_TYPE_UINT;
#endif
}
default:
return simdBaseJitType;
}
}
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
gtSimdBaseJitType = (unsigned char)simdBaseJitType;
assert(gtSimdBaseJitType == simdBaseJitType);
}
var_types GetSimdBaseType() const;
unsigned char GetSimdSize() const
{
return gtSimdSize;
}
void SetSimdSize(unsigned simdSize)
{
gtSimdSize = (unsigned char)simdSize;
assert(gtSimdSize == simdSize);
}
template <typename... Operands>
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
CompAllocator allocator,
CorInfoType simdBaseJitType,
unsigned simdSize,
Operands... operands)
: GenTreeMultiOp(oper, type, allocator, gtInlineOperands DEBUGARG(false), operands...)
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
#if DEBUGGABLE_GENTREE
GenTreeJitIntrinsic() : GenTreeMultiOp()
{
}
#endif
protected:
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeMultiOp(oper,
type,
nodeBuilder.GetBuiltOperands(),
nodeBuilder.GetOperandCount(),
gtInlineOperands DEBUGARG(false))
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
public:
bool isSIMD() const
{
return gtSimdSize != 0;
}
};
#ifdef FEATURE_SIMD
/* gtSIMD -- SIMD intrinsic (possibly-binary op [NULL op2 is allowed] with additional fields) */
struct GenTreeSIMD : public GenTreeJitIntrinsic
{
GenTreeSIMD(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1, op2)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
#if DEBUGGABLE_GENTREE
GenTreeSIMD() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
SIMDIntrinsicID GetSIMDIntrinsicId() const
{
return gtSIMDIntrinsicID;
}
static bool Equals(GenTreeSIMD* op1, GenTreeSIMD* op2);
};
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic
{
GenTreeHWIntrinsic(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
SetHWIntrinsicId(hwIntrinsicID);
if (OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
template <typename... Operands>
GenTreeHWIntrinsic(var_types type,
CompAllocator allocator,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic,
Operands... operands)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...)
{
SetHWIntrinsicId(hwIntrinsicID);
if ((sizeof...(Operands) > 0) && OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
#if DEBUGGABLE_GENTREE
GenTreeHWIntrinsic() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
bool OperIsMemoryStore() const; // Returns true for the HW Intrinsic instructions that have MemoryStore semantics,
// false otherwise
bool OperIsMemoryLoadOrStore() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad or
// MemoryStore semantics, false otherwise
bool IsSimdAsHWIntrinsic() const
{
return (gtFlags & GTF_SIMDASHW_OP) != 0;
}
unsigned GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3);
NamedIntrinsic GetHWIntrinsicId() const;
//---------------------------------------------------------------------------------------
// ChangeHWIntrinsicId: Change the intrinsic id for this node.
//
// This method just sets the intrinsic id, asserting that the new intrinsic
// has the same number of operands as the old one, optionally setting some of
// the new operands. Intrinsics with an unknown number of operands are exempt
// from the "do I have the same number of operands" check, however, so this method must
// be used with care. Use "ResetHWIntrinsicId" if you need to fully reconfigure
// the node for a different intrinsic, with a possibly different number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// operands - optional operands to set while changing the id
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ChangeHWIntrinsicId(NamedIntrinsic intrinsicId, Operands... operands)
{
const size_t OperandCount = sizeof...(Operands);
assert(OperandCount <= GetOperandCount());
SetHWIntrinsicId(intrinsicId);
GenTree* operandsArray[OperandCount + 1]{operands...};
GenTree** operandsStore = GetOperandArray();
for (size_t i = 0; i < OperandCount; i++)
{
operandsStore[i] = operandsArray[i];
}
}
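// Example (illustrative; "newId", "newOp1" and "newOp2" are assumed locals): bash a
// two-operand node to another intrinsic with the same operand count while replacing
// its operands:
//
//   node->ChangeHWIntrinsicId(newId, newOp1, newOp2);
//
// The caller remains responsible for updating side effect flags.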
//---------------------------------------------------------------------------------------
// ResetHWIntrinsicId: Reset the intrinsic id for this node.
//
// This method resets the intrinsic id, fully reconfiguring the node. It must
// be supplied with all the operands the new node needs, and can allocate a
// new dynamic array if the operands do not fit into the inline one, in which
// case a compiler argument is used to get the memory allocator.
//
// This method is similar to "ChangeHWIntrinsicId" but is more versatile and
// thus more expensive. Use it when you need to bash to an intrinsic id with
// a different number of operands than what the original node had, or, which
// is equivalent, when you do not know the original number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// compiler - compiler to allocate memory with, can be "nullptr" if the
// number of new operands does not exceed the length of the
// inline array (so, there are 2 or fewer of them)
// operands - *all* operands for the new node
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, Compiler* compiler, Operands... operands)
{
const size_t NewOperandCount = sizeof...(Operands);
assert((compiler != nullptr) || (NewOperandCount <= ArrLen(gtInlineOperands)));
ResetOperandArray(NewOperandCount, compiler, gtInlineOperands, ArrLen(gtInlineOperands));
ChangeHWIntrinsicId(intrinsicId, operands...);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1, GenTree* op2)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1, op2);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr));
}
static bool Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2);
private:
void SetHWIntrinsicId(NamedIntrinsic intrinsicId);
};
#endif // FEATURE_HW_INTRINSICS
/* gtIndex -- array access */
struct GenTreeIndex : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
unsigned gtIndElemSize; // size of elements in the array
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
GenTreeIndex(var_types type, GenTree* arr, GenTree* ind, unsigned indElemSize)
: GenTreeOp(GT_INDEX, type, arr, ind)
, gtIndElemSize(indElemSize)
, gtStructElemClass(nullptr) // We always initialize this after construction.
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndex() : GenTreeOp()
{
}
#endif
};
// gtIndexAddr: given an array object and an index, checks that the index is within the bounds of the array if
// necessary and produces the address of the value at that index of the array.
struct GenTreeIndexAddr : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
BasicBlock* gtIndRngFailBB; // Basic block to jump to for array-index-out-of-range
var_types gtElemType; // The element type of the array.
unsigned gtElemSize; // size of elements in the array
unsigned gtLenOffset; // The offset from the array's base address to its length.
unsigned gtElemOffset; // The offset from the array's base address to its first element.
GenTreeIndexAddr(GenTree* arr,
GenTree* ind,
var_types elemType,
CORINFO_CLASS_HANDLE structElemClass,
unsigned elemSize,
unsigned lenOffset,
unsigned elemOffset)
: GenTreeOp(GT_INDEX_ADDR, TYP_BYREF, arr, ind)
, gtStructElemClass(structElemClass)
, gtIndRngFailBB(nullptr)
, gtElemType(elemType)
, gtElemSize(elemSize)
, gtLenOffset(lenOffset)
, gtElemOffset(elemOffset)
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndexAddr() : GenTreeOp()
{
}
#endif
};
/* gtArrLen -- array length (GT_ARR_LENGTH)
GT_ARR_LENGTH is used for "arr.length" */
struct GenTreeArrLen : public GenTreeUnOp
{
GenTree*& ArrRef()
{
return gtOp1;
} // the array address node
private:
int gtArrLenOffset; // constant to add to "gtArrRef" to get the address of the array length.
public:
inline int ArrLenOffset()
{
return gtArrLenOffset;
}
GenTreeArrLen(var_types type, GenTree* arrRef, int lenOffset)
: GenTreeUnOp(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArrLen() : GenTreeUnOp()
{
}
#endif
};
// This takes:
// - a length value
// - an index value, and
// - the label to jump to if the index is out of range.
// - the "kind" of the throw block to branch to on failure
// It generates no result.
//
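// A typical shape (illustrative) is GT_BOUNDS_CHECK(index, GT_ARR_LENGTH(arr)):
// op1 is the index expression and op2 is the length expression (see GetIndex() and
// GetArrayLength() below).
//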
struct GenTreeBoundsChk : public GenTreeOp
{
BasicBlock* gtIndRngFailBB; // Basic block to jump to for index-out-of-range
SpecialCodeKind gtThrowKind; // Kind of throw block to branch to on failure
GenTreeBoundsChk(GenTree* index, GenTree* length, SpecialCodeKind kind)
: GenTreeOp(GT_BOUNDS_CHECK, TYP_VOID, index, length), gtIndRngFailBB(nullptr), gtThrowKind(kind)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeBoundsChk() : GenTreeOp()
{
}
#endif
// If this check is against GT_ARR_LENGTH, returns array reference, else "NULL".
GenTree* GetArray() const
{
return GetArrayLength()->OperIs(GT_ARR_LENGTH) ? GetArrayLength()->AsArrLen()->ArrRef() : nullptr;
}
// The index expression.
GenTree* GetIndex() const
{
return gtOp1;
}
// An expression for the length.
GenTree* GetArrayLength() const
{
return gtOp2;
}
};
// GenTreeArrElem - bounds checked address (byref) of a general array element,
// for multidimensional arrays, or 1-d arrays with non-zero lower bounds.
//
struct GenTreeArrElem : public GenTree
{
GenTree* gtArrObj;
#define GT_ARR_MAX_RANK 3
GenTree* gtArrInds[GT_ARR_MAX_RANK]; // Indices
unsigned char gtArrRank; // Rank of the array
unsigned char gtArrElemSize; // !!! Caution, this is an "unsigned char", it is used only
// on the optimization path of array intrinsics.
// It stores the size of array elements WHEN it can fit
// into an "unsigned char".
// This has caused VSW 571394.
var_types gtArrElemType; // The array element type
// Requires that "inds" is a pointer to an array of "rank" nodes for the indices.
GenTreeArrElem(
var_types type, GenTree* arr, unsigned char rank, unsigned char elemSize, var_types elemType, GenTree** inds)
: GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize), gtArrElemType(elemType)
{
gtFlags |= (arr->gtFlags & GTF_ALL_EFFECT);
for (unsigned char i = 0; i < rank; i++)
{
gtArrInds[i] = inds[i];
gtFlags |= (inds[i]->gtFlags & GTF_ALL_EFFECT);
}
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrElem() : GenTree()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrIndex (gtArrIndex): Expression to bounds-check the index for one dimension of a
// multi-dimensional or non-zero-based array, and compute the effective index
// (i.e. subtracting the lower bound).
//
// Notes:
// This node is similar in some ways to GenTreeBoundsChk, which ONLY performs the check.
// The reason that this node incorporates the check into the effective index computation is
// to avoid duplicating the codegen, as the effective index is required to compute the
// offset anyway.
// TODO-CQ: Enable optimization of the lower bound and length by replacing this:
// /--* <arrObj>
// +--* <index0>
// +--* ArrIndex[i, ]
// with something like:
// /--* <arrObj>
// /--* ArrLowerBound[i, ]
// | /--* <arrObj>
// +--* ArrLen[i, ] (either generalize GT_ARR_LENGTH or add a new node)
// +--* <index0>
// +--* ArrIndex[i, ]
// Which could, for example, be optimized to the following when known to be within bounds:
// /--* TempForLowerBoundDim0
// +--* <index0>
// +--* - (GT_SUB)
//
struct GenTreeArrIndex : public GenTreeOp
{
// The array object - may be any expression producing an Array reference, but is likely to be a lclVar.
GenTree*& ArrObj()
{
return gtOp1;
}
// The index expression - may be any integral expression.
GenTree*& IndexExpr()
{
return gtOp2;
}
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrIndex(var_types type,
GenTree* arrObj,
GenTree* indexExpr,
unsigned char currDim,
unsigned char arrRank,
var_types elemType)
: GenTreeOp(GT_ARR_INDEX, type, arrObj, indexExpr)
, gtCurrDim(currDim)
, gtArrRank(arrRank)
, gtArrElemType(elemType)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeArrIndex() : GenTreeOp()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrOffset (gtArrOffset): Expression to compute the accumulated offset for the address
// of an element of a multi-dimensional or non-zero-based array.
//
// Notes:
// The result of this expression is (gtOffset * dimSize) + gtIndex
// where dimSize is the length/stride/size of the dimension, and is obtained from gtArrObj.
// This node is generated in conjunction with the GenTreeArrIndex node, which computes the
// effective index for a single dimension. The sub-trees can be separately optimized, e.g.
// within a loop body where the expression for the 0th dimension may be invariant.
//
// Here is an example of how the tree might look for a two-dimension array reference:
// /--* const 0
// | /--* <arrObj>
// | +--* <index0>
// +--* ArrIndex[i, ]
// +--* <arrObj>
// /--| arrOffs[i, ]
// | +--* <arrObj>
// | +--* <index1>
// +--* ArrIndex[*,j]
// +--* <arrObj>
// /--| arrOffs[*,j]
// TODO-CQ: see comment on GenTreeArrIndex for how its representation may change. When that
// is done, we will also want to replace the <arrObj> argument to arrOffs with the
// ArrLen as for GenTreeArrIndex.
//
struct GenTreeArrOffs : public GenTree
{
GenTree* gtOffset; // The accumulated offset for lower dimensions - must be TYP_I_IMPL, and
// will either be a CSE temp, the constant 0, or another GenTreeArrOffs node.
GenTree* gtIndex; // The effective index for the current dimension - must be non-negative
// and can be any expression (though it is likely to be either a GenTreeArrIndex
// node, a lclVar, or a constant).
GenTree* gtArrObj; // The array object - may be any expression producing an Array reference,
// but is likely to be a lclVar.
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrOffs(var_types type,
GenTree* offset,
GenTree* index,
GenTree* arrObj,
unsigned char currDim,
unsigned char rank,
var_types elemType)
: GenTree(GT_ARR_OFFSET, type)
, gtOffset(offset)
, gtIndex(index)
, gtArrObj(arrObj)
, gtCurrDim(currDim)
, gtArrRank(rank)
, gtArrElemType(elemType)
{
assert(index->gtFlags & GTF_EXCEPT);
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrOffs() : GenTree()
{
}
#endif
};
/* gtAddrMode -- Target-specific canonicalized addressing expression (GT_LEA) */
struct GenTreeAddrMode : public GenTreeOp
{
// Address is Base + Index*Scale + Offset.
// These are the legal patterns:
//
// Base // Base != nullptr && Index == nullptr && Scale == 0 && Offset == 0
// Base + Index*Scale // Base != nullptr && Index != nullptr && Scale != 0 && Offset == 0
// Base + Offset // Base != nullptr && Index == nullptr && Scale == 0 && Offset != 0
// Base + Index*Scale + Offset // Base != nullptr && Index != nullptr && Scale != 0 && Offset != 0
// Index*Scale // Base == nullptr && Index != nullptr && Scale > 1 && Offset == 0
// Index*Scale + Offset // Base == nullptr && Index != nullptr && Scale > 1 && Offset != 0
// Offset // Base == nullptr && Index == nullptr && Scale == 0 && Offset != 0
//
// So, for example:
// 1. Base + Index is legal with Scale==1
// 2. If Index is null, Scale should be zero (or uninitialized / unused)
// 3. If Scale==1, then we should have "Base" instead of "Index*Scale", and "Base + Offset" instead of
// "Index*Scale + Offset".
// First operand is base address/pointer
bool HasBase() const
{
return gtOp1 != nullptr;
}
GenTree*& Base()
{
return gtOp1;
}
void SetBase(GenTree* base)
{
gtOp1 = base;
}
// Second operand is scaled index value
bool HasIndex() const
{
return gtOp2 != nullptr;
}
GenTree*& Index()
{
return gtOp2;
}
void SetIndex(GenTree* index)
{
gtOp2 = index;
}
unsigned GetScale() const
{
return gtScale;
}
void SetScale(unsigned scale)
{
gtScale = scale;
}
int Offset()
{
return static_cast<int>(gtOffset);
}
void SetOffset(int offset)
{
gtOffset = offset;
}
unsigned gtScale; // The scale factor
private:
ssize_t gtOffset; // The offset to add
public:
GenTreeAddrMode(var_types type, GenTree* base, GenTree* index, unsigned scale, ssize_t offset)
: GenTreeOp(GT_LEA, type, base, index)
{
assert(base != nullptr || index != nullptr);
gtScale = scale;
gtOffset = offset;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeAddrMode() : GenTreeOp()
{
}
#endif
};
// Indir is just an op, no additional data, but some additional abstractions
struct GenTreeIndir : public GenTreeOp
{
// The address for the indirection.
GenTree*& Addr()
{
return gtOp1;
}
void SetAddr(GenTree* addr)
{
assert(addr != nullptr);
assert(addr->TypeIs(TYP_I_IMPL, TYP_BYREF));
gtOp1 = addr;
}
// These methods provide an interface to the indirection's address expression
// (base, index, scale and offset).
bool HasBase();
bool HasIndex();
GenTree* Base();
GenTree* Index();
unsigned Scale();
ssize_t Offset();
GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data)
{
}
// True if this indirection is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_IND_VOLATILE) != 0;
}
// True if this indirection is an unaligned memory operation.
bool IsUnaligned() const
{
return (gtFlags & GTF_IND_UNALIGNED) != 0;
}
#if DEBUGGABLE_GENTREE
// Used only for GenTree::GetVtableForOper()
GenTreeIndir() : GenTreeOp()
{
}
#else
// Used by XARCH codegen to construct temporary trees to pass to the emitter.
GenTreeIndir() : GenTreeOp(GT_NOP, TYP_UNDEF)
{
}
#endif
};
// gtBlk -- 'block' (GT_BLK, GT_STORE_BLK).
//
// This is the base type for all of the nodes that represent block or struct
// values.
// Since it can be a store, it includes gtBlkOpKind to specify the type of
// code generation that will be used for the block operation.
struct GenTreeBlk : public GenTreeIndir
{
private:
ClassLayout* m_layout;
public:
ClassLayout* GetLayout() const
{
return m_layout;
}
void SetLayout(ClassLayout* layout)
{
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
m_layout = layout;
}
// The data to be stored (null for GT_BLK)
GenTree*& Data()
{
return gtOp2;
}
void SetData(GenTree* dataNode)
{
gtOp2 = dataNode;
}
// The size of the buffer to be copied.
unsigned Size() const
{
assert((m_layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
return (m_layout != nullptr) ? m_layout->GetSize() : 0;
}
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
enum
{
BlkOpKindInvalid,
#ifndef TARGET_X86
BlkOpKindHelper,
#endif
#ifdef TARGET_XARCH
BlkOpKindRepInstr,
#endif
BlkOpKindUnroll,
} gtBlkOpKind;
#ifndef JIT32_GCENCODER
bool gtBlkOpGcUnsafe;
#endif
#ifdef TARGET_XARCH
bool IsOnHeapAndContainsReferences()
{
return (m_layout != nullptr) && m_layout->HasGCPtr() && !Addr()->OperIsLocalAddr();
}
#endif
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, nullptr)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
}
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, data)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
gtFlags |= (data->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeBlk() : GenTreeIndir()
{
}
#endif // DEBUGGABLE_GENTREE
};
// gtObj -- 'object' (GT_OBJ).
//
// This node is used for block values that may have GC pointers.
struct GenTreeObj : public GenTreeBlk
{
void Init()
{
// By default, an OBJ is assumed to be a global reference, unless it is local.
GenTreeLclVarCommon* lcl = Addr()->IsLocalAddrExpr();
if ((lcl == nullptr) || ((lcl->gtFlags & GTF_GLOB_EFFECT) != 0))
{
gtFlags |= GTF_GLOB_REF;
}
noway_assert(GetLayout()->GetClassHandle() != NO_CLASS_HANDLE);
}
GenTreeObj(var_types type, GenTree* addr, ClassLayout* layout) : GenTreeBlk(GT_OBJ, type, addr, layout)
{
Init();
}
GenTreeObj(var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeBlk(GT_STORE_OBJ, type, addr, data, layout)
{
Init();
}
#if DEBUGGABLE_GENTREE
GenTreeObj() : GenTreeBlk()
{
}
#endif
};
// GenTreeStoreDynBlk -- 'dynamic block store' (GT_STORE_DYN_BLK).
//
// This node is used to represent stores that have a dynamic size - the "cpblk" and "initblk"
// IL instructions are implemented with it. Note that such stores assume the input has no GC
// pointers in it, and as such do not ever use write barriers.
//
// The "Data()" member of this node will either be a "dummy" IND(struct) node, for "cpblk", or
// the zero constant/INIT_VAL for "initblk".
//
struct GenTreeStoreDynBlk : public GenTreeBlk
{
public:
GenTree* gtDynamicSize;
GenTreeStoreDynBlk(GenTree* dstAddr, GenTree* data, GenTree* dynamicSize)
: GenTreeBlk(GT_STORE_DYN_BLK, TYP_VOID, dstAddr, data, nullptr), gtDynamicSize(dynamicSize)
{
// Conservatively the 'dstAddr' could be null or point into the global heap.
// Likewise, this is a store and so must be marked with the GTF_ASG flag.
gtFlags |= (GTF_ASG | GTF_EXCEPT | GTF_GLOB_REF);
gtFlags |= (dynamicSize->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeStoreDynBlk() : GenTreeBlk()
{
}
#endif // DEBUGGABLE_GENTREE
};
// Read-modify-write status of a RMW memory op rooted at a storeInd
enum RMWStatus
{
STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
// Default status unless modified by IsRMWMemOpRootedAtStoreInd()
// One of these denote storeind is a RMW memory operation.
STOREIND_RMW_DST_IS_OP1, // StoreInd is known to be a RMW memory op and dst candidate is op1
STOREIND_RMW_DST_IS_OP2, // StoreInd is known to be a RMW memory op and dst candidate is op2
// One of these denote the reason for storeind is marked as non-RMW operation
STOREIND_RMW_UNSUPPORTED_ADDR, // Addr mode is not yet supported for RMW memory
STOREIND_RMW_UNSUPPORTED_OPER, // Operation is not supported for RMW memory
STOREIND_RMW_UNSUPPORTED_TYPE, // Type is not supported for RMW memory
STOREIND_RMW_INDIR_UNEQUAL // Indir to read value is not equivalent to indir that writes the value
};
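// For example (illustrative), on xarch a tree of the form STOREIND(addr, ADD(IND(addr), 1))
// can be encoded as a single read-modify-write instruction ("add [addr], 1"); the RMW status
// records whether the indirection matching the store destination is op1 or op2 of the data
// operation, or why the storeInd could not be treated as an RMW memory op.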
#ifdef DEBUG
inline const char* RMWStatusDescription(RMWStatus status)
{
switch (status)
{
case STOREIND_RMW_STATUS_UNKNOWN:
return "RMW status unknown";
case STOREIND_RMW_DST_IS_OP1:
return "dst candidate is op1";
case STOREIND_RMW_DST_IS_OP2:
return "dst candidate is op2";
case STOREIND_RMW_UNSUPPORTED_ADDR:
return "address mode is not supported";
case STOREIND_RMW_UNSUPPORTED_OPER:
return "oper is not supported";
case STOREIND_RMW_UNSUPPORTED_TYPE:
return "type is not supported";
case STOREIND_RMW_INDIR_UNEQUAL:
return "read indir is not equivalent to write indir";
default:
unreached();
}
}
#endif
// StoreInd is just a BinOp, with additional RMW status
struct GenTreeStoreInd : public GenTreeIndir
{
#if !CPU_LOAD_STORE_ARCH
// The below flag is set and used during lowering
RMWStatus gtRMWStatus;
bool IsRMWStatusUnknown()
{
return gtRMWStatus == STOREIND_RMW_STATUS_UNKNOWN;
}
bool IsNonRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_UNSUPPORTED_ADDR || gtRMWStatus == STOREIND_RMW_UNSUPPORTED_OPER ||
gtRMWStatus == STOREIND_RMW_UNSUPPORTED_TYPE || gtRMWStatus == STOREIND_RMW_INDIR_UNEQUAL;
}
bool IsRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1 || gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
bool IsRMWDstOp1()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1;
}
bool IsRMWDstOp2()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
#endif //! CPU_LOAD_STORE_ARCH
RMWStatus GetRMWStatus()
{
#if !CPU_LOAD_STORE_ARCH
return gtRMWStatus;
#else
return STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatusDefault()
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatus(RMWStatus status)
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = status;
#endif
}
GenTree*& Data()
{
return gtOp2;
}
GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data)
{
SetRMWStatusDefault();
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeStoreInd() : GenTreeIndir()
{
SetRMWStatusDefault();
}
#endif
};
/* gtRetExp -- Placeholder for the return expression from an inline candidate (GT_RET_EXPR) */
struct GenTreeRetExpr : public GenTree
{
GenTree* gtInlineCandidate;
BasicBlockFlags bbFlags;
CORINFO_CLASS_HANDLE gtRetClsHnd;
GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type)
{
}
#if DEBUGGABLE_GENTREE
GenTreeRetExpr() : GenTree()
{
}
#endif
};
// In LIR there are no longer statements so debug information is inserted linearly using these nodes.
struct GenTreeILOffset : public GenTree
{
DebugInfo gtStmtDI; // debug info
#ifdef DEBUG
IL_OFFSET gtStmtLastILoffs; // instr offset at end of stmt
#endif
GenTreeILOffset(const DebugInfo& di DEBUGARG(IL_OFFSET lastOffset = BAD_IL_OFFSET))
: GenTree(GT_IL_OFFSET, TYP_VOID)
, gtStmtDI(di)
#ifdef DEBUG
, gtStmtLastILoffs(lastOffset)
#endif
{
}
#if DEBUGGABLE_GENTREE
GenTreeILOffset() : GenTree(GT_IL_OFFSET, TYP_VOID)
{
}
#endif
};
// GenTreeList: adapter class for forward iteration of the execution order GenTree linked list
// using range-based `for`, normally used via Statement::TreeList(), e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
class GenTreeList
{
GenTree* m_trees;
// Forward iterator for the execution order GenTree linked list (using `gtNext` pointer).
//
class iterator
{
GenTree* m_tree;
public:
iterator(GenTree* tree) : m_tree(tree)
{
}
GenTree* operator*() const
{
return m_tree;
}
iterator& operator++()
{
m_tree = m_tree->gtNext;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_tree != i.m_tree;
}
};
public:
GenTreeList(GenTree* trees) : m_trees(trees)
{
}
iterator begin() const
{
return iterator(m_trees);
}
iterator end() const
{
return iterator(nullptr);
}
};
// We use the following format when printing the Statement number: Statement->GetID()
// This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int)
#define FMT_STMT "STMT%05u"
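// For example (illustrative): printf("Removing " FMT_STMT "\n", stmt->GetID());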
struct Statement
{
public:
Statement(GenTree* expr DEBUGARG(unsigned stmtID))
: m_rootNode(expr)
, m_treeList(nullptr)
, m_next(nullptr)
, m_prev(nullptr)
#ifdef DEBUG
, m_lastILOffset(BAD_IL_OFFSET)
, m_stmtID(stmtID)
#endif
{
}
GenTree* GetRootNode() const
{
return m_rootNode;
}
GenTree** GetRootNodePointer()
{
return &m_rootNode;
}
void SetRootNode(GenTree* treeRoot)
{
m_rootNode = treeRoot;
}
GenTree* GetTreeList() const
{
return m_treeList;
}
void SetTreeList(GenTree* treeHead)
{
m_treeList = treeHead;
}
// TreeList: convenience method for enabling range-based `for` iteration over the
// execution order of the GenTree linked list, e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
GenTreeList TreeList() const
{
return GenTreeList(GetTreeList());
}
const DebugInfo& GetDebugInfo() const
{
return m_debugInfo;
}
void SetDebugInfo(const DebugInfo& di)
{
m_debugInfo = di;
di.Validate();
}
#ifdef DEBUG
IL_OFFSET GetLastILOffset() const
{
return m_lastILOffset;
}
void SetLastILOffset(IL_OFFSET lastILOffset)
{
m_lastILOffset = lastILOffset;
}
unsigned GetID() const
{
return m_stmtID;
}
#endif // DEBUG
Statement* GetNextStmt() const
{
return m_next;
}
void SetNextStmt(Statement* nextStmt)
{
m_next = nextStmt;
}
Statement* GetPrevStmt() const
{
return m_prev;
}
void SetPrevStmt(Statement* prevStmt)
{
m_prev = prevStmt;
}
bool IsPhiDefnStmt() const
{
return m_rootNode->IsPhiDefn();
}
unsigned char GetCostSz() const
{
return m_rootNode->GetCostSz();
}
unsigned char GetCostEx() const
{
return m_rootNode->GetCostEx();
}
private:
// The root of the expression tree.
// Note: It will be the last node in evaluation order.
GenTree* m_rootNode;
// The tree list head (for forward walks in evaluation order).
// The value is `nullptr` until we have set the sequencing of the nodes.
GenTree* m_treeList;
// The statement nodes are doubly-linked. The first statement node in a block points
// to the last node in the block via its `m_prev` link. Note that the last statement node
// does not point to the first: it has `m_next == nullptr`; that is, the list is not fully circular.
Statement* m_next;
Statement* m_prev;
DebugInfo m_debugInfo;
#ifdef DEBUG
IL_OFFSET m_lastILOffset; // The instr offset at the end of this statement.
unsigned m_stmtID;
#endif
};
// StatementList: adapter class for forward iteration of the statement linked list using range-based `for`,
// normally used via BasicBlock::Statements(), e.g.:
// for (Statement* const stmt : block->Statements()) ...
// or:
// for (Statement* const stmt : block->NonPhiStatements()) ...
//
class StatementList
{
Statement* m_stmts;
// Forward iterator for the statement linked list.
//
class iterator
{
Statement* m_stmt;
public:
iterator(Statement* stmt) : m_stmt(stmt)
{
}
Statement* operator*() const
{
return m_stmt;
}
iterator& operator++()
{
m_stmt = m_stmt->GetNextStmt();
return *this;
}
bool operator!=(const iterator& i) const
{
return m_stmt != i.m_stmt;
}
};
public:
StatementList(Statement* stmts) : m_stmts(stmts)
{
}
iterator begin() const
{
return iterator(m_stmts);
}
iterator end() const
{
return iterator(nullptr);
}
};
/* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
pointers) must be flagged as 'large' in GenTree::InitNodeSize().
*/
/* AsClsVar() -- 'static data member' (GT_CLS_VAR) */
struct GenTreeClsVar : public GenTree
{
CORINFO_FIELD_HANDLE gtClsVarHnd;
FieldSeqNode* gtFieldSeq;
GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(GT_CLS_VAR, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
gtFlags |= GTF_GLOB_REF;
}
GenTreeClsVar(genTreeOps oper, var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(oper, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
assert((oper == GT_CLS_VAR) || (oper == GT_CLS_VAR_ADDR));
gtFlags |= GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeClsVar() : GenTree()
{
}
#endif
};
/* gtArgPlace -- 'register argument placeholder' (GT_ARGPLACE) */
struct GenTreeArgPlace : public GenTree
{
CORINFO_CLASS_HANDLE gtArgPlaceClsHnd; // Needed when we have a TYP_STRUCT argument
GenTreeArgPlace(var_types type, CORINFO_CLASS_HANDLE clsHnd) : GenTree(GT_ARGPLACE, type), gtArgPlaceClsHnd(clsHnd)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArgPlace() : GenTree()
{
}
#endif
};
/* gtPhiArg -- phi node rhs argument, var = phi(phiarg, phiarg, phiarg...); GT_PHI_ARG */
struct GenTreePhiArg : public GenTreeLclVarCommon
{
BasicBlock* gtPredBB;
GenTreePhiArg(var_types type, unsigned lclNum, unsigned ssaNum, BasicBlock* block)
: GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block)
{
SetSsaNum(ssaNum);
}
#if DEBUGGABLE_GENTREE
GenTreePhiArg() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtPutArgStk -- Argument passed on stack (GT_PUTARG_STK) */
struct GenTreePutArgStk : public GenTreeUnOp
{
private:
unsigned m_byteOffset;
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned m_byteSize; // The number of bytes that this argument is occupying on the stack with padding.
#endif
public:
#if defined(DEBUG_ARG_SLOTS)
unsigned gtSlotNum; // Slot number of the argument to be passed on stack
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
#endif
#endif
#if defined(UNIX_X86_ABI)
unsigned gtPadAlign; // Number of padding slots for stack alignment
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
GenTreeCall* gtCall; // the call node to which this argument belongs
#endif
#if FEATURE_FASTTAILCALL
bool gtPutInIncomingArgArea; // Whether this arg needs to be placed in incoming arg area.
// By default this is false and will be placed in out-going arg area.
// Fast tail calls set this to true.
// In future if we need to add more such bool fields consider bit fields.
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
// TODO-Throughput: The following information should be obtained from the child
// block node.
enum class Kind : __int8
{
Invalid,
RepInstr,
PartialRepInstr,
Unroll,
Push,
PushAllSlots,
};
Kind gtPutArgStkKind;
#endif
GenTreePutArgStk(genTreeOps oper,
var_types type,
GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
GenTreeCall* callNode,
bool putInIncomingArgArea)
: GenTreeUnOp(oper, type, op1 DEBUGARG(/*largeNode*/ false))
, m_byteOffset(stackByteOffset)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, m_byteSize(stackByteSize)
#endif
#if defined(DEBUG_ARG_SLOTS)
, gtSlotNum(slotNum)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtNumSlots(numSlots)
#endif
#endif
#if defined(UNIX_X86_ABI)
, gtPadAlign(0)
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
, gtCall(callNode)
#endif
#if FEATURE_FASTTAILCALL
, gtPutInIncomingArgArea(putInIncomingArgArea)
#endif // FEATURE_FASTTAILCALL
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtPutArgStkKind(Kind::Invalid)
#endif
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset == slotNum * TARGET_POINTER_SIZE);
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
DEBUG_ARG_SLOTS_ASSERT(m_byteSize == gtNumSlots * TARGET_POINTER_SIZE);
#endif
}
GenTree*& Data()
{
return gtOp1;
}
#if FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return gtPutInIncomingArgArea;
}
#else // !FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
unsigned getArgOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == gtSlotNum);
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset % TARGET_POINTER_SIZE == 0);
return m_byteOffset;
}
#if defined(UNIX_X86_ABI)
unsigned getArgPadding() const
{
return gtPadAlign;
}
void setArgPadding(unsigned padAlign)
{
gtPadAlign = padAlign;
}
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const
{
return m_byteSize;
}
// Return true if this is a PutArgStk of a SIMD12 struct.
// This is needed because such values are re-typed to SIMD16, and the type of PutArgStk is VOID.
unsigned isSIMD12() const
{
return (varTypeIsSIMD(gtOp1) && (GetStackByteSize() == 12));
}
bool isPushKind() const
{
return (gtPutArgStkKind == Kind::Push) || (gtPutArgStkKind == Kind::PushAllSlots);
}
#else // !FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const;
#endif // !FEATURE_PUT_STRUCT_ARG_STK
#if DEBUGGABLE_GENTREE
GenTreePutArgStk() : GenTreeUnOp()
{
}
#endif
};
#if FEATURE_ARG_SPLIT
// Represent the struct argument: split value in register(s) and stack
struct GenTreePutArgSplit : public GenTreePutArgStk
{
unsigned gtNumRegs;
GenTreePutArgSplit(GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
unsigned numRegs,
GenTreeCall* callNode,
bool putIncomingArgArea)
: GenTreePutArgStk(GT_PUTARG_SPLIT,
TYP_STRUCT,
op1,
stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
numSlots,
#endif
#endif
callNode,
putIncomingArgArea)
, gtNumRegs(numRegs)
{
ClearOtherRegs();
ClearOtherRegFlags();
}
// Type required to support multi-reg struct arg.
var_types m_regType[MAX_REG_ARG];
// First reg of struct is always given by GetRegNum().
// gtOtherRegs holds the other reg numbers of struct.
regNumberSmall gtOtherRegs[MAX_REG_ARG - 1];
MultiRegSpillFlags gtSpillFlags;
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
    //  idx   -   index of the register within the struct argument
//
// Return Value:
// Return regNumber of i'th register of this struct argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
return GetRegNum();
}
return (regNumber)gtOtherRegs[idx - 1];
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th register of this struct argument
//
// Arguments:
// reg - reg number
    //  idx   -   index of the register within the struct argument
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
SetRegNum(reg);
}
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
for (unsigned i = 0; i < MAX_REG_ARG - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
var_types GetRegType(unsigned index) const
{
assert(index < gtNumRegs);
var_types result = m_regType[index];
return result;
}
//-------------------------------------------------------------------
    // ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreePutArgSplit() : GenTreePutArgStk()
{
}
#endif
};
#endif // FEATURE_ARG_SPLIT
// Represents GT_COPY or GT_RELOAD node
//
// As it turns out, these are only needed on targets that happen to have multi-reg returns.
// However, they are actually needed on any target that has any multi-reg ops. It is just
// coincidence that those are the same (and there isn't a FEATURE_MULTIREG_OPS).
//
struct GenTreeCopyOrReload : public GenTreeUnOp
{
#if FEATURE_MULTIREG_RET
// State required to support copy/reload of a multi-reg call node.
// The first register is always given by GetRegNum().
//
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
#endif
//----------------------------------------------------------
// ClearOtherRegs: set gtOtherRegs to REG_NA.
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//-----------------------------------------------------------
// GetRegNumByIdx: Get regNumber of i'th position.
//
// Arguments:
// idx - register position.
//
// Return Value:
// Returns regNumber assigned to i'th position.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//-----------------------------------------------------------
// SetRegNumByIdx: Set the regNumber for i'th position.
//
// Arguments:
// reg - reg number
// idx - register position.
//
// Return Value:
// None.
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
else
{
unreached();
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given copy/reload node to this
// node.
//
// Arguments:
// from - GenTree node from which to copy multi-reg state
//
// Return Value:
// None
//
// TODO-ARM: Implement this routine for Arm64 and Arm32
// TODO-X86: Implement this routine for x86
void CopyOtherRegs(GenTreeCopyOrReload* from)
{
assert(OperGet() == from->OperGet());
#ifdef UNIX_AMD64_ABI
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = from->gtOtherRegs[i];
}
#endif
}
unsigned GetRegCount() const
{
#if FEATURE_MULTIREG_RET
// We need to return the highest index for which we have a valid register.
// Note that the gtOtherRegs array is off by one (the 0th register is GetRegNum()).
// If there's no valid register in gtOtherRegs, GetRegNum() must be valid.
// Note that for most nodes, the set of valid registers must be contiguous,
// but for COPY or RELOAD there is only a valid register for the register positions
// that must be copied or reloaded.
//
for (unsigned i = MAX_RET_REG_COUNT; i > 1; i--)
{
if (gtOtherRegs[i - 2] != REG_NA)
{
return i;
}
}
#endif
// We should never have a COPY or RELOAD with no valid registers.
assert(GetRegNum() != REG_NA);
return 1;
}
GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1)
{
assert(type != TYP_STRUCT || op1->IsMultiRegNode());
SetRegNum(REG_NA);
ClearOtherRegs();
}
#if DEBUGGABLE_GENTREE
GenTreeCopyOrReload() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_ALLOCOBJ node
struct GenTreeAllocObj final : public GenTreeUnOp
{
unsigned int gtNewHelper; // Value returned by ICorJitInfo::getNewHelper
bool gtHelperHasSideEffects;
CORINFO_CLASS_HANDLE gtAllocObjClsHnd;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeAllocObj(
var_types type, unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, GenTree* op)
: GenTreeUnOp(GT_ALLOCOBJ, type, op DEBUGARG(/*largeNode*/ TRUE))
, // This node in most cases will be changed to a call node
gtNewHelper(helper)
, gtHelperHasSideEffects(helperHasSideEffects)
, gtAllocObjClsHnd(clsHnd)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeAllocObj() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_RUNTIMELOOKUP node
struct GenTreeRuntimeLookup final : public GenTreeUnOp
{
CORINFO_GENERIC_HANDLE gtHnd;
CorInfoGenericHandleType gtHndType;
GenTreeRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree)
: GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)), gtHnd(hnd), gtHndType(hndTyp)
{
assert(hnd != nullptr);
}
#if DEBUGGABLE_GENTREE
GenTreeRuntimeLookup() : GenTreeUnOp()
{
}
#endif
// Return reference to the actual tree that does the lookup
GenTree*& Lookup()
{
return gtOp1;
}
bool IsClassHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_CLASS;
}
bool IsMethodHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_METHOD;
}
bool IsFieldHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_FIELD;
}
// Note these operations describe the handle that is input to the
// lookup, not the handle produced by the lookup.
CORINFO_CLASS_HANDLE GetClassHandle() const
{
assert(IsClassHandle());
return (CORINFO_CLASS_HANDLE)gtHnd;
}
CORINFO_METHOD_HANDLE GetMethodHandle() const
{
assert(IsMethodHandle());
return (CORINFO_METHOD_HANDLE)gtHnd;
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
        assert(IsFieldHandle());
return (CORINFO_FIELD_HANDLE)gtHnd;
}
};
// Represents the condition of a GT_JCC or GT_SETCC node.
struct GenCondition
{
// clang-format off
enum Code : unsigned char
{
OperMask = 7,
Unsigned = 8,
Unordered = Unsigned,
Float = 16,
// 0 would be the encoding of "signed EQ" but since equality is sign insensitive
// we'll use 0 as invalid/uninitialized condition code. This will also leave 1
// as a spare code.
NONE = 0,
SLT = 2,
SLE = 3,
SGE = 4,
SGT = 5,
S = 6,
NS = 7,
EQ = Unsigned | 0, // = 8
NE = Unsigned | 1, // = 9
ULT = Unsigned | SLT, // = 10
ULE = Unsigned | SLE, // = 11
UGE = Unsigned | SGE, // = 12
UGT = Unsigned | SGT, // = 13
C = Unsigned | S, // = 14
NC = Unsigned | NS, // = 15
FEQ = Float | 0, // = 16
FNE = Float | 1, // = 17
FLT = Float | SLT, // = 18
FLE = Float | SLE, // = 19
FGE = Float | SGE, // = 20
FGT = Float | SGT, // = 21
O = Float | S, // = 22
NO = Float | NS, // = 23
FEQU = Unordered | FEQ, // = 24
FNEU = Unordered | FNE, // = 25
FLTU = Unordered | FLT, // = 26
FLEU = Unordered | FLE, // = 27
FGEU = Unordered | FGE, // = 28
FGTU = Unordered | FGT, // = 29
P = Unordered | O, // = 30
NP = Unordered | NO, // = 31
};
// clang-format on
private:
Code m_code;
public:
Code GetCode() const
{
return m_code;
}
bool IsFlag() const
{
return (m_code & OperMask) >= S;
}
bool IsUnsigned() const
{
return (ULT <= m_code) && (m_code <= UGT);
}
bool IsFloat() const
{
return !IsFlag() && (m_code & Float) != 0;
}
bool IsUnordered() const
{
return !IsFlag() && (m_code & (Float | Unordered)) == (Float | Unordered);
}
bool Is(Code cond) const
{
return m_code == cond;
}
template <typename... TRest>
bool Is(Code c, TRest... rest) const
{
return Is(c) || Is(rest...);
}
// Indicate whether the condition should be swapped in order to avoid generating
// multiple branches. This happens for certain floating point conditions on XARCH,
// see GenConditionDesc and its associated mapping table for more details.
bool PreferSwap() const
{
#ifdef TARGET_XARCH
return Is(GenCondition::FLT, GenCondition::FLE, GenCondition::FGTU, GenCondition::FGEU);
#else
return false;
#endif
}
const char* Name() const
{
// clang-format off
static const char* names[]
{
"NONE", "???", "SLT", "SLE", "SGE", "SGT", "S", "NS",
"UEQ", "UNE", "ULT", "ULE", "UGE", "UGT", "C", "NC",
"FEQ", "FNE", "FLT", "FLE", "FGE", "FGT", "O", "NO",
"FEQU", "FNEU", "FLTU", "FLEU", "FGEU", "FGTU", "P", "NP"
};
// clang-format on
assert(m_code < ArrLen(names));
return names[m_code];
}
GenCondition() : m_code()
{
}
GenCondition(Code cond) : m_code(cond)
{
}
static_assert((GT_NE - GT_EQ) == (NE & ~Unsigned), "bad relop");
static_assert((GT_LT - GT_EQ) == SLT, "bad relop");
static_assert((GT_LE - GT_EQ) == SLE, "bad relop");
static_assert((GT_GE - GT_EQ) == SGE, "bad relop");
static_assert((GT_GT - GT_EQ) == SGT, "bad relop");
static_assert((GT_TEST_NE - GT_TEST_EQ) == (NE & ~Unsigned), "bad relop");
static GenCondition FromRelop(GenTree* relop)
{
assert(relop->OperIsCompare());
if (varTypeIsFloating(relop->gtGetOp1()))
{
return FromFloatRelop(relop);
}
else
{
return FromIntegralRelop(relop);
}
}
static GenCondition FromFloatRelop(GenTree* relop)
{
assert(varTypeIsFloating(relop->gtGetOp1()) && varTypeIsFloating(relop->gtGetOp2()));
return FromFloatRelop(relop->OperGet(), (relop->gtFlags & GTF_RELOP_NAN_UN) != 0);
}
static GenCondition FromFloatRelop(genTreeOps oper, bool isUnordered)
{
assert(GenTree::OperIsCompare(oper));
unsigned code = oper - GT_EQ;
assert(code <= SGT);
code |= Float;
if (isUnordered)
{
code |= Unordered;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition FromIntegralRelop(GenTree* relop)
{
assert(!varTypeIsFloating(relop->gtGetOp1()) && !varTypeIsFloating(relop->gtGetOp2()));
return FromIntegralRelop(relop->OperGet(), relop->IsUnsigned());
}
static GenCondition FromIntegralRelop(genTreeOps oper, bool isUnsigned)
{
assert(GenTree::OperIsCompare(oper));
// GT_TEST_EQ/NE are special, they need to be mapped as GT_EQ/NE
unsigned code = oper - ((oper >= GT_TEST_EQ) ? GT_TEST_EQ : GT_EQ);
if (isUnsigned || (code <= 1)) // EQ/NE are treated as unsigned
{
code |= Unsigned;
}
return GenCondition(static_cast<Code>(code));
}
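    // Illustration of the encoding produced by FromIntegralRelop: a signed GT_LT maps to code
    // (GT_LT - GT_EQ) == SLT (2), the same relop with isUnsigned set becomes ULT (2 | Unsigned == 10),
    // and GT_EQ/GT_NE always get the Unsigned bit (EQ == 8, NE == 9) since equality is sign-insensitive.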
static GenCondition Reverse(GenCondition condition)
{
// clang-format off
static const Code reverse[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGE, SGT, SLT, SLE, NS, S,
NE, EQ, UGE, UGT, ULT, ULE, NC, C,
FNEU, FEQU, FGEU, FGTU, FLTU, FLEU, NO, O,
            FNE,  FEQ,  FGE,  FGT,  FLT,  FLE,  NP, P
};
// clang-format on
assert(condition.m_code < ArrLen(reverse));
return GenCondition(reverse[condition.m_code]);
}
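    // For example, Reverse(FLT) yields FGEU: the negation of an ordered "less than" must also hold for
    // unordered (NaN) operands, so the reversed float conditions pick up the Unordered bit.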
static GenCondition Swap(GenCondition condition)
{
// clang-format off
static const Code swap[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGT, SGE, SLE, SLT, S, NS,
EQ, NE, UGT, UGE, ULE, ULT, C, NC,
FEQ, FNE, FGT, FGE, FLE, FLT, O, NO,
FEQU, FNEU, FGTU, FGEU, FLEU, FLTU, P, NP
};
// clang-format on
assert(condition.m_code < ArrLen(swap));
return GenCondition(swap[condition.m_code]);
}
};
// Represents a GT_JCC or GT_SETCC node.
struct GenTreeCC final : public GenTree
{
GenCondition gtCondition;
GenTreeCC(genTreeOps oper, GenCondition condition, var_types type = TYP_VOID)
: GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition)
{
assert(OperIs(GT_JCC, GT_SETCC));
}
#if DEBUGGABLE_GENTREE
GenTreeCC() : GenTree()
{
}
#endif // DEBUGGABLE_GENTREE
};
//------------------------------------------------------------------------
// Deferred inline functions of GenTree -- these need the subtypes above to
// be defined already.
//------------------------------------------------------------------------
inline bool GenTree::OperIsBlkOp()
{
return ((gtOper == GT_ASG) && varTypeIsStruct(AsOp()->gtOp1)) || OperIsStoreBlk();
}
inline bool GenTree::OperIsInitBlkOp()
{
if (!OperIsBlkOp())
{
return false;
}
GenTree* src;
if (gtOper == GT_ASG)
{
src = gtGetOp2();
}
else
{
src = AsBlk()->Data()->gtSkipReloadOrCopy();
}
return src->OperIsInitVal() || src->OperIsConst();
}
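// For example, ASG(structLcl, INIT_VAL(cns)) or a store-block whose data is an integer constant is an
// init block op; a block op whose source is another struct value is a copy block op (see below).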
inline bool GenTree::OperIsCopyBlkOp()
{
return OperIsBlkOp() && !OperIsInitBlkOp();
}
//------------------------------------------------------------------------
// IsFPZero: Checks whether this is a floating point constant with value 0.0
//
// Return Value:
//    Returns true iff the tree is a GT_CNS_DBL with a value of 0.0.
inline bool GenTree::IsFPZero() const
{
if ((gtOper == GT_CNS_DBL) && (AsDblCon()->gtDconVal == 0.0))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// IsIntegralConst: Checks whether this is a constant node with the given value
//
// Arguments:
// constVal - the value of interest
//
// Return Value:
// Returns true iff the tree is an integral constant opcode, with
// the given value.
//
// Notes:
//    Like gtIconVal, the argument is of type ssize_t, so this cannot check for
//    long constants in a target-independent way.
inline bool GenTree::IsIntegralConst(ssize_t constVal) const
{
if ((gtOper == GT_CNS_INT) && (AsIntConCommon()->IconValue() == constVal))
{
return true;
}
if ((gtOper == GT_CNS_LNG) && (AsIntConCommon()->LngValue() == constVal))
{
return true;
}
return false;
}
//-------------------------------------------------------------------
// IsIntegralConstVector: returns true if this is a SIMD vector
// with all its elements equal to an integral constant.
//
// Arguments:
// constVal - const value of vector element
//
// Returns:
// True if this represents an integral const SIMD vector.
//
inline bool GenTree::IsIntegralConstVector(ssize_t constVal) const
{
#ifdef FEATURE_SIMD
// SIMDIntrinsicInit intrinsic with a const value as initializer
// represents a const vector.
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit) &&
AsSIMD()->Op(1)->IsIntegralConst(constVal))
{
assert(varTypeIsIntegral(AsSIMD()->GetSimdBaseType()));
assert(AsSIMD()->GetOperandCount() == 1);
return true;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
if (!varTypeIsIntegral(node->GetSimdBaseType()))
{
// Can't be an integral constant
return false;
}
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
if ((node->GetOperandCount() == 0) && (constVal == 0))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
else if ((node->GetOperandCount() == 1) && node->Op(1)->IsIntegralConst(constVal))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_Create) || (intrinsicId == NI_Vector256_Create);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_Create) || (intrinsicId == NI_Vector128_Create);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
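// For example, with an integral base type this matches the node imported for Vector128.Create(constVal),
// and for constVal == 0 it also matches Vector128<T>.Zero (with the Vector64/Vector256 variants on the
// respective targets).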
//-------------------------------------------------------------------
// IsSIMDZero: returns true if this is a SIMD vector
// with all its elements equal to zero.
//
// Returns:
//     True if this represents a SIMD vector with all elements equal to zero.
//
inline bool GenTree::IsSIMDZero() const
{
#ifdef FEATURE_SIMD
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit))
{
return (AsSIMD()->Op(1)->IsIntegralConst(0) || AsSIMD()->Op(1)->IsFPZero());
}
#endif
return false;
}
//-------------------------------------------------------------------
// IsFloatPositiveZero: returns true if this is exactly a const float value of positive zero (+0.0)
//
// Returns:
// True if this represents a const floating-point value of exactly positive zero (+0.0).
// Will return false if the value is negative zero (-0.0).
//
inline bool GenTree::IsFloatPositiveZero() const
{
if (IsCnsFltOrDbl())
{
        // This implementation is almost identical to IsCnsNonZeroFltOrDbl,
        // but it is easier to read than negating IsCnsNonZeroFltOrDbl.
        // The bit-pattern check distinguishes +0.0 (all bits zero) from -0.0
        // (only the sign bit set), which compares equal to 0.0 as a double.
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue == 0;
}
return false;
}
//-------------------------------------------------------------------
// IsVectorZero: returns true if this node is a HWIntrinsic that is Vector*_get_Zero.
//
// Returns:
// True if this represents a HWIntrinsic node that is Vector*_get_Zero.
//
// TODO: We already have IsSIMDZero() and IsIntegralConstVector(0),
// however, IsSIMDZero() does not cover hardware intrinsics, and IsIntegralConstVector(0) does not cover floating
// point. In order to not risk adverse behaviour by modifying those, this function 'IsVectorZero' was introduced.
// At some point, it makes sense to normalize this logic to be a single function call rather than have several
// separate ones; preferably this one.
inline bool GenTree::IsVectorZero() const
{
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
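// For example, this matches the node imported for Vector128<T>.Zero (or Vector64<T>.Zero on arm64 and
// Vector256<T>.Zero on xarch), regardless of whether the base type is integral or floating point.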
inline bool GenTree::IsBoxedValue()
{
assert(gtOper != GT_BOX || AsBox()->BoxOp() != nullptr);
return (gtOper == GT_BOX) && (gtFlags & GTF_BOX_VALUE);
}
#ifdef DEBUG
//------------------------------------------------------------------------
// IsValidCallArgument: Given a GenTree node that represents an argument,
//                      enforce (or don't enforce) the following invariant.
//
// Arguments:
// instance method for a GenTree node
//
// Return values:
// true: the GenTree node is accepted as a valid argument
//    false: the GenTree node is not accepted as a valid argument
//
// Notes:
// For targets that don't support arguments as a list of fields, we do not support GT_FIELD_LIST.
//
// Currently for AMD64 UNIX we allow a limited case where a GT_FIELD_LIST is
// allowed but every element must be a GT_LCL_FLD.
//
// For the future targets that allow for Multireg args (and this includes the current ARM64 target),
// or that allow for passing promoted structs, we allow a GT_FIELD_LIST of arbitrary nodes.
// These would typically start out as GT_LCL_VARs or GT_LCL_FLDS or GT_INDs,
// but could be changed into constants or GT_COMMA trees by the later
// optimization phases.
inline bool GenTree::IsValidCallArgument()
{
if (OperIs(GT_FIELD_LIST))
{
#if !FEATURE_MULTIREG_ARGS && !FEATURE_PUT_STRUCT_ARG_STK
return false;
#else // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
// We allow this GT_FIELD_LIST as an argument
return true;
#endif // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
}
// We don't have either kind of list, so it satisfies the invariant.
return true;
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp1() const
{
return AsOp()->gtOp1;
}
#ifdef DEBUG
/* static */ inline bool GenTree::RequiresNonNullOp2(genTreeOps oper)
{
switch (oper)
{
case GT_ADD:
case GT_SUB:
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
case GT_INDEX:
case GT_ASG:
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_COMMA:
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
return true;
default:
return false;
}
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp2() const
{
assert(OperIsBinary());
GenTree* op2 = AsOp()->gtOp2;
// Only allow null op2 if the node type allows it, e.g. GT_LEA.
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtGetOp2IfPresent() const
{
/* AsOp()->gtOp2 is only valid for GTK_BINOP nodes. */
GenTree* op2 = OperIsBinary() ? AsOp()->gtOp2 : nullptr;
// This documents the genTreeOps for which AsOp()->gtOp2 cannot be nullptr.
// This helps prefix in its analysis of code which calls gtGetOp2()
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */)
{
GenTree* effectiveVal = this;
for (;;)
{
assert(!effectiveVal->OperIs(GT_PUTARG_TYPE));
if (effectiveVal->gtOper == GT_COMMA)
{
effectiveVal = effectiveVal->AsOp()->gtGetOp2();
}
else if (!commaOnly && (effectiveVal->gtOper == GT_NOP) && (effectiveVal->AsOp()->gtOp1 != nullptr))
{
effectiveVal = effectiveVal->AsOp()->gtOp1;
}
else
{
return effectiveVal;
}
}
}
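// For example, gtEffectiveVal() on COMMA(sideEffect, COMMA(otherSideEffect, value)) returns 'value';
// with commaOnly == false a NOP wrapper around a value is skipped as well.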
//-------------------------------------------------------------------------
// gtCommaAssignVal - find the value being assigned by a comma-wrapped assignment
//
// Returns:
//    tree representing the value being assigned, if this tree represents a
//    comma-wrapped local definition and use.
//
//    the original tree, if not.
//
inline GenTree* GenTree::gtCommaAssignVal()
{
GenTree* result = this;
if (OperIs(GT_COMMA))
{
GenTree* commaOp1 = AsOp()->gtOp1;
GenTree* commaOp2 = AsOp()->gtOp2;
if (commaOp2->OperIs(GT_LCL_VAR) && commaOp1->OperIs(GT_ASG))
{
GenTree* asgOp1 = commaOp1->AsOp()->gtOp1;
GenTree* asgOp2 = commaOp1->AsOp()->gtOp2;
if (asgOp1->OperIs(GT_LCL_VAR) && (asgOp1->AsLclVar()->GetLclNum() == commaOp2->AsLclVar()->GetLclNum()))
{
result = asgOp2;
}
}
}
return result;
}
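// For example, gtCommaAssignVal() on COMMA(ASG(V01, expr), V01) returns 'expr'; any other shape returns
// the original tree unchanged.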
//-------------------------------------------------------------------------
// gtSkipPutArgType - skip PUTARG_TYPE if it is present.
//
// Returns:
// the original tree or its child if it was a PUTARG_TYPE.
//
// Notes:
// PUTARG_TYPE should be skipped when we are doing transformations
// that are not affected by ABI, for example: inlining, implicit byref morphing.
//
inline GenTree* GenTree::gtSkipPutArgType()
{
if (OperIs(GT_PUTARG_TYPE))
{
GenTree* res = AsUnOp()->gtGetOp1();
assert(!res->OperIs(GT_PUTARG_TYPE));
return res;
}
return this;
}
inline GenTree* GenTree::gtSkipReloadOrCopy()
{
// There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
if (gtOper == GT_RELOAD || gtOper == GT_COPY)
{
assert(gtGetOp1()->OperGet() != GT_RELOAD && gtGetOp1()->OperGet() != GT_COPY);
return gtGetOp1();
}
return this;
}
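// For example, gtSkipReloadOrCopy() on COPY(CALL) or RELOAD(LCL_VAR) returns the wrapped CALL or LCL_VAR.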
//-----------------------------------------------------------------------------------
// IsMultiRegCall: whether a call node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register returning call
//
inline bool GenTree::IsMultiRegCall() const
{
if (this->IsCall())
{
return AsCall()->HasMultiRegRetVal();
}
return false;
}
//-----------------------------------------------------------------------------------
// IsMultiRegLclVar: whether a local var node defines multiple registers
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register defining local var
//
inline bool GenTree::IsMultiRegLclVar() const
{
if (OperIsScalarLocal())
{
return AsLclVar()->IsMultiReg();
}
return false;
}
//-----------------------------------------------------------------------------------
// GetRegByIndex: Get a specific register, based on regIndex, that is produced by this node.
//
// Arguments:
// regIndex - which register to return (must be 0 for non-multireg nodes)
//
// Return Value:
// The register, if any, assigned to this index for this node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline regNumber GenTree::GetRegByIndex(int regIndex) const
{
if (regIndex == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegNumByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegNumByIdx(regIndex);
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegNumByIdx(regIndex);
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegNumByIdx(regIndex);
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIs(GT_HWINTRINSIC))
{
assert(regIndex == 1);
// TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers.
return AsHWIntrinsic()->GetOtherReg();
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegNumByIdx(regIndex);
}
    assert(!"Invalid regIndex for GetRegByIndex");
return REG_NA;
}
//-----------------------------------------------------------------------------------
// GetRegTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// regIndex - index of register whose type will be returned
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg node that is *not* a copy or reload (which must retrieve the
// type from its source), and 'regIndex' must be a valid index for this node.
//
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline var_types GenTree::GetRegTypeByIndex(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
        return AsCall()->GetReturnTypeDesc()->GetReturnRegType(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegType(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegType(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsHWIntrinsic())
{
assert(TypeGet() == TYP_STRUCT);
#ifdef TARGET_ARM64
if (AsHWIntrinsic()->GetSimdSize() == 16)
{
return TYP_SIMD16;
}
else
{
assert(AsHWIntrinsic()->GetSimdSize() == 8);
return TYP_SIMD8;
}
#elif defined(TARGET_XARCH)
// At this time, the only multi-reg HW intrinsics all return the type of their
// arguments. If this changes, we will need a way to record or determine this.
return gtGetOp1()->TypeGet();
#endif
}
if (OperIsScalarLocal())
{
if (TypeGet() == TYP_LONG)
{
return TYP_INT;
}
assert(TypeGet() == TYP_STRUCT);
assert((gtFlags & GTF_VAR_MULTIREG) != 0);
// The register type for a multireg lclVar requires looking at the LclVarDsc,
// which requires a Compiler instance. The caller must use the GetFieldTypeByIndex
// on GenTreeLclVar.
assert(!"GetRegTypeByIndex for LclVar");
}
assert(!"Invalid node type for GetRegTypeByIndex");
return TYP_UNDEF;
}
//-----------------------------------------------------------------------------------
// GetRegSpillFlagByIdx: Get a specific register's spill flags, based on regIndex,
// for this multi-reg node.
//
// Arguments:
// regIndex - which register's spill flags to return
//
// Return Value:
// The spill flags (GTF_SPILL GTF_SPILLED) for this register.
//
// Notes:
// This must be a multireg node and 'regIndex' must be a valid index for this node.
// This method returns the GTF "equivalent" flags based on the packed flags on the multireg node.
//
inline GenTreeFlags GenTree::GetRegSpillFlagByIdx(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegSpillFlagByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegSpillFlagByIdx(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegSpillFlagByIdx(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegSpillFlagByIdx(regIndex);
}
assert(!"Invalid node type for GetRegSpillFlagByIdx");
return GTF_EMPTY;
}
//-----------------------------------------------------------------------------------
// GetLastUseBit: Get the last use bit for regIndex
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// The bit to set, clear or query for the last-use of the regIndex'th value.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline GenTreeFlags GenTree::GetLastUseBit(int regIndex) const
{
assert(regIndex < 4);
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
static_assert_no_msg((1 << MULTIREG_LAST_USE_SHIFT) == GTF_VAR_MULTIREG_DEATH0);
return (GenTreeFlags)(1 << (MULTIREG_LAST_USE_SHIFT + regIndex));
}
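// Illustration for GetLastUseBit: regIndex 0 maps to GTF_VAR_MULTIREG_DEATH0 (1 << MULTIREG_LAST_USE_SHIFT)
// and regIndex 2 maps to the bit two positions above it, per the static_assert above.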
//-----------------------------------------------------------------------------------
// IsLastUse: Determine whether this node is a last use of the regIndex'th value
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// true iff this is a last use.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::IsLastUse(int regIndex) const
{
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
return (gtFlags & GetLastUseBit(regIndex)) != 0;
}
//-----------------------------------------------------------------------------------
// HasLastUse: Determine whether this node is a last use of any value
//
// Return Value:
// true iff this has any last uses (i.e. at any index).
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::HasLastUse() const
{
return (gtFlags & (GTF_VAR_DEATH_MASK)) != 0;
}
//-----------------------------------------------------------------------------------
// SetLastUse: Set the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::SetLastUse(int regIndex)
{
gtFlags |= GetLastUseBit(regIndex);
}
//-----------------------------------------------------------------------------------
// ClearLastUse: Clear the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::ClearLastUse(int regIndex)
{
gtFlags &= ~GetLastUseBit(regIndex);
}
//-------------------------------------------------------------------------
// IsCopyOrReload: whether this is a GT_COPY or GT_RELOAD node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload node.
//
inline bool GenTree::IsCopyOrReload() const
{
return (gtOper == GT_COPY || gtOper == GT_RELOAD);
}
//-----------------------------------------------------------------------------------
// IsCopyOrReloadOfMultiRegCall: whether this is a GT_COPY or GT_RELOAD of a multi-reg
// call node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload of multi-reg call node.
//
inline bool GenTree::IsCopyOrReloadOfMultiRegCall() const
{
if (IsCopyOrReload())
{
return gtGetOp1()->IsMultiRegCall();
}
return false;
}
inline bool GenTree::IsCnsIntOrI() const
{
return (gtOper == GT_CNS_INT);
}
inline bool GenTree::IsIntegralConst() const
{
#ifdef TARGET_64BIT
return IsCnsIntOrI();
#else // !TARGET_64BIT
return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG));
#endif // !TARGET_64BIT
}
// Is this node an integer constant that fits in a 32-bit signed integer (INT32)
inline bool GenTree::IsIntCnsFitsInI32()
{
#ifdef TARGET_64BIT
return IsCnsIntOrI() && AsIntCon()->FitsInI32();
#else // !TARGET_64BIT
return IsCnsIntOrI();
#endif // !TARGET_64BIT
}
inline bool GenTree::IsCnsFltOrDbl() const
{
return OperGet() == GT_CNS_DBL;
}
inline bool GenTree::IsCnsNonZeroFltOrDbl() const
{
if (OperGet() == GT_CNS_DBL)
{
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue != 0;
}
return false;
}
inline bool GenTree::IsHelperCall()
{
return OperGet() == GT_CALL && AsCall()->gtCallType == CT_HELPER;
}
inline var_types GenTree::CastFromType()
{
return this->AsCast()->CastOp()->TypeGet();
}
inline var_types& GenTree::CastToType()
{
return this->AsCast()->gtCastType;
}
inline bool GenTree::isUsedFromSpillTemp() const
{
// If spilled and no reg at use, then it is used from the spill temp location rather than being reloaded.
if (((gtFlags & GTF_SPILLED) != 0) && ((gtFlags & GTF_NOREG_AT_USE) != 0))
{
return true;
}
return false;
}
/*****************************************************************************/
#ifndef HOST_64BIT
#include <poppack.h>
#endif
/*****************************************************************************/
const size_t TREE_NODE_SZ_SMALL = sizeof(GenTreeLclFld);
const size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
enum varRefKinds
{
VR_INVARIANT = 0x00, // an invariant value
VR_NONE = 0x00,
VR_IND_REF = 0x01, // an object reference
VR_IND_SCL = 0x02, // a non-object reference
VR_GLB_VAR = 0x04, // a global (clsVar)
};
/*****************************************************************************/
#endif // !GENTREE_H
/*****************************************************************************/
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/hwintrinsiccodegenarm64.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef FEATURE_HW_INTRINSICS
#include "codegen.h"
// HWIntrinsicImmOpHelper: constructs the helper class instance.
// This also determines what type of "switch" table is being used (if an immediate operand is not constant) and do
// some preparation work:
//
// a) If an immediate operand can be either 0 or 1, this creates <nonZeroLabel>.
//
// b) If an immediate operand can take any value in [0, upperBound), this extracts an internal register from the
//    intrinsic node. The register will later be used to store the computed branch target address.
//
// Arguments:
// codeGen -- an instance of CodeGen class.
// immOp -- an immediate operand of the intrinsic.
// intrin -- a hardware intrinsic tree node.
//
// Note: This class is designed to be used in the following way
// HWIntrinsicImmOpHelper helper(this, immOp, intrin);
//
// for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
// {
// -- emit an instruction for a given value of helper.ImmValue()
// }
//
// This allows combining the logic for the cases when immOp->isContainedIntOrIImmed() is either true or false into
// a single for-loop.
//
CodeGen::HWIntrinsicImmOpHelper::HWIntrinsicImmOpHelper(CodeGen* codeGen, GenTree* immOp, GenTreeHWIntrinsic* intrin)
: codeGen(codeGen), endLabel(nullptr), nonZeroLabel(nullptr), branchTargetReg(REG_NA)
{
assert(codeGen != nullptr);
assert(varTypeIsIntegral(immOp));
if (immOp->isContainedIntOrIImmed())
{
nonConstImmReg = REG_NA;
immValue = (int)immOp->AsIntCon()->IconValue();
immLowerBound = immValue;
immUpperBound = immValue;
}
else
{
const HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrin->GetHWIntrinsicId());
if (category == HW_Category_SIMDByIndexedElement)
{
const HWIntrinsic intrinInfo(intrin);
var_types indexedElementOpType;
if (intrinInfo.numOperands == 3)
{
indexedElementOpType = intrinInfo.op2->TypeGet();
}
else
{
assert(intrinInfo.numOperands == 4);
indexedElementOpType = intrinInfo.op3->TypeGet();
}
assert(varTypeIsSIMD(indexedElementOpType));
const unsigned int indexedElementSimdSize = genTypeSize(indexedElementOpType);
HWIntrinsicInfo::lookupImmBounds(intrin->GetHWIntrinsicId(), indexedElementSimdSize,
intrin->GetSimdBaseType(), &immLowerBound, &immUpperBound);
}
else
{
HWIntrinsicInfo::lookupImmBounds(intrin->GetHWIntrinsicId(), intrin->GetSimdSize(),
intrin->GetSimdBaseType(), &immLowerBound, &immUpperBound);
}
nonConstImmReg = immOp->GetRegNum();
immValue = immLowerBound;
if (TestImmOpZeroOrOne())
{
nonZeroLabel = codeGen->genCreateTempLabel();
}
else
{
            // At the moment, this helper supports only intrinsics that correspond to one machine instruction.
            // If we ever encounter an intrinsic that is either lowered into multiple instructions, or for which
            // the number of instructions per case is unknown a priori, we can extend support to these by using
            // the same approach as in hwintrinsicxarch.cpp - adding an additional indirection level in the form
            // of a branch table.
assert(!HWIntrinsicInfo::GeneratesMultipleIns(intrin->GetHWIntrinsicId()));
branchTargetReg = intrin->GetSingleTempReg();
}
endLabel = codeGen->genCreateTempLabel();
}
}
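// Note that when the immediate operand is a contained constant the helper degenerates to a single iteration:
// immLowerBound == immUpperBound == the constant, EmitBegin/EmitCaseEnd emit no labels or branches, and the
// usage loop shown above emits exactly one instruction for ImmValue().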
//------------------------------------------------------------------------
// EmitBegin: emits the beginning of a "switch" table, no-op if an immediate operand is constant.
//
// Note: The function is called at the beginning of code generation and emits
// a) If an immediate operand can be either 0 or 1
//
//         cbnz nonConstImmReg, <nonZeroLabel>
//
// b) If an immediate operand can take any value in [0, upperBound) range
//
// adr branchTargetReg, <beginLabel>
// add branchTargetReg, branchTargetReg, nonConstImmReg, lsl #3
// br branchTargetReg
//
// When an immediate operand is non constant this also defines <beginLabel> right after the emitted code.
//
void CodeGen::HWIntrinsicImmOpHelper::EmitBegin()
{
if (NonConstImmOp())
{
BasicBlock* beginLabel = codeGen->genCreateTempLabel();
if (TestImmOpZeroOrOne())
{
GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, nonZeroLabel, nonConstImmReg);
}
else
{
// Here we assume that each case consists of one arm64 instruction followed by "b endLabel".
// Since an arm64 instruction is 4 bytes, we branch to AddressOf(beginLabel) + (nonConstImmReg << 3).
GetEmitter()->emitIns_R_L(INS_adr, EA_8BYTE, beginLabel, branchTargetReg);
GetEmitter()->emitIns_R_R_R_I(INS_add, EA_8BYTE, branchTargetReg, branchTargetReg, nonConstImmReg, 3,
INS_OPTS_LSL);
// If the lower bound is non zero we need to adjust the branch target value by subtracting
// (immLowerBound << 3).
if (immLowerBound != 0)
{
GetEmitter()->emitIns_R_R_I(INS_sub, EA_8BYTE, branchTargetReg, branchTargetReg,
((ssize_t)immLowerBound << 3));
}
GetEmitter()->emitIns_R(INS_br, EA_8BYTE, branchTargetReg);
}
codeGen->genDefineInlineTempLabel(beginLabel);
}
}
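// Illustration of the computed-branch form emitted above: each case is one 4-byte instruction followed by a
// 4-byte "b endLabel", i.e. 8 bytes, so with immLowerBound == 0 and nonConstImmReg == 2 the computed target
// is AddressOf(beginLabel) + (2 << 3) == beginLabel + 16, which is the first instruction of case 2.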
//------------------------------------------------------------------------
// EmitCaseEnd: emits the end of a "case", no-op if an immediate operand is constant.
//
// Note: The function is called at the end of each "case" (i.e. after an instruction has been emitted for a given
// immediate value ImmValue())
// and emits
//
// b <endLabel>
//
// After the last "case" this defines <endLabel>.
//
// If an immediate operand is either 0 or 1 it also defines <nonZeroLabel> after the first "case".
//
void CodeGen::HWIntrinsicImmOpHelper::EmitCaseEnd()
{
assert(!Done());
if (NonConstImmOp())
{
const bool isLastCase = (immValue == immUpperBound);
if (isLastCase)
{
codeGen->genDefineInlineTempLabel(endLabel);
}
else
{
GetEmitter()->emitIns_J(INS_b, endLabel);
if (TestImmOpZeroOrOne())
{
codeGen->genDefineInlineTempLabel(nonZeroLabel);
}
else
{
BasicBlock* tempLabel = codeGen->genCreateTempLabel();
codeGen->genDefineInlineTempLabel(tempLabel);
}
}
}
immValue++;
}
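// Illustration of the resulting layout for immLowerBound == 0, immUpperBound == 2: beginLabel points at
// case 0; cases 0 and 1 are each followed by "b endLabel" (keeping every case 8 bytes wide), and endLabel
// is defined immediately after case 2, which therefore needs no trailing branch.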
//------------------------------------------------------------------------
// genHWIntrinsic: Generates the code for a given hardware intrinsic node.
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
{
const HWIntrinsic intrin(node);
// We need to validate that other phases of the compiler haven't introduced unsupported intrinsics
assert(compiler->compIsaSupportedDebugOnly(HWIntrinsicInfo::lookupIsa(intrin.id)));
regNumber targetReg = node->GetRegNum();
regNumber op1Reg = REG_NA;
regNumber op2Reg = REG_NA;
regNumber op3Reg = REG_NA;
regNumber op4Reg = REG_NA;
switch (intrin.numOperands)
{
case 4:
assert(intrin.op4 != nullptr);
op4Reg = intrin.op4->GetRegNum();
FALLTHROUGH;
case 3:
assert(intrin.op3 != nullptr);
op3Reg = intrin.op3->GetRegNum();
FALLTHROUGH;
case 2:
assert(intrin.op2 != nullptr);
op2Reg = intrin.op2->GetRegNum();
FALLTHROUGH;
case 1:
assert(intrin.op1 != nullptr);
op1Reg = intrin.op1->GetRegNum();
break;
case 0:
break;
default:
unreached();
}
emitAttr emitSize;
insOpts opt;
if (HWIntrinsicInfo::SIMDScalar(intrin.id))
{
emitSize = emitTypeSize(intrin.baseType);
opt = INS_OPTS_NONE;
}
else if (intrin.category == HW_Category_Scalar)
{
emitSize = emitActualTypeSize(intrin.baseType);
opt = INS_OPTS_NONE;
}
else if (intrin.category == HW_Category_Special)
{
assert(intrin.id == NI_ArmBase_Yield);
emitSize = EA_UNKNOWN;
opt = INS_OPTS_NONE;
}
else
{
emitSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
opt = genGetSimdInsOpt(emitSize, intrin.baseType);
}
const bool isRMW = node->isRMWHWIntrinsic(compiler);
const bool hasImmediateOperand = HWIntrinsicInfo::HasImmediateOperand(intrin.id);
genConsumeMultiOpOperands(node);
if (intrin.IsTableDriven())
{
const instruction ins = HWIntrinsicInfo::lookupIns(intrin.id, intrin.baseType);
assert(ins != INS_invalid);
if (intrin.category == HW_Category_SIMDByIndexedElement)
{
if (hasImmediateOperand)
{
if (isRMW)
{
assert(targetReg != op2Reg);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
HWIntrinsicImmOpHelper helper(this, intrin.op4, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_R_I(ins, emitSize, targetReg, op2Reg, op3Reg, elementIndex, opt);
}
}
else
{
HWIntrinsicImmOpHelper helper(this, intrin.op3, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_R_I(ins, emitSize, targetReg, op1Reg, op2Reg, elementIndex, opt);
}
}
}
else
{
if (isRMW)
{
assert(targetReg != op2Reg);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
GetEmitter()->emitIns_R_R_R_I(ins, emitSize, targetReg, op2Reg, op3Reg, 0, opt);
}
else
{
GetEmitter()->emitIns_R_R_R_I(ins, emitSize, targetReg, op1Reg, op2Reg, 0, opt);
}
}
}
else if ((intrin.category == HW_Category_ShiftLeftByImmediate) ||
(intrin.category == HW_Category_ShiftRightByImmediate))
{
assert(hasImmediateOperand);
if (isRMW)
{
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
HWIntrinsicImmOpHelper helper(this, intrin.op3, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int shiftAmount = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op2Reg, shiftAmount, opt);
}
}
else
{
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int shiftAmount = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op1Reg, shiftAmount, opt);
}
}
}
else
{
assert(!hasImmediateOperand);
switch (intrin.numOperands)
{
case 1:
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg, opt);
break;
case 2:
// This handles optimizations for instructions that have
// an implicit 'zero' vector of what would be the second operand.
if (HWIntrinsicInfo::SupportsContainment(intrin.id) && intrin.op2->isContained() &&
intrin.op2->IsVectorZero())
{
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg, opt);
}
else if (isRMW)
{
assert(targetReg != op2Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op2Reg, opt);
}
else
{
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op1Reg, op2Reg, opt);
}
break;
case 3:
assert(isRMW);
assert(targetReg != op2Reg);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op2Reg, op3Reg, opt);
break;
default:
unreached();
}
}
}
else
{
instruction ins = INS_invalid;
switch (intrin.id)
{
case NI_AdvSimd_AddWideningLower:
assert(varTypeIsIntegral(intrin.baseType));
if (intrin.op1->TypeGet() == TYP_SIMD8)
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_uaddl : INS_saddl;
}
else
{
assert(intrin.op1->TypeGet() == TYP_SIMD16);
ins = varTypeIsUnsigned(intrin.baseType) ? INS_uaddw : INS_saddw;
}
break;
case NI_AdvSimd_SubtractWideningLower:
assert(varTypeIsIntegral(intrin.baseType));
if (intrin.op1->TypeGet() == TYP_SIMD8)
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_usubl : INS_ssubl;
}
else
{
assert(intrin.op1->TypeGet() == TYP_SIMD16);
ins = varTypeIsUnsigned(intrin.baseType) ? INS_usubw : INS_ssubw;
}
break;
case NI_AdvSimd_AddWideningUpper:
assert(varTypeIsIntegral(intrin.baseType));
if (node->GetAuxiliaryType() == intrin.baseType)
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_uaddl2 : INS_saddl2;
}
else
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_uaddw2 : INS_saddw2;
}
break;
case NI_AdvSimd_SubtractWideningUpper:
assert(varTypeIsIntegral(intrin.baseType));
if (node->GetAuxiliaryType() == intrin.baseType)
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_usubl2 : INS_ssubl2;
}
else
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_usubw2 : INS_ssubw2;
}
break;
case NI_ArmBase_Yield:
{
ins = INS_yield;
break;
}
default:
ins = HWIntrinsicInfo::lookupIns(intrin.id, intrin.baseType);
break;
}
assert(ins != INS_invalid);
switch (intrin.id)
{
case NI_AdvSimd_BitwiseSelect:
// Even though BitwiseSelect is an RMW intrinsic per se, we don't want to mark it as such
// since we can handle all possible allocation decisions for targetReg.
assert(!isRMW);
if (targetReg == op1Reg)
{
GetEmitter()->emitIns_R_R_R(INS_bsl, emitSize, targetReg, op2Reg, op3Reg, opt);
}
else if (targetReg == op2Reg)
{
GetEmitter()->emitIns_R_R_R(INS_bif, emitSize, targetReg, op3Reg, op1Reg, opt);
}
else if (targetReg == op3Reg)
{
GetEmitter()->emitIns_R_R_R(INS_bit, emitSize, targetReg, op2Reg, op1Reg, opt);
}
else
{
GetEmitter()->emitIns_Mov(INS_mov, emitSize, targetReg, op1Reg, /* canSkip */ false);
GetEmitter()->emitIns_R_R_R(INS_bsl, emitSize, targetReg, op2Reg, op3Reg, opt);
}
break;
case NI_Crc32_ComputeCrc32:
case NI_Crc32_ComputeCrc32C:
case NI_Crc32_Arm64_ComputeCrc32:
case NI_Crc32_Arm64_ComputeCrc32C:
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op1Reg, op2Reg, opt);
break;
case NI_AdvSimd_AbsoluteCompareLessThan:
case NI_AdvSimd_AbsoluteCompareLessThanOrEqual:
case NI_AdvSimd_CompareLessThan:
case NI_AdvSimd_CompareLessThanOrEqual:
case NI_AdvSimd_Arm64_AbsoluteCompareLessThan:
case NI_AdvSimd_Arm64_AbsoluteCompareLessThanScalar:
case NI_AdvSimd_Arm64_AbsoluteCompareLessThanOrEqual:
case NI_AdvSimd_Arm64_AbsoluteCompareLessThanOrEqualScalar:
case NI_AdvSimd_Arm64_CompareLessThan:
case NI_AdvSimd_Arm64_CompareLessThanScalar:
case NI_AdvSimd_Arm64_CompareLessThanOrEqual:
case NI_AdvSimd_Arm64_CompareLessThanOrEqualScalar:
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op2Reg, op1Reg, opt);
break;
case NI_AdvSimd_FusedMultiplyAddScalar:
case NI_AdvSimd_FusedMultiplyAddNegatedScalar:
case NI_AdvSimd_FusedMultiplySubtractNegatedScalar:
case NI_AdvSimd_FusedMultiplySubtractScalar:
assert(opt == INS_OPTS_NONE);
GetEmitter()->emitIns_R_R_R_R(ins, emitSize, targetReg, op2Reg, op3Reg, op1Reg);
break;
case NI_AdvSimd_DuplicateSelectedScalarToVector64:
case NI_AdvSimd_DuplicateSelectedScalarToVector128:
case NI_AdvSimd_Arm64_DuplicateSelectedScalarToVector128:
{
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
// Prior to codegen, the emitSize is based on node->GetSimdSize() which
// tracks the size of the first operand and is used to tell if the index
            // is in range. However, when actually emitting, it needs to be the size
            // of the return value, and the size of the operand is interpreted based on the
            // index value.
assert(
GetEmitter()->isValidVectorIndex(emitSize, GetEmitter()->optGetElemsize(opt), helper.ImmValue()));
emitSize = emitActualTypeSize(node->gtType);
opt = genGetSimdInsOpt(emitSize, intrin.baseType);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
assert(opt != INS_OPTS_NONE);
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op1Reg, elementIndex, opt);
}
break;
}
case NI_AdvSimd_Extract:
{
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(intrin.baseType), targetReg, op1Reg, elementIndex,
INS_OPTS_NONE);
}
}
break;
case NI_AdvSimd_ExtractVector64:
case NI_AdvSimd_ExtractVector128:
{
opt = (intrin.id == NI_AdvSimd_ExtractVector64) ? INS_OPTS_8B : INS_OPTS_16B;
HWIntrinsicImmOpHelper helper(this, intrin.op3, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
const int byteIndex = genTypeSize(intrin.baseType) * elementIndex;
GetEmitter()->emitIns_R_R_R_I(ins, emitSize, targetReg, op1Reg, op2Reg, byteIndex, opt);
}
}
break;
case NI_AdvSimd_Insert:
assert(isRMW);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
if (intrin.op3->isContainedFltOrDblImmed())
{
assert(intrin.op2->isContainedIntOrIImmed());
assert(intrin.op2->AsIntCon()->gtIconVal == 0);
const double dataValue = intrin.op3->AsDblCon()->gtDconVal;
GetEmitter()->emitIns_R_F(INS_fmov, emitSize, targetReg, dataValue, opt);
}
else
{
assert(targetReg != op3Reg);
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
if (varTypeIsFloating(intrin.baseType))
{
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I_I(ins, emitSize, targetReg, op3Reg, elementIndex, 0, opt);
}
}
else
{
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op3Reg, elementIndex, opt);
}
}
}
break;
case NI_AdvSimd_InsertScalar:
{
assert(isRMW);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I_I(ins, emitSize, targetReg, op3Reg, elementIndex, 0, opt);
}
}
break;
case NI_AdvSimd_Arm64_InsertSelectedScalar:
{
assert(isRMW);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
const int resultIndex = (int)intrin.op2->AsIntCon()->gtIconVal;
const int valueIndex = (int)intrin.op4->AsIntCon()->gtIconVal;
GetEmitter()->emitIns_R_R_I_I(ins, emitSize, targetReg, op3Reg, resultIndex, valueIndex, opt);
}
break;
case NI_AdvSimd_LoadAndInsertScalar:
{
assert(isRMW);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op3Reg, elementIndex);
}
}
break;
case NI_AdvSimd_Arm64_LoadPairVector128:
case NI_AdvSimd_Arm64_LoadPairVector128NonTemporal:
case NI_AdvSimd_Arm64_LoadPairVector64:
case NI_AdvSimd_Arm64_LoadPairVector64NonTemporal:
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, node->GetOtherReg(), op1Reg);
break;
case NI_AdvSimd_Arm64_LoadPairScalarVector64:
case NI_AdvSimd_Arm64_LoadPairScalarVector64NonTemporal:
GetEmitter()->emitIns_R_R_R(ins, emitTypeSize(intrin.baseType), targetReg, node->GetOtherReg(), op1Reg);
break;
case NI_AdvSimd_Store:
GetEmitter()->emitIns_R_R(ins, emitSize, op2Reg, op1Reg, opt);
break;
case NI_AdvSimd_StoreSelectedScalar:
{
HWIntrinsicImmOpHelper helper(this, intrin.op3, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitSize, op2Reg, op1Reg, elementIndex, opt);
}
}
break;
case NI_AdvSimd_Arm64_StorePair:
case NI_AdvSimd_Arm64_StorePairNonTemporal:
GetEmitter()->emitIns_R_R_R(ins, emitSize, op2Reg, op3Reg, op1Reg);
break;
case NI_AdvSimd_Arm64_StorePairScalar:
case NI_AdvSimd_Arm64_StorePairScalarNonTemporal:
GetEmitter()->emitIns_R_R_R(ins, emitTypeSize(intrin.baseType), op2Reg, op3Reg, op1Reg);
break;
case NI_Vector64_CreateScalarUnsafe:
case NI_Vector128_CreateScalarUnsafe:
if (intrin.op1->isContainedFltOrDblImmed())
{
// fmov reg, #imm8
const double dataValue = intrin.op1->AsDblCon()->gtDconVal;
GetEmitter()->emitIns_R_F(ins, emitTypeSize(intrin.baseType), targetReg, dataValue, INS_OPTS_NONE);
}
else if (varTypeIsFloating(intrin.baseType))
{
// fmov reg1, reg2
assert(GetEmitter()->IsMovInstruction(ins));
GetEmitter()->emitIns_Mov(ins, emitTypeSize(intrin.baseType), targetReg, op1Reg,
/* canSkip */ false, INS_OPTS_NONE);
}
else
{
if (intrin.op1->isContainedIntOrIImmed())
{
// movi/movni reg, #imm8
const ssize_t dataValue = intrin.op1->AsIntCon()->gtIconVal;
GetEmitter()->emitIns_R_I(INS_movi, emitSize, targetReg, dataValue, opt);
}
else
{
// ins reg1[0], reg2
GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(intrin.baseType), targetReg, op1Reg, 0,
INS_OPTS_NONE);
}
}
break;
case NI_AdvSimd_AddWideningLower:
case NI_AdvSimd_AddWideningUpper:
case NI_AdvSimd_SubtractWideningLower:
case NI_AdvSimd_SubtractWideningUpper:
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op1Reg, op2Reg, opt);
break;
case NI_AdvSimd_Arm64_AddSaturateScalar:
if (varTypeIsUnsigned(node->GetAuxiliaryType()) != varTypeIsUnsigned(intrin.baseType))
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_usqadd : INS_suqadd;
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op2Reg, opt);
}
else
{
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op1Reg, op2Reg, opt);
}
break;
case NI_ArmBase_Yield:
{
GetEmitter()->emitIns(ins);
break;
}
// mvni doesn't support the range of element types, so hard code the 'opts' value.
case NI_Vector64_get_Zero:
case NI_Vector64_get_AllBitsSet:
GetEmitter()->emitIns_R_I(ins, emitSize, targetReg, 0, INS_OPTS_2S);
break;
case NI_Vector128_get_Zero:
case NI_Vector128_get_AllBitsSet:
GetEmitter()->emitIns_R_I(ins, emitSize, targetReg, 0, INS_OPTS_4S);
break;
case NI_AdvSimd_DuplicateToVector64:
case NI_AdvSimd_DuplicateToVector128:
case NI_AdvSimd_Arm64_DuplicateToVector64:
case NI_AdvSimd_Arm64_DuplicateToVector128:
{
if (varTypeIsFloating(intrin.baseType))
{
if (intrin.op1->isContainedFltOrDblImmed())
{
const double dataValue = intrin.op1->AsDblCon()->gtDconVal;
GetEmitter()->emitIns_R_F(INS_fmov, emitSize, targetReg, dataValue, opt);
}
else if (intrin.id == NI_AdvSimd_Arm64_DuplicateToVector64)
{
assert(intrin.baseType == TYP_DOUBLE);
assert(GetEmitter()->IsMovInstruction(ins));
GetEmitter()->emitIns_Mov(ins, emitSize, targetReg, op1Reg, /* canSkip */ false, opt);
}
else
{
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op1Reg, 0, opt);
}
}
else if (intrin.op1->isContainedIntOrIImmed())
{
const ssize_t dataValue = intrin.op1->AsIntCon()->gtIconVal;
GetEmitter()->emitIns_R_I(INS_movi, emitSize, targetReg, dataValue, opt);
}
else if (GetEmitter()->IsMovInstruction(ins))
{
GetEmitter()->emitIns_Mov(ins, emitSize, targetReg, op1Reg, /* canSkip */ false, opt);
}
else
{
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg, opt);
}
}
break;
case NI_Vector64_ToVector128:
GetEmitter()->emitIns_Mov(ins, emitSize, targetReg, op1Reg, /* canSkip */ false);
break;
case NI_Vector64_ToVector128Unsafe:
case NI_Vector128_GetLower:
GetEmitter()->emitIns_Mov(ins, emitSize, targetReg, op1Reg, /* canSkip */ true);
break;
case NI_Vector64_GetElement:
case NI_Vector128_GetElement:
{
assert(intrin.numOperands == 2);
var_types simdType = Compiler::getSIMDTypeForSize(node->GetSimdSize());
if (simdType == TYP_SIMD12)
{
// op1 of TYP_SIMD12 should be considered as TYP_SIMD16
simdType = TYP_SIMD16;
}
if (!intrin.op2->OperIsConst())
{
assert(!intrin.op2->isContained());
emitAttr baseTypeSize = emitTypeSize(intrin.baseType);
unsigned baseTypeScale = genLog2(EA_SIZE_IN_BYTES(baseTypeSize));
regNumber baseReg;
regNumber indexReg = op2Reg;
// Optimize the case of op1 is in memory and trying to access ith element.
if (!intrin.op1->isUsedFromReg())
{
assert(intrin.op1->isContained());
if (intrin.op1->OperIsLocal())
{
unsigned varNum = intrin.op1->AsLclVarCommon()->GetLclNum();
baseReg = node->ExtractTempReg();
// Load the address of varNum
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, baseReg, varNum, 0);
}
else
{
// Require GT_IND addr to be not contained.
assert(intrin.op1->OperIs(GT_IND));
GenTree* addr = intrin.op1->AsIndir()->Addr();
assert(!addr->isContained());
baseReg = addr->GetRegNum();
}
}
else
{
unsigned simdInitTempVarNum = compiler->lvaSIMDInitTempVarNum;
noway_assert(simdInitTempVarNum != BAD_VAR_NUM);
baseReg = node->ExtractTempReg();
// Load the address of simdInitTempVarNum
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, baseReg, simdInitTempVarNum, 0);
// Store the vector to simdInitTempVarNum
GetEmitter()->emitIns_R_R(INS_str, emitTypeSize(simdType), op1Reg, baseReg);
}
assert(genIsValidIntReg(indexReg));
assert(genIsValidIntReg(baseReg));
assert(baseReg != indexReg);
// Load item at baseReg[index]
GetEmitter()->emitIns_R_R_R_Ext(ins_Load(intrin.baseType), baseTypeSize, targetReg, baseReg,
indexReg, INS_OPTS_LSL, baseTypeScale);
}
else if (!GetEmitter()->isValidVectorIndex(emitTypeSize(simdType), emitTypeSize(intrin.baseType),
intrin.op2->AsIntCon()->IconValue()))
{
// We only need to generate code for the get if the index is valid
// If the index is invalid, the code previously generated for the range check will throw
}
else if (!intrin.op1->isUsedFromReg())
{
assert(intrin.op1->isContained());
assert(intrin.op2->IsCnsIntOrI());
int offset = (int)intrin.op2->AsIntCon()->IconValue() * genTypeSize(intrin.baseType);
instruction ins = ins_Load(intrin.baseType);
assert(!intrin.op1->isUsedFromReg());
if (intrin.op1->OperIsLocal())
{
unsigned varNum = intrin.op1->AsLclVarCommon()->GetLclNum();
GetEmitter()->emitIns_R_S(ins, emitActualTypeSize(intrin.baseType), targetReg, varNum, offset);
}
else
{
assert(intrin.op1->OperIs(GT_IND));
GenTree* addr = intrin.op1->AsIndir()->Addr();
assert(!addr->isContained());
regNumber baseReg = addr->GetRegNum();
// ldr targetReg, [baseReg, #offset]
GetEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(intrin.baseType), targetReg, baseReg,
offset);
}
}
else
{
assert(intrin.op2->IsCnsIntOrI());
ssize_t indexValue = intrin.op2->AsIntCon()->IconValue();
// no-op if vector is float/double, targetReg == op1Reg and fetching for 0th index.
if ((varTypeIsFloating(intrin.baseType) && (targetReg == op1Reg) && (indexValue == 0)))
{
break;
}
GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(intrin.baseType), targetReg, op1Reg, indexValue,
INS_OPTS_NONE);
}
break;
}
case NI_Vector64_ToScalar:
case NI_Vector128_ToScalar:
{
const ssize_t indexValue = 0;
// no-op if vector is float/double, targetReg == op1Reg and fetching for 0th index.
if ((varTypeIsFloating(intrin.baseType) && (targetReg == op1Reg) && (indexValue == 0)))
{
break;
}
GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(intrin.baseType), targetReg, op1Reg, indexValue,
INS_OPTS_NONE);
}
break;
case NI_AdvSimd_ReverseElement16:
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg,
(emitSize == EA_8BYTE) ? INS_OPTS_4H : INS_OPTS_8H);
break;
case NI_AdvSimd_ReverseElement32:
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg,
(emitSize == EA_8BYTE) ? INS_OPTS_2S : INS_OPTS_4S);
break;
case NI_AdvSimd_ReverseElement8:
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg,
(emitSize == EA_8BYTE) ? INS_OPTS_8B : INS_OPTS_16B);
break;
default:
unreached();
}
}
genProduceReg(node);
}
#endif // FEATURE_HW_INTRINSICS
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef FEATURE_HW_INTRINSICS
#include "codegen.h"
// HWIntrinsicImmOpHelper: constructs the helper class instance.
// This also determines what type of "switch" table is being used (if an immediate operand is not constant) and does
// some preparation work:
//
// a) If an immediate operand can be either 0 or 1, this creates <nonZeroLabel>.
//
// b) If an immediate operand can take any value in [0, upperBound), this extracts an internal register from the
// intrinsic node. The register will later be used to store the computed branch target address.
//
// Arguments:
// codeGen -- an instance of CodeGen class.
// immOp -- an immediate operand of the intrinsic.
// intrin -- a hardware intrinsic tree node.
//
// Note: This class is designed to be used in the following way
// HWIntrinsicImmOpHelper helper(this, immOp, intrin);
//
// for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
// {
// -- emit an instruction for a given value of helper.ImmValue()
// }
//
// This allows combining the logic for the cases when immOp->isContainedIntOrIImmed() is either true or false in the
// form of a for-loop.
//
CodeGen::HWIntrinsicImmOpHelper::HWIntrinsicImmOpHelper(CodeGen* codeGen, GenTree* immOp, GenTreeHWIntrinsic* intrin)
: codeGen(codeGen), endLabel(nullptr), nonZeroLabel(nullptr), branchTargetReg(REG_NA)
{
assert(codeGen != nullptr);
assert(varTypeIsIntegral(immOp));
if (immOp->isContainedIntOrIImmed())
{
nonConstImmReg = REG_NA;
immValue = (int)immOp->AsIntCon()->IconValue();
immLowerBound = immValue;
immUpperBound = immValue;
}
else
{
const HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrin->GetHWIntrinsicId());
if (category == HW_Category_SIMDByIndexedElement)
{
const HWIntrinsic intrinInfo(intrin);
var_types indexedElementOpType;
if (intrinInfo.numOperands == 3)
{
indexedElementOpType = intrinInfo.op2->TypeGet();
}
else
{
assert(intrinInfo.numOperands == 4);
indexedElementOpType = intrinInfo.op3->TypeGet();
}
assert(varTypeIsSIMD(indexedElementOpType));
const unsigned int indexedElementSimdSize = genTypeSize(indexedElementOpType);
HWIntrinsicInfo::lookupImmBounds(intrin->GetHWIntrinsicId(), indexedElementSimdSize,
intrin->GetSimdBaseType(), &immLowerBound, &immUpperBound);
}
else
{
HWIntrinsicInfo::lookupImmBounds(intrin->GetHWIntrinsicId(), intrin->GetSimdSize(),
intrin->GetSimdBaseType(), &immLowerBound, &immUpperBound);
}
nonConstImmReg = immOp->GetRegNum();
immValue = immLowerBound;
if (TestImmOpZeroOrOne())
{
nonZeroLabel = codeGen->genCreateTempLabel();
}
else
{
// At the moment, this helper supports only intrinsics that correspond to one machine instruction.
// If we ever encounter an intrinsic that is either lowered into multiple instructions or where the
// number of instructions that correspond to each case is unknown a priori, we can extend support to
// these by using the same approach as in hwintrinsicxarch.cpp - adding an additional indirection
// level in the form of a branch table.
assert(!HWIntrinsicInfo::GeneratesMultipleIns(intrin->GetHWIntrinsicId()));
branchTargetReg = intrin->GetSingleTempReg();
}
endLabel = codeGen->genCreateTempLabel();
}
}
//------------------------------------------------------------------------
// EmitBegin: emits the beginning of a "switch" table, no-op if an immediate operand is constant.
//
// Note: The function is called at the beginning of code generation and emits
// a) If an immediate operand can be either 0 or 1
//
// cbnz nonConstImmReg, <nonZeroLabel>
//
// b) If an immediate operand can take any value in [0, upperBound) range
//
// adr branchTargetReg, <beginLabel>
// add branchTargetReg, branchTargetReg, nonConstImmReg, lsl #3
// br branchTargetReg
//
// When an immediate operand is non-constant, this also defines <beginLabel> right after the emitted code.
//
void CodeGen::HWIntrinsicImmOpHelper::EmitBegin()
{
if (NonConstImmOp())
{
BasicBlock* beginLabel = codeGen->genCreateTempLabel();
if (TestImmOpZeroOrOne())
{
GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, nonZeroLabel, nonConstImmReg);
}
else
{
// Here we assume that each case consists of one arm64 instruction followed by "b endLabel".
// Since an arm64 instruction is 4 bytes, we branch to AddressOf(beginLabel) + (nonConstImmReg << 3).
GetEmitter()->emitIns_R_L(INS_adr, EA_8BYTE, beginLabel, branchTargetReg);
GetEmitter()->emitIns_R_R_R_I(INS_add, EA_8BYTE, branchTargetReg, branchTargetReg, nonConstImmReg, 3,
INS_OPTS_LSL);
// If the lower bound is non zero we need to adjust the branch target value by subtracting
// (immLowerBound << 3).
if (immLowerBound != 0)
{
GetEmitter()->emitIns_R_R_I(INS_sub, EA_8BYTE, branchTargetReg, branchTargetReg,
((ssize_t)immLowerBound << 3));
}
GetEmitter()->emitIns_R(INS_br, EA_8BYTE, branchTargetReg);
}
codeGen->genDefineInlineTempLabel(beginLabel);
}
}
//------------------------------------------------------------------------
// EmitCaseEnd: emits the end of a "case", no-op if an immediate operand is constant.
//
// Note: The function is called at the end of each "case" (i.e. after an instruction has been emitted for a given
// immediate value ImmValue())
// and emits
//
// b <endLabel>
//
// After the last "case" this defines <endLabel>.
//
// If an immediate operand is either 0 or 1 it also defines <nonZeroLabel> after the first "case".
//
void CodeGen::HWIntrinsicImmOpHelper::EmitCaseEnd()
{
assert(!Done());
if (NonConstImmOp())
{
const bool isLastCase = (immValue == immUpperBound);
if (isLastCase)
{
codeGen->genDefineInlineTempLabel(endLabel);
}
else
{
GetEmitter()->emitIns_J(INS_b, endLabel);
if (TestImmOpZeroOrOne())
{
codeGen->genDefineInlineTempLabel(nonZeroLabel);
}
else
{
BasicBlock* tempLabel = codeGen->genCreateTempLabel();
codeGen->genDefineInlineTempLabel(tempLabel);
}
}
}
immValue++;
}
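// For reference (illustrative sketch only): with a non-constant immediate in, say, [0, 3), the code produced by
// EmitBegin, the caller's per-case instruction, and EmitCaseEnd has roughly the following shape. Each case is one
// instruction plus a branch (8 bytes), which is why EmitBegin scales the index by lsl #3.
//
//        adr  branchTargetReg, beginLabel
//        add  branchTargetReg, branchTargetReg, nonConstImmReg, lsl #3
//        br   branchTargetReg
//    beginLabel:
//        <case 0 instruction>
//        b    endLabel
//        <case 1 instruction>
//        b    endLabel
//        <case 2 instruction>
//    endLabel: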
//------------------------------------------------------------------------
// genHWIntrinsic: Generates the code for a given hardware intrinsic node.
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
{
const HWIntrinsic intrin(node);
// We need to validate that other phases of the compiler haven't introduced unsupported intrinsics
assert(compiler->compIsaSupportedDebugOnly(HWIntrinsicInfo::lookupIsa(intrin.id)));
regNumber targetReg = node->GetRegNum();
regNumber op1Reg = REG_NA;
regNumber op2Reg = REG_NA;
regNumber op3Reg = REG_NA;
regNumber op4Reg = REG_NA;
switch (intrin.numOperands)
{
case 4:
assert(intrin.op4 != nullptr);
op4Reg = intrin.op4->GetRegNum();
FALLTHROUGH;
case 3:
assert(intrin.op3 != nullptr);
op3Reg = intrin.op3->GetRegNum();
FALLTHROUGH;
case 2:
assert(intrin.op2 != nullptr);
op2Reg = intrin.op2->GetRegNum();
FALLTHROUGH;
case 1:
assert(intrin.op1 != nullptr);
op1Reg = intrin.op1->GetRegNum();
break;
case 0:
break;
default:
unreached();
}
emitAttr emitSize;
insOpts opt;
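// Note: the chain below derives the emit size and SIMD arrangement (insOpts) from the intrinsic category:
// SIMD-scalar and scalar intrinsics operate on the base type size, NI_ArmBase_Yield has no operands, and
// everything else uses the full vector size reported by the node.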
if (HWIntrinsicInfo::SIMDScalar(intrin.id))
{
emitSize = emitTypeSize(intrin.baseType);
opt = INS_OPTS_NONE;
}
else if (intrin.category == HW_Category_Scalar)
{
emitSize = emitActualTypeSize(intrin.baseType);
opt = INS_OPTS_NONE;
}
else if (intrin.category == HW_Category_Special)
{
assert(intrin.id == NI_ArmBase_Yield);
emitSize = EA_UNKNOWN;
opt = INS_OPTS_NONE;
}
else
{
emitSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
opt = genGetSimdInsOpt(emitSize, intrin.baseType);
}
const bool isRMW = node->isRMWHWIntrinsic(compiler);
const bool hasImmediateOperand = HWIntrinsicInfo::HasImmediateOperand(intrin.id);
genConsumeMultiOpOperands(node);
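// Table-driven intrinsics map onto a single instruction looked up from the intrinsic table; the rest are
// handled by the hand-written cases further below.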
if (intrin.IsTableDriven())
{
const instruction ins = HWIntrinsicInfo::lookupIns(intrin.id, intrin.baseType);
assert(ins != INS_invalid);
if (intrin.category == HW_Category_SIMDByIndexedElement)
{
if (hasImmediateOperand)
{
if (isRMW)
{
assert(targetReg != op2Reg);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
HWIntrinsicImmOpHelper helper(this, intrin.op4, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_R_I(ins, emitSize, targetReg, op2Reg, op3Reg, elementIndex, opt);
}
}
else
{
HWIntrinsicImmOpHelper helper(this, intrin.op3, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_R_I(ins, emitSize, targetReg, op1Reg, op2Reg, elementIndex, opt);
}
}
}
else
{
if (isRMW)
{
assert(targetReg != op2Reg);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
GetEmitter()->emitIns_R_R_R_I(ins, emitSize, targetReg, op2Reg, op3Reg, 0, opt);
}
else
{
GetEmitter()->emitIns_R_R_R_I(ins, emitSize, targetReg, op1Reg, op2Reg, 0, opt);
}
}
}
else if ((intrin.category == HW_Category_ShiftLeftByImmediate) ||
(intrin.category == HW_Category_ShiftRightByImmediate))
{
assert(hasImmediateOperand);
if (isRMW)
{
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
HWIntrinsicImmOpHelper helper(this, intrin.op3, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int shiftAmount = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op2Reg, shiftAmount, opt);
}
}
else
{
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int shiftAmount = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op1Reg, shiftAmount, opt);
}
}
}
else
{
assert(!hasImmediateOperand);
switch (intrin.numOperands)
{
case 1:
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg, opt);
break;
case 2:
// This handles optimizations for instructions that have
// an implicit 'zero' vector of what would be the second operand.
if (HWIntrinsicInfo::SupportsContainment(intrin.id) && intrin.op2->isContained() &&
intrin.op2->IsVectorZero())
{
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg, opt);
}
else if (isRMW)
{
assert(targetReg != op2Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op2Reg, opt);
}
else
{
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op1Reg, op2Reg, opt);
}
break;
case 3:
assert(isRMW);
assert(targetReg != op2Reg);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op2Reg, op3Reg, opt);
break;
default:
unreached();
}
}
}
else
{
instruction ins = INS_invalid;
switch (intrin.id)
{
case NI_AdvSimd_AddWideningLower:
assert(varTypeIsIntegral(intrin.baseType));
if (intrin.op1->TypeGet() == TYP_SIMD8)
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_uaddl : INS_saddl;
}
else
{
assert(intrin.op1->TypeGet() == TYP_SIMD16);
ins = varTypeIsUnsigned(intrin.baseType) ? INS_uaddw : INS_saddw;
}
break;
case NI_AdvSimd_SubtractWideningLower:
assert(varTypeIsIntegral(intrin.baseType));
if (intrin.op1->TypeGet() == TYP_SIMD8)
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_usubl : INS_ssubl;
}
else
{
assert(intrin.op1->TypeGet() == TYP_SIMD16);
ins = varTypeIsUnsigned(intrin.baseType) ? INS_usubw : INS_ssubw;
}
break;
case NI_AdvSimd_AddWideningUpper:
assert(varTypeIsIntegral(intrin.baseType));
if (node->GetAuxiliaryType() == intrin.baseType)
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_uaddl2 : INS_saddl2;
}
else
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_uaddw2 : INS_saddw2;
}
break;
case NI_AdvSimd_SubtractWideningUpper:
assert(varTypeIsIntegral(intrin.baseType));
if (node->GetAuxiliaryType() == intrin.baseType)
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_usubl2 : INS_ssubl2;
}
else
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_usubw2 : INS_ssubw2;
}
break;
case NI_ArmBase_Yield:
{
ins = INS_yield;
break;
}
default:
ins = HWIntrinsicInfo::lookupIns(intrin.id, intrin.baseType);
break;
}
assert(ins != INS_invalid);
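// The remaining (non-table-driven) intrinsics need bespoke handling - e.g. swapped operand order, extra
// register constraints or special encodings - so each gets its own case below.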
switch (intrin.id)
{
case NI_AdvSimd_BitwiseSelect:
// Even though BitwiseSelect is an RMW intrinsic per se, we don't want to mark it as such
// since we can handle all possible allocation decisions for targetReg.
assert(!isRMW);
if (targetReg == op1Reg)
{
GetEmitter()->emitIns_R_R_R(INS_bsl, emitSize, targetReg, op2Reg, op3Reg, opt);
}
else if (targetReg == op2Reg)
{
GetEmitter()->emitIns_R_R_R(INS_bif, emitSize, targetReg, op3Reg, op1Reg, opt);
}
else if (targetReg == op3Reg)
{
GetEmitter()->emitIns_R_R_R(INS_bit, emitSize, targetReg, op2Reg, op1Reg, opt);
}
else
{
GetEmitter()->emitIns_Mov(INS_mov, emitSize, targetReg, op1Reg, /* canSkip */ false);
GetEmitter()->emitIns_R_R_R(INS_bsl, emitSize, targetReg, op2Reg, op3Reg, opt);
}
break;
case NI_Crc32_ComputeCrc32:
case NI_Crc32_ComputeCrc32C:
case NI_Crc32_Arm64_ComputeCrc32:
case NI_Crc32_Arm64_ComputeCrc32C:
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op1Reg, op2Reg, opt);
break;
case NI_AdvSimd_AbsoluteCompareLessThan:
case NI_AdvSimd_AbsoluteCompareLessThanOrEqual:
case NI_AdvSimd_CompareLessThan:
case NI_AdvSimd_CompareLessThanOrEqual:
case NI_AdvSimd_Arm64_AbsoluteCompareLessThan:
case NI_AdvSimd_Arm64_AbsoluteCompareLessThanScalar:
case NI_AdvSimd_Arm64_AbsoluteCompareLessThanOrEqual:
case NI_AdvSimd_Arm64_AbsoluteCompareLessThanOrEqualScalar:
case NI_AdvSimd_Arm64_CompareLessThan:
case NI_AdvSimd_Arm64_CompareLessThanScalar:
case NI_AdvSimd_Arm64_CompareLessThanOrEqual:
case NI_AdvSimd_Arm64_CompareLessThanOrEqualScalar:
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op2Reg, op1Reg, opt);
break;
case NI_AdvSimd_FusedMultiplyAddScalar:
case NI_AdvSimd_FusedMultiplyAddNegatedScalar:
case NI_AdvSimd_FusedMultiplySubtractNegatedScalar:
case NI_AdvSimd_FusedMultiplySubtractScalar:
assert(opt == INS_OPTS_NONE);
GetEmitter()->emitIns_R_R_R_R(ins, emitSize, targetReg, op2Reg, op3Reg, op1Reg);
break;
case NI_AdvSimd_DuplicateSelectedScalarToVector64:
case NI_AdvSimd_DuplicateSelectedScalarToVector128:
case NI_AdvSimd_Arm64_DuplicateSelectedScalarToVector128:
{
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
// Prior to codegen, the emitSize is based on node->GetSimdSize() which
// tracks the size of the first operand and is used to tell if the index
// is in range. However, when actually emitting, it needs to be the size
// of the return type, and the size of the operand is interpreted based
// on the index value.
assert(
GetEmitter()->isValidVectorIndex(emitSize, GetEmitter()->optGetElemsize(opt), helper.ImmValue()));
emitSize = emitActualTypeSize(node->gtType);
opt = genGetSimdInsOpt(emitSize, intrin.baseType);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
assert(opt != INS_OPTS_NONE);
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op1Reg, elementIndex, opt);
}
break;
}
case NI_AdvSimd_Extract:
{
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(intrin.baseType), targetReg, op1Reg, elementIndex,
INS_OPTS_NONE);
}
}
break;
case NI_AdvSimd_ExtractVector64:
case NI_AdvSimd_ExtractVector128:
{
opt = (intrin.id == NI_AdvSimd_ExtractVector64) ? INS_OPTS_8B : INS_OPTS_16B;
HWIntrinsicImmOpHelper helper(this, intrin.op3, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
const int byteIndex = genTypeSize(intrin.baseType) * elementIndex;
GetEmitter()->emitIns_R_R_R_I(ins, emitSize, targetReg, op1Reg, op2Reg, byteIndex, opt);
}
}
break;
case NI_AdvSimd_Insert:
assert(isRMW);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
if (intrin.op3->isContainedFltOrDblImmed())
{
assert(intrin.op2->isContainedIntOrIImmed());
assert(intrin.op2->AsIntCon()->gtIconVal == 0);
const double dataValue = intrin.op3->AsDblCon()->gtDconVal;
GetEmitter()->emitIns_R_F(INS_fmov, emitSize, targetReg, dataValue, opt);
}
else
{
assert(targetReg != op3Reg);
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
if (varTypeIsFloating(intrin.baseType))
{
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I_I(ins, emitSize, targetReg, op3Reg, elementIndex, 0, opt);
}
}
else
{
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op3Reg, elementIndex, opt);
}
}
}
break;
case NI_AdvSimd_InsertScalar:
{
assert(isRMW);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I_I(ins, emitSize, targetReg, op3Reg, elementIndex, 0, opt);
}
}
break;
case NI_AdvSimd_Arm64_InsertSelectedScalar:
{
assert(isRMW);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
const int resultIndex = (int)intrin.op2->AsIntCon()->gtIconVal;
const int valueIndex = (int)intrin.op4->AsIntCon()->gtIconVal;
GetEmitter()->emitIns_R_R_I_I(ins, emitSize, targetReg, op3Reg, resultIndex, valueIndex, opt);
}
break;
case NI_AdvSimd_LoadAndInsertScalar:
{
assert(isRMW);
assert(targetReg != op3Reg);
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op3Reg, elementIndex);
}
}
break;
case NI_AdvSimd_Arm64_LoadPairVector128:
case NI_AdvSimd_Arm64_LoadPairVector128NonTemporal:
case NI_AdvSimd_Arm64_LoadPairVector64:
case NI_AdvSimd_Arm64_LoadPairVector64NonTemporal:
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, node->GetOtherReg(), op1Reg);
break;
case NI_AdvSimd_Arm64_LoadPairScalarVector64:
case NI_AdvSimd_Arm64_LoadPairScalarVector64NonTemporal:
GetEmitter()->emitIns_R_R_R(ins, emitTypeSize(intrin.baseType), targetReg, node->GetOtherReg(), op1Reg);
break;
case NI_AdvSimd_Store:
GetEmitter()->emitIns_R_R(ins, emitSize, op2Reg, op1Reg, opt);
break;
case NI_AdvSimd_StoreSelectedScalar:
{
HWIntrinsicImmOpHelper helper(this, intrin.op3, node);
for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
{
const int elementIndex = helper.ImmValue();
GetEmitter()->emitIns_R_R_I(ins, emitSize, op2Reg, op1Reg, elementIndex, opt);
}
}
break;
case NI_AdvSimd_Arm64_StorePair:
case NI_AdvSimd_Arm64_StorePairNonTemporal:
GetEmitter()->emitIns_R_R_R(ins, emitSize, op2Reg, op3Reg, op1Reg);
break;
case NI_AdvSimd_Arm64_StorePairScalar:
case NI_AdvSimd_Arm64_StorePairScalarNonTemporal:
GetEmitter()->emitIns_R_R_R(ins, emitTypeSize(intrin.baseType), op2Reg, op3Reg, op1Reg);
break;
case NI_Vector64_CreateScalarUnsafe:
case NI_Vector128_CreateScalarUnsafe:
if (intrin.op1->isContainedFltOrDblImmed())
{
// fmov reg, #imm8
const double dataValue = intrin.op1->AsDblCon()->gtDconVal;
GetEmitter()->emitIns_R_F(ins, emitTypeSize(intrin.baseType), targetReg, dataValue, INS_OPTS_NONE);
}
else if (varTypeIsFloating(intrin.baseType))
{
// fmov reg1, reg2
assert(GetEmitter()->IsMovInstruction(ins));
GetEmitter()->emitIns_Mov(ins, emitTypeSize(intrin.baseType), targetReg, op1Reg,
/* canSkip */ false, INS_OPTS_NONE);
}
else
{
if (intrin.op1->isContainedIntOrIImmed())
{
// movi/movni reg, #imm8
const ssize_t dataValue = intrin.op1->AsIntCon()->gtIconVal;
GetEmitter()->emitIns_R_I(INS_movi, emitSize, targetReg, dataValue, opt);
}
else
{
// ins reg1[0], reg2
GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(intrin.baseType), targetReg, op1Reg, 0,
INS_OPTS_NONE);
}
}
break;
case NI_AdvSimd_AddWideningLower:
case NI_AdvSimd_AddWideningUpper:
case NI_AdvSimd_SubtractWideningLower:
case NI_AdvSimd_SubtractWideningUpper:
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op1Reg, op2Reg, opt);
break;
case NI_AdvSimd_Arm64_AddSaturateScalar:
if (varTypeIsUnsigned(node->GetAuxiliaryType()) != varTypeIsUnsigned(intrin.baseType))
{
ins = varTypeIsUnsigned(intrin.baseType) ? INS_usqadd : INS_suqadd;
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op2Reg, opt);
}
else
{
GetEmitter()->emitIns_R_R_R(ins, emitSize, targetReg, op1Reg, op2Reg, opt);
}
break;
case NI_ArmBase_Yield:
{
GetEmitter()->emitIns(ins);
break;
}
// mvni doesn't support the range of element types, so hard code the 'opts' value.
case NI_Vector64_get_Zero:
case NI_Vector64_get_AllBitsSet:
GetEmitter()->emitIns_R_I(ins, emitSize, targetReg, 0, INS_OPTS_2S);
break;
case NI_Vector128_get_Zero:
case NI_Vector128_get_AllBitsSet:
GetEmitter()->emitIns_R_I(ins, emitSize, targetReg, 0, INS_OPTS_4S);
break;
case NI_AdvSimd_DuplicateToVector64:
case NI_AdvSimd_DuplicateToVector128:
case NI_AdvSimd_Arm64_DuplicateToVector64:
case NI_AdvSimd_Arm64_DuplicateToVector128:
{
if (varTypeIsFloating(intrin.baseType))
{
if (intrin.op1->isContainedFltOrDblImmed())
{
const double dataValue = intrin.op1->AsDblCon()->gtDconVal;
GetEmitter()->emitIns_R_F(INS_fmov, emitSize, targetReg, dataValue, opt);
}
else if (intrin.id == NI_AdvSimd_Arm64_DuplicateToVector64)
{
assert(intrin.baseType == TYP_DOUBLE);
assert(GetEmitter()->IsMovInstruction(ins));
GetEmitter()->emitIns_Mov(ins, emitSize, targetReg, op1Reg, /* canSkip */ false, opt);
}
else
{
GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op1Reg, 0, opt);
}
}
else if (intrin.op1->isContainedIntOrIImmed())
{
const ssize_t dataValue = intrin.op1->AsIntCon()->gtIconVal;
GetEmitter()->emitIns_R_I(INS_movi, emitSize, targetReg, dataValue, opt);
}
else if (GetEmitter()->IsMovInstruction(ins))
{
GetEmitter()->emitIns_Mov(ins, emitSize, targetReg, op1Reg, /* canSkip */ false, opt);
}
else
{
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg, opt);
}
}
break;
case NI_Vector64_ToVector128:
GetEmitter()->emitIns_Mov(ins, emitSize, targetReg, op1Reg, /* canSkip */ false);
break;
case NI_Vector64_ToVector128Unsafe:
case NI_Vector128_GetLower:
GetEmitter()->emitIns_Mov(ins, emitSize, targetReg, op1Reg, /* canSkip */ true);
break;
case NI_Vector64_GetElement:
case NI_Vector128_GetElement:
{
assert(intrin.numOperands == 2);
var_types simdType = Compiler::getSIMDTypeForSize(node->GetSimdSize());
if (simdType == TYP_SIMD12)
{
// op1 of TYP_SIMD12 should be considered as TYP_SIMD16
simdType = TYP_SIMD16;
}
if (!intrin.op2->OperIsConst())
{
assert(!intrin.op2->isContained());
emitAttr baseTypeSize = emitTypeSize(intrin.baseType);
unsigned baseTypeScale = genLog2(EA_SIZE_IN_BYTES(baseTypeSize));
regNumber baseReg;
regNumber indexReg = op2Reg;
// Optimize the case of op1 is in memory and trying to access i'th element.
if (!intrin.op1->isUsedFromReg())
{
assert(intrin.op1->isContained());
if (intrin.op1->OperIsLocal())
{
unsigned varNum = intrin.op1->AsLclVarCommon()->GetLclNum();
baseReg = node->ExtractTempReg();
// Load the address of varNum
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, baseReg, varNum, 0);
}
else
{
// Require GT_IND addr to be not contained.
assert(intrin.op1->OperIs(GT_IND));
GenTree* addr = intrin.op1->AsIndir()->Addr();
assert(!addr->isContained());
baseReg = addr->GetRegNum();
}
}
else
{
unsigned simdInitTempVarNum = compiler->lvaSIMDInitTempVarNum;
noway_assert(simdInitTempVarNum != BAD_VAR_NUM);
baseReg = node->ExtractTempReg();
// Load the address of simdInitTempVarNum
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, baseReg, simdInitTempVarNum, 0);
// Store the vector to simdInitTempVarNum
GetEmitter()->emitIns_R_R(INS_str, emitTypeSize(simdType), op1Reg, baseReg);
}
assert(genIsValidIntReg(indexReg));
assert(genIsValidIntReg(baseReg));
assert(baseReg != indexReg);
// Load item at baseReg[index]
GetEmitter()->emitIns_R_R_R_Ext(ins_Load(intrin.baseType), baseTypeSize, targetReg, baseReg,
indexReg, INS_OPTS_LSL, baseTypeScale);
}
else if (!GetEmitter()->isValidVectorIndex(emitTypeSize(simdType), emitTypeSize(intrin.baseType),
intrin.op2->AsIntCon()->IconValue()))
{
// We only need to generate code for the get if the index is valid
// If the index is invalid, the code previously generated for the range check will throw
}
else if (!intrin.op1->isUsedFromReg())
{
assert(intrin.op1->isContained());
assert(intrin.op2->IsCnsIntOrI());
int offset = (int)intrin.op2->AsIntCon()->IconValue() * genTypeSize(intrin.baseType);
instruction ins = ins_Load(intrin.baseType);
assert(!intrin.op1->isUsedFromReg());
if (intrin.op1->OperIsLocal())
{
unsigned varNum = intrin.op1->AsLclVarCommon()->GetLclNum();
GetEmitter()->emitIns_R_S(ins, emitActualTypeSize(intrin.baseType), targetReg, varNum, offset);
}
else
{
assert(intrin.op1->OperIs(GT_IND));
GenTree* addr = intrin.op1->AsIndir()->Addr();
assert(!addr->isContained());
regNumber baseReg = addr->GetRegNum();
// ldr targetReg, [baseReg, #offset]
GetEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(intrin.baseType), targetReg, baseReg,
offset);
}
}
else
{
assert(intrin.op2->IsCnsIntOrI());
ssize_t indexValue = intrin.op2->AsIntCon()->IconValue();
// no-op if vector is float/double, targetReg == op1Reg and fetching for 0th index.
if ((varTypeIsFloating(intrin.baseType) && (targetReg == op1Reg) && (indexValue == 0)))
{
break;
}
GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(intrin.baseType), targetReg, op1Reg, indexValue,
INS_OPTS_NONE);
}
break;
}
case NI_Vector64_ToScalar:
case NI_Vector128_ToScalar:
{
const ssize_t indexValue = 0;
// no-op if vector is float/double, targetReg == op1Reg and fetching for 0th index.
if ((varTypeIsFloating(intrin.baseType) && (targetReg == op1Reg) && (indexValue == 0)))
{
break;
}
GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(intrin.baseType), targetReg, op1Reg, indexValue,
INS_OPTS_NONE);
}
break;
case NI_AdvSimd_ReverseElement16:
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg,
(emitSize == EA_8BYTE) ? INS_OPTS_4H : INS_OPTS_8H);
break;
case NI_AdvSimd_ReverseElement32:
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg,
(emitSize == EA_8BYTE) ? INS_OPTS_2S : INS_OPTS_4S);
break;
case NI_AdvSimd_ReverseElement8:
GetEmitter()->emitIns_R_R(ins, emitSize, targetReg, op1Reg,
(emitSize == EA_8BYTE) ? INS_OPTS_8B : INS_OPTS_16B);
break;
default:
unreached();
}
}
genProduceReg(node);
}
#endif // FEATURE_HW_INTRINSICS
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/hwintrinsiccodegenxarch.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Intel hardware intrinsic Code Generator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef FEATURE_HW_INTRINSICS
#include "emit.h"
#include "codegen.h"
#include "sideeffects.h"
#include "lower.h"
#include "gcinfo.h"
#include "gcinfoencoder.h"
//------------------------------------------------------------------------
// assertIsContainableHWIntrinsicOp: Asserts that op is containable by node
//
// Arguments:
// lowering - The lowering phase from the compiler
// containingNode - The HWIntrinsic node that has the contained node
// containedNode - The node that is contained
//
static void assertIsContainableHWIntrinsicOp(Lowering* lowering,
GenTreeHWIntrinsic* containingNode,
GenTree* containedNode)
{
#if DEBUG
// The Lowering::IsContainableHWIntrinsicOp call is not quite right, since it follows pre-register allocation
// logic. However, this check is still important due to the various containment rules that SIMD intrinsics follow.
//
// We use isContainable to track the special HWIntrinsic node containment rules (for things like LoadAligned and
// LoadUnaligned) and we use the supportsRegOptional check to support general-purpose loads (both from stack
// spillage and for isUsedFromMemory contained nodes, in the case where the register allocator decided to not
// allocate a register in the first place).
GenTree* node = containedNode;
// Now that we are doing full memory containment safety checks, we can't properly check nodes that are not
// linked into an evaluation tree, like the special nodes we create in genHWIntrinsic.
// So, just say those are ok.
//
if (node->gtNext == nullptr)
{
return;
}
bool supportsRegOptional = false;
bool isContainable = lowering->TryGetContainableHWIntrinsicOp(containingNode, &node, &supportsRegOptional);
assert(isContainable || supportsRegOptional);
assert(node == containedNode);
#endif // DEBUG
}
//------------------------------------------------------------------------
// genIsTableDrivenHWIntrinsic:
//
// Arguments:
// category - category of a HW intrinsic
//
// Return Value:
// returns true if this category can be table-driven in CodeGen
//
static bool genIsTableDrivenHWIntrinsic(NamedIntrinsic intrinsicId, HWIntrinsicCategory category)
{
// TODO - move more categories to the table-driven framework
// HW_Category_Helper and HW_Flag_MultiIns/HW_Flag_SpecialCodeGen usually need manual codegen
const bool tableDrivenCategory =
(category != HW_Category_Special) && (category != HW_Category_Scalar) && (category != HW_Category_Helper);
const bool tableDrivenFlag =
!HWIntrinsicInfo::GeneratesMultipleIns(intrinsicId) && !HWIntrinsicInfo::HasSpecialCodegen(intrinsicId);
return tableDrivenCategory && tableDrivenFlag;
}
//------------------------------------------------------------------------
// genHWIntrinsic: Generates the code for a given hardware intrinsic node.
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsicId);
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
size_t numArgs = node->GetOperandCount();
// We need to validate that other phases of the compiler haven't introduced unsupported intrinsics
assert(compiler->compIsaSupportedDebugOnly(isa));
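// Note: lookupIval returns the fixed imm8, if any, that the instruction backing this intrinsic expects
// (a comparison or rounding mode, for example); -1 means there is no such baked-in immediate.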
int ival = HWIntrinsicInfo::lookupIval(intrinsicId, compiler->compOpportunisticallyDependsOn(InstructionSet_AVX));
assert(HWIntrinsicInfo::RequiresCodegen(intrinsicId));
if (genIsTableDrivenHWIntrinsic(intrinsicId, category))
{
regNumber targetReg = node->GetRegNum();
var_types baseType = node->GetSimdBaseType();
GenTree* op1 = nullptr;
GenTree* op2 = nullptr;
GenTree* op3 = nullptr;
regNumber op1Reg = REG_NA;
regNumber op2Reg = REG_NA;
emitter* emit = GetEmitter();
assert(numArgs >= 0);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
assert(ins != INS_invalid);
emitAttr simdSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
assert(simdSize != 0);
switch (numArgs)
{
case 1:
{
op1 = node->Op(1);
if (node->OperIsMemoryLoad())
{
genConsumeAddress(op1);
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir load = indirForm(node->TypeGet(), op1);
emit->emitInsLoadInd(ins, simdSize, node->GetRegNum(), &load);
}
else
{
genConsumeRegs(op1);
op1Reg = op1->GetRegNum();
if ((ival != -1) && varTypeIsFloating(baseType))
{
assert((ival >= 0) && (ival <= 127));
if ((category == HW_Category_SIMDScalar) && HWIntrinsicInfo::CopiesUpperBits(intrinsicId))
{
assert(!op1->isContained());
emit->emitIns_SIMD_R_R_R_I(ins, simdSize, targetReg, op1Reg, op1Reg,
static_cast<int8_t>(ival));
}
else
{
genHWIntrinsic_R_RM_I(node, ins, simdSize, static_cast<int8_t>(ival));
}
}
else if ((category == HW_Category_SIMDScalar) && HWIntrinsicInfo::CopiesUpperBits(intrinsicId))
{
emit->emitIns_SIMD_R_R_R(ins, simdSize, targetReg, op1Reg, op1Reg);
}
else
{
genHWIntrinsic_R_RM(node, ins, simdSize, targetReg, op1);
}
}
break;
}
case 2:
{
op1 = node->Op(1);
op2 = node->Op(2);
if (category == HW_Category_MemoryStore)
{
genConsumeAddress(op1);
if (((intrinsicId == NI_SSE_Store) || (intrinsicId == NI_SSE2_Store)) && op2->isContained())
{
GenTreeHWIntrinsic* extract = op2->AsHWIntrinsic();
assert((extract->GetHWIntrinsicId() == NI_AVX_ExtractVector128) ||
(extract->GetHWIntrinsicId() == NI_AVX2_ExtractVector128));
regNumber regData = genConsumeReg(extract->Op(1));
ins = HWIntrinsicInfo::lookupIns(extract->GetHWIntrinsicId(), extract->GetSimdBaseType());
ival = static_cast<int>(extract->Op(2)->AsIntCon()->IconValue());
GenTreeIndir indir = indirForm(TYP_SIMD16, op1);
emit->emitIns_A_R_I(ins, EA_32BYTE, &indir, regData, ival);
}
else
{
genConsumeReg(op2);
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_STORE_IND to generate code with.
GenTreeStoreInd store = storeIndirForm(node->TypeGet(), op1, op2);
emit->emitInsStoreInd(ins, simdSize, &store);
}
break;
}
genConsumeRegs(op1);
genConsumeRegs(op2);
op1Reg = op1->GetRegNum();
op2Reg = op2->GetRegNum();
if ((op1Reg != targetReg) && (op2Reg == targetReg) && node->isRMWHWIntrinsic(compiler))
{
// We have "reg2 = reg1 op reg2" where "reg1 != reg2" on a RMW intrinsic.
//
// For non-commutative intrinsics, we should have ensured that op2 was marked
// delay free in order to prevent it from getting assigned the same register
// as target. However, for commutative intrinsics, we can just swap the operands
// in order to have "reg2 = reg2 op reg1" which will end up producing the right code.
noway_assert(node->OperIsCommutative());
op2Reg = op1Reg;
op1Reg = targetReg;
}
if ((ival != -1) && varTypeIsFloating(baseType))
{
assert((ival >= 0) && (ival <= 127));
genHWIntrinsic_R_R_RM_I(node, ins, simdSize, static_cast<int8_t>(ival));
}
else if (category == HW_Category_MemoryLoad)
{
// Get the address and the 'other' register.
GenTree* addr;
regNumber otherReg;
if (intrinsicId == NI_AVX_MaskLoad || intrinsicId == NI_AVX2_MaskLoad)
{
addr = op1;
otherReg = op2Reg;
}
else
{
addr = op2;
otherReg = op1Reg;
}
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir load = indirForm(node->TypeGet(), addr);
genHWIntrinsic_R_R_RM(node, ins, simdSize, targetReg, otherReg, &load);
}
else if (HWIntrinsicInfo::isImmOp(intrinsicId, op2))
{
assert(ival == -1);
auto emitSwCase = [&](int8_t i) { genHWIntrinsic_R_RM_I(node, ins, simdSize, i); };
if (op2->IsCnsIntOrI())
{
ssize_t ival = op2->AsIntCon()->IconValue();
assert((ival >= 0) && (ival <= 255));
emitSwCase((int8_t)ival);
}
else
{
// We emit a fallback case for the scenario when the imm-op is not a constant. This should
// normally happen when the intrinsic is called indirectly, such as via Reflection. However, it
// can also occur if the consumer calls it directly and just doesn't pass a constant value.
regNumber baseReg = node->ExtractTempReg();
regNumber offsReg = node->GetSingleTempReg();
genHWIntrinsicJumpTableFallback(intrinsicId, op2Reg, baseReg, offsReg, emitSwCase);
}
}
else if (node->TypeGet() == TYP_VOID)
{
genHWIntrinsic_R_RM(node, ins, simdSize, op1Reg, op2);
}
else
{
genHWIntrinsic_R_R_RM(node, ins, simdSize);
}
break;
}
case 3:
{
op1 = node->Op(1);
op2 = node->Op(2);
op3 = node->Op(3);
genConsumeRegs(op1);
op1Reg = op1->GetRegNum();
genConsumeRegs(op2);
op2Reg = op2->GetRegNum();
genConsumeRegs(op3);
regNumber op3Reg = op3->GetRegNum();
if (HWIntrinsicInfo::isImmOp(intrinsicId, op3))
{
assert(ival == -1);
auto emitSwCase = [&](int8_t i) { genHWIntrinsic_R_R_RM_I(node, ins, simdSize, i); };
if (op3->IsCnsIntOrI())
{
ssize_t ival = op3->AsIntCon()->IconValue();
assert((ival >= 0) && (ival <= 255));
emitSwCase((int8_t)ival);
}
else
{
// We emit a fallback case for the scenario when the imm-op is not a constant. This should
// normally happen when the intrinsic is called indirectly, such as via Reflection. However, it
// can also occur if the consumer calls it directly and just doesn't pass a constant value.
regNumber baseReg = node->ExtractTempReg();
regNumber offsReg = node->GetSingleTempReg();
genHWIntrinsicJumpTableFallback(intrinsicId, op3Reg, baseReg, offsReg, emitSwCase);
}
}
else if (category == HW_Category_MemoryStore)
{
// The Mask instructions do not currently support containment of the address.
assert(!op2->isContained());
if (intrinsicId == NI_AVX_MaskStore || intrinsicId == NI_AVX2_MaskStore)
{
emit->emitIns_AR_R_R(ins, simdSize, op2Reg, op3Reg, op1Reg, 0);
}
else
{
assert(intrinsicId == NI_SSE2_MaskMove);
assert(targetReg == REG_NA);
// SSE2 MaskMove hardcodes the destination (op3) in DI/EDI/RDI
emit->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_EDI, op3Reg, /* canSkip */ true);
emit->emitIns_R_R(ins, simdSize, op1Reg, op2Reg);
}
}
else
{
switch (intrinsicId)
{
case NI_SSE41_BlendVariable:
case NI_AVX_BlendVariable:
case NI_AVX2_BlendVariable:
{
genHWIntrinsic_R_R_RM_R(node, ins, simdSize);
break;
}
case NI_AVXVNNI_MultiplyWideningAndAdd:
case NI_AVXVNNI_MultiplyWideningAndAddSaturate:
{
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
assert(op2Reg != REG_NA);
genHWIntrinsic_R_R_R_RM(ins, simdSize, targetReg, op1Reg, op2Reg, op3);
break;
}
default:
{
unreached();
break;
};
}
}
break;
}
default:
unreached();
break;
}
genProduceReg(node);
return;
}
switch (isa)
{
case InstructionSet_Vector128:
case InstructionSet_Vector256:
genBaseIntrinsic(node);
break;
case InstructionSet_X86Base:
case InstructionSet_X86Base_X64:
genX86BaseIntrinsic(node);
break;
case InstructionSet_SSE:
case InstructionSet_SSE_X64:
genSSEIntrinsic(node);
break;
case InstructionSet_SSE2:
case InstructionSet_SSE2_X64:
genSSE2Intrinsic(node);
break;
case InstructionSet_SSE41:
case InstructionSet_SSE41_X64:
genSSE41Intrinsic(node);
break;
case InstructionSet_SSE42:
case InstructionSet_SSE42_X64:
genSSE42Intrinsic(node);
break;
case InstructionSet_AVX:
case InstructionSet_AVX2:
genAvxOrAvx2Intrinsic(node);
break;
case InstructionSet_AES:
genAESIntrinsic(node);
break;
case InstructionSet_BMI1:
case InstructionSet_BMI1_X64:
case InstructionSet_BMI2:
case InstructionSet_BMI2_X64:
genBMI1OrBMI2Intrinsic(node);
break;
case InstructionSet_FMA:
genFMAIntrinsic(node);
break;
case InstructionSet_LZCNT:
case InstructionSet_LZCNT_X64:
genLZCNTIntrinsic(node);
break;
case InstructionSet_PCLMULQDQ:
genPCLMULQDQIntrinsic(node);
break;
case InstructionSet_POPCNT:
case InstructionSet_POPCNT_X64:
genPOPCNTIntrinsic(node);
break;
default:
unreached();
break;
}
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_RM: Generates code for a hardware intrinsic node that takes a
// register operand and a register/memory operand.
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
// attr - The emit attribute for the instruction being generated
// reg - The register
// rmOp - The register/memory operand node
//
void CodeGen::genHWIntrinsic_R_RM(
GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber reg, GenTree* rmOp)
{
emitter* emit = GetEmitter();
OperandDesc rmOpDesc = genOperandDesc(rmOp);
if (rmOpDesc.IsContained())
{
assert(HWIntrinsicInfo::SupportsContainment(node->GetHWIntrinsicId()));
assertIsContainableHWIntrinsicOp(compiler->m_pLowering, node, rmOp);
}
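// Dispatch on where the operand actually lives: a static field handle, a stack local, an address
// (indirection), or a register.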
switch (rmOpDesc.GetKind())
{
case OperandKind::ClsVar:
emit->emitIns_R_C(ins, attr, reg, rmOpDesc.GetFieldHnd(), 0);
break;
case OperandKind::Local:
emit->emitIns_R_S(ins, attr, reg, rmOpDesc.GetVarNum(), rmOpDesc.GetLclOffset());
break;
case OperandKind::Indir:
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir indirForm;
GenTreeIndir* indir = rmOpDesc.GetIndirForm(&indirForm);
emit->emitIns_R_A(ins, attr, reg, indir);
}
break;
case OperandKind::Reg:
if (emit->IsMovInstruction(ins))
{
emit->emitIns_Mov(ins, attr, reg, rmOp->GetRegNum(), /* canSkip */ false);
}
else
{
emit->emitIns_R_R(ins, attr, reg, rmOp->GetRegNum());
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_RM_I: Generates the code for a hardware intrinsic node that takes a register/memory operand,
// an immediate operand, and that returns a value in register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
// ival - The immediate value
//
void CodeGen::genHWIntrinsic_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr simdSize, int8_t ival)
{
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->Op(1);
// TODO-XArch-CQ: Commutative operations can have op1 be contained
// TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained
assert(targetReg != REG_NA);
assert(!node->OperIsCommutative()); // One operand intrinsics cannot be commutative
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
assert(HWIntrinsicInfo::SupportsContainment(node->GetHWIntrinsicId()));
assertIsContainableHWIntrinsicOp(compiler->m_pLowering, node, op1);
}
inst_RV_TT_IV(ins, simdSize, targetReg, op1, ival);
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_R_RM: Generates the code for a hardware intrinsic node that takes a register operand, a
// register/memory operand, and that returns a value in register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
// attr - The emit attribute for the instruction being generated
//
void CodeGen::genHWIntrinsic_R_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr)
{
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
regNumber op1Reg = op1->GetRegNum();
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
genHWIntrinsic_R_R_RM(node, ins, attr, targetReg, op1Reg, op2);
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_R_RM: Generates the code for a hardware intrinsic node that takes a register operand, a
// register/memory operand, and that returns a value in register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
// attr - The emit attribute for the instruction being generated
// targetReg - The register allocated to the result
// op1Reg - The register allocated to the first operand
// op2 - Another operand that maybe in register or memory
//
void CodeGen::genHWIntrinsic_R_R_RM(
GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, GenTree* op2)
{
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
if (op2->isContained() || op2->isUsedFromSpillTemp())
{
assert(HWIntrinsicInfo::SupportsContainment(node->GetHWIntrinsicId()));
assertIsContainableHWIntrinsicOp(compiler->m_pLowering, node, op2);
}
bool isRMW = node->isRMWHWIntrinsic(compiler);
inst_RV_RV_TT(ins, attr, targetReg, op1Reg, op2, isRMW);
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_R_RM_I: Generates the code for a hardware intrinsic node that takes a register operand, a
// register/memory operand, an immediate operand, and that returns a value in register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
// ival - The immediate value
//
void CodeGen::genHWIntrinsic_R_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr simdSize, int8_t ival)
{
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
emitter* emit = GetEmitter();
// TODO-XArch-CQ: Commutative operations can have op1 be contained
// TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained
regNumber op1Reg = op1->GetRegNum();
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
OperandDesc op2Desc = genOperandDesc(op2);
if (op2Desc.IsContained())
{
assert(HWIntrinsicInfo::SupportsContainment(node->GetHWIntrinsicId()));
assertIsContainableHWIntrinsicOp(compiler->m_pLowering, node, op2);
}
switch (op2Desc.GetKind())
{
case OperandKind::ClsVar:
emit->emitIns_SIMD_R_R_C_I(ins, simdSize, targetReg, op1Reg, op2Desc.GetFieldHnd(), 0, ival);
break;
case OperandKind::Local:
emit->emitIns_SIMD_R_R_S_I(ins, simdSize, targetReg, op1Reg, op2Desc.GetVarNum(), op2Desc.GetLclOffset(),
ival);
break;
case OperandKind::Indir:
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir indirForm;
GenTreeIndir* indir = op2Desc.GetIndirForm(&indirForm);
emit->emitIns_SIMD_R_R_A_I(ins, simdSize, targetReg, op1Reg, indir, ival);
}
break;
case OperandKind::Reg:
{
regNumber op2Reg = op2->GetRegNum();
if ((op1Reg != targetReg) && (op2Reg == targetReg) && node->isRMWHWIntrinsic(compiler))
{
// We have "reg2 = reg1 op reg2" where "reg1 != reg2" on a RMW intrinsic.
//
// For non-commutative intrinsics, we should have ensured that op2 was marked
// delay free in order to prevent it from getting assigned the same register
// as target. However, for commutative intrinsics, we can just swap the operands
// in order to have "reg2 = reg2 op reg1" which will end up producing the right code.
noway_assert(node->OperIsCommutative());
op2Reg = op1Reg;
op1Reg = targetReg;
}
emit->emitIns_SIMD_R_R_R_I(ins, simdSize, targetReg, op1Reg, op2Reg, ival);
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_R_RM_R: Generates the code for a hardware intrinsic node that takes a register operand, a
// register/memory operand, another register operand, and that returns a value in register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
//
void CodeGen::genHWIntrinsic_R_R_RM_R(GenTreeHWIntrinsic* node, instruction ins, emitAttr simdSize)
{
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
GenTree* op3 = node->Op(3);
emitter* emit = GetEmitter();
regNumber op1Reg = op1->GetRegNum();
regNumber op3Reg = op3->GetRegNum();
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
assert(op3Reg != REG_NA);
OperandDesc op2Desc = genOperandDesc(op2);
if (op2Desc.IsContained())
{
assert(HWIntrinsicInfo::SupportsContainment(node->GetHWIntrinsicId()));
assertIsContainableHWIntrinsicOp(compiler->m_pLowering, node, op2);
}
switch (op2Desc.GetKind())
{
case OperandKind::ClsVar:
emit->emitIns_SIMD_R_R_C_R(ins, simdSize, targetReg, op1Reg, op3Reg, op2Desc.GetFieldHnd(), 0);
break;
case OperandKind::Local:
emit->emitIns_SIMD_R_R_S_R(ins, simdSize, targetReg, op1Reg, op3Reg, op2Desc.GetVarNum(),
op2Desc.GetLclOffset());
break;
case OperandKind::Indir:
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir indirForm;
GenTreeIndir* indir = op2Desc.GetIndirForm(&indirForm);
emit->emitIns_SIMD_R_R_A_R(ins, simdSize, targetReg, op1Reg, op3Reg, indir);
}
break;
case OperandKind::Reg:
emit->emitIns_SIMD_R_R_R_R(ins, simdSize, targetReg, op1Reg, op2->GetRegNum(), op3Reg);
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_R_R_RM: Generates the code for a hardware intrinsic node that takes two register operands,
// a register/memory operand, and that returns a value in register
//
// Arguments:
// ins - The instruction being generated
// attr - The emit attribute
// targetReg - The target register
// op1Reg - The register of the first operand
// op2Reg - The register of the second operand
// op3 - The third operand
//
void CodeGen::genHWIntrinsic_R_R_R_RM(
instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, GenTree* op3)
{
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
assert(op2Reg != REG_NA);
emitter* emit = GetEmitter();
OperandDesc op3Desc = genOperandDesc(op3);
switch (op3Desc.GetKind())
{
case OperandKind::ClsVar:
emit->emitIns_SIMD_R_R_R_C(ins, attr, targetReg, op1Reg, op2Reg, op3Desc.GetFieldHnd(), 0);
break;
case OperandKind::Local:
emit->emitIns_SIMD_R_R_R_S(ins, attr, targetReg, op1Reg, op2Reg, op3Desc.GetVarNum(),
op3Desc.GetLclOffset());
break;
case OperandKind::Indir:
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir indirForm;
GenTreeIndir* indir = op3Desc.GetIndirForm(&indirForm);
emit->emitIns_SIMD_R_R_R_A(ins, attr, targetReg, op1Reg, op2Reg, indir);
}
break;
case OperandKind::Reg:
emit->emitIns_SIMD_R_R_R_R(ins, attr, targetReg, op1Reg, op2Reg, op3->GetRegNum());
break;
default:
unreached();
}
}
// genHWIntrinsicJumpTableFallback : generate the jump-table fallback for imm-intrinsics
// with non-constant argument
//
// Arguments:
// intrinsic - intrinsic ID
// nonConstImmReg - the register containing the non-constant imm8 argument
// baseReg - a register for the start of the switch table
// offsReg - a register for the offset into the switch table
// emitSwCase - the lambda to generate a switch case
//
// Return Value:
//    None.
//
// Note:
//    This function can be used for all imm-intrinsics (whether full-range or not).
//    The compiler front-end (i.e. the importer) is responsible for inserting a range check
//    (GT_BOUNDS_CHECK) for the imm8 argument, so this function does not need to do one.
//
template <typename HWIntrinsicSwitchCaseBody>
void CodeGen::genHWIntrinsicJumpTableFallback(NamedIntrinsic intrinsic,
regNumber nonConstImmReg,
regNumber baseReg,
regNumber offsReg,
HWIntrinsicSwitchCaseBody emitSwCase)
{
assert(nonConstImmReg != REG_NA);
// AVX2 Gather intrinsics use a managed non-const fallback since they have a discrete imm8 value range
// that does not work with the compiler-generated jump-table fallback
assert(!HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic));
emitter* emit = GetEmitter();
const unsigned maxByte = (unsigned)HWIntrinsicInfo::lookupImmUpperBound(intrinsic) + 1;
assert(maxByte <= 256);
BasicBlock* jmpTable[256];
unsigned jmpTableBase = emit->emitBBTableDataGenBeg(maxByte, true);
// Emit the jump table
for (unsigned i = 0; i < maxByte; i++)
{
jmpTable[i] = genCreateTempLabel();
emit->emitDataGenData(i, jmpTable[i]);
}
emit->emitDataGenEnd();
// Compute and jump to the appropriate offset in the switch table
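// Roughly, the sequence emitted below is:
//    lea  offsReg, [jump table data]
//    mov  offsReg, dword ptr [offsReg + nonConstImmReg * 4] ; entries are 32-bit code offsets
//    lea  baseReg, [address of the first basic block]
//    add  offsReg, baseReg
//    jmp  offsReg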
emit->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), offsReg, compiler->eeFindJitDataOffs(jmpTableBase), 0);
emit->emitIns_R_ARX(INS_mov, EA_4BYTE, offsReg, offsReg, nonConstImmReg, 4, 0);
emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, baseReg);
emit->emitIns_R_R(INS_add, EA_PTRSIZE, offsReg, baseReg);
emit->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), offsReg);
// Emit the switch table entries
BasicBlock* switchTableBeg = genCreateTempLabel();
BasicBlock* switchTableEnd = genCreateTempLabel();
genDefineTempLabel(switchTableBeg);
for (unsigned i = 0; i < maxByte; i++)
{
genDefineTempLabel(jmpTable[i]);
emitSwCase((int8_t)i);
emit->emitIns_J(INS_jmp, switchTableEnd);
}
genDefineTempLabel(switchTableEnd);
}
//------------------------------------------------------------------------
// genBaseIntrinsic: Generates the code for a base hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
// Note:
// We currently assume that all base intrinsics have zero or one operand.
//
void CodeGen::genBaseIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
regNumber targetReg = node->GetRegNum();
var_types baseType = node->GetSimdBaseType();
assert(compiler->compIsaSupportedDebugOnly(InstructionSet_SSE));
assert((baseType >= TYP_BYTE) && (baseType <= TYP_DOUBLE));
GenTree* op1 = (node->GetOperandCount() >= 1) ? node->Op(1) : nullptr;
GenTree* op2 = (node->GetOperandCount() >= 2) ? node->Op(2) : nullptr;
genConsumeMultiOpOperands(node);
regNumber op1Reg = (op1 == nullptr) ? REG_NA : op1->GetRegNum();
emitter* emit = GetEmitter();
var_types simdType = Compiler::getSIMDTypeForSize(node->GetSimdSize());
emitAttr attr = emitActualTypeSize(simdType);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
switch (intrinsicId)
{
case NI_Vector128_CreateScalarUnsafe:
case NI_Vector256_CreateScalarUnsafe:
{
if (varTypeIsIntegral(baseType))
{
genHWIntrinsic_R_RM(node, ins, emitActualTypeSize(baseType), targetReg, op1);
}
else
{
assert(varTypeIsFloating(baseType));
attr = emitTypeSize(baseType);
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
genHWIntrinsic_R_RM(node, ins, attr, targetReg, op1);
}
else
{
// Just use movaps for reg->reg moves as it has zero-latency on modern CPUs
emit->emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
}
}
break;
}
case NI_Vector128_GetElement:
case NI_Vector256_GetElement:
{
if (simdType == TYP_SIMD12)
{
// op1 of TYP_SIMD12 should be considered as TYP_SIMD16
simdType = TYP_SIMD16;
}
// Optimize the case where op1 is in memory and we are accessing the i-th element.
if (!op1->isUsedFromReg())
{
assert(op1->isContained());
regNumber baseReg;
regNumber indexReg;
int offset = 0;
if (op1->OperIsLocal())
{
// There are three parts to the total offset here:
// {offset of local} + {offset of vector field (lclFld only)} + {offset of element within vector}.
bool isEBPbased;
unsigned varNum = op1->AsLclVarCommon()->GetLclNum();
offset += compiler->lvaFrameAddress(varNum, &isEBPbased);
#if !FEATURE_FIXED_OUT_ARGS
if (!isEBPbased)
{
// Adjust the offset by the amount currently pushed on the CPU stack
offset += genStackLevel;
}
#else
assert(genStackLevel == 0);
#endif // !FEATURE_FIXED_OUT_ARGS
if (op1->OperIs(GT_LCL_FLD))
{
offset += op1->AsLclFld()->GetLclOffs();
}
baseReg = (isEBPbased) ? REG_EBP : REG_ESP;
}
else
{
// Require GT_IND addr to be not contained.
assert(op1->OperIs(GT_IND));
GenTree* addr = op1->AsIndir()->Addr();
assert(!addr->isContained());
baseReg = addr->GetRegNum();
}
if (op2->OperIsConst())
{
assert(op2->isContained());
indexReg = REG_NA;
offset += (int)op2->AsIntCon()->IconValue() * genTypeSize(baseType);
}
else
{
indexReg = op2->GetRegNum();
assert(genIsValidIntReg(indexReg));
}
// Now, load the desired element.
GetEmitter()->emitIns_R_ARX(ins_Move_Extend(baseType, false), // Load
emitTypeSize(baseType), // Of the vector baseType
targetReg, // To targetReg
baseReg, // Base Reg
indexReg, // Indexed
genTypeSize(baseType), // by the size of the baseType
offset);
}
else if (op2->OperIsConst())
{
assert(intrinsicId == NI_Vector128_GetElement);
assert(varTypeIsFloating(baseType));
assert(op1Reg != REG_NA);
ssize_t ival = op2->AsIntCon()->IconValue();
if (baseType == TYP_FLOAT)
{
if (ival == 1)
{
if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE3))
{
emit->emitIns_R_R(INS_movshdup, attr, targetReg, op1Reg);
}
else
{
emit->emitIns_SIMD_R_R_R_I(INS_shufps, attr, targetReg, op1Reg, op1Reg,
static_cast<int8_t>(0x55));
}
}
else if (ival == 2)
{
emit->emitIns_SIMD_R_R_R(INS_unpckhps, attr, targetReg, op1Reg, op1Reg);
}
else
{
assert(ival == 3);
emit->emitIns_SIMD_R_R_R_I(INS_shufps, attr, targetReg, op1Reg, op1Reg,
static_cast<int8_t>(0xFF));
}
}
else
{
assert(baseType == TYP_DOUBLE);
assert(ival == 1);
emit->emitIns_SIMD_R_R_R(INS_unpckhpd, attr, targetReg, op1Reg, op1Reg);
}
}
else
{
// We don't have an instruction to implement this intrinsic if the index is not a constant.
// So we will use the SIMD temp location to store the vector, and then load the desired element.
// The range check will already have been performed, so at this point we know we have an index
// within the bounds of the vector.
unsigned simdInitTempVarNum = compiler->lvaSIMDInitTempVarNum;
noway_assert(simdInitTempVarNum != BAD_VAR_NUM);
bool isEBPbased;
unsigned offs = compiler->lvaFrameAddress(simdInitTempVarNum, &isEBPbased);
#if !FEATURE_FIXED_OUT_ARGS
if (!isEBPbased)
{
// Adjust the offset by the amount currently pushed on the CPU stack
offs += genStackLevel;
}
#else
assert(genStackLevel == 0);
#endif // !FEATURE_FIXED_OUT_ARGS
regNumber indexReg = op2->GetRegNum();
// Store the vector to the temp location.
GetEmitter()->emitIns_S_R(ins_Store(simdType, compiler->isSIMDTypeLocalAligned(simdInitTempVarNum)),
emitTypeSize(simdType), op1Reg, simdInitTempVarNum, 0);
// Now, load the desired element.
GetEmitter()->emitIns_R_ARX(ins_Move_Extend(baseType, false), // Load
emitTypeSize(baseType), // Of the vector baseType
targetReg, // To targetReg
(isEBPbased) ? REG_EBP : REG_ESP, // Stack-based
indexReg, // Indexed
genTypeSize(baseType), // by the size of the baseType
offs);
}
break;
}
case NI_Vector128_ToScalar:
case NI_Vector256_ToScalar:
{
assert(varTypeIsFloating(baseType));
attr = emitTypeSize(TYP_SIMD16);
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
genHWIntrinsic_R_RM(node, ins, attr, targetReg, op1);
}
else
{
// Just use movaps for reg->reg moves as it has zero-latency on modern CPUs
emit->emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
}
break;
}
case NI_Vector128_ToVector256:
{
// ToVector256 has zero-extend semantics in order to ensure it is deterministic
// We always emit a move to the target register, even when op1Reg == targetReg,
// in order to ensure that Bits MAXVL-1:128 are zeroed.
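// (Under the VEX encoding, a 128-bit move that writes an XMM register zeroes the upper bits of
// the corresponding YMM register, which is what provides the zero-extension here.)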
attr = emitTypeSize(TYP_SIMD16);
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
genHWIntrinsic_R_RM(node, ins, attr, targetReg, op1);
}
else
{
// Just use movaps for reg->reg moves as it has zero-latency on modern CPUs
emit->emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ false);
}
break;
}
case NI_Vector128_ToVector256Unsafe:
case NI_Vector256_GetLower:
{
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
genHWIntrinsic_R_RM(node, ins, attr, targetReg, op1);
}
else
{
// Just use movaps for reg->reg moves as it has zero-latency on modern CPUs
emit->emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
}
break;
}
case NI_Vector128_get_Zero:
case NI_Vector256_get_Zero:
{
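// The all-zero vector is produced by XORing the target register with itself.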
emit->emitIns_SIMD_R_R_R(ins, attr, targetReg, targetReg, targetReg);
break;
}
case NI_Vector128_get_AllBitsSet:
if (varTypeIsFloating(baseType) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX))
{
// The following corresponds to the vcmptrueps pseudo-op (a compare with imm8 = 15, the TRUE
// predicate), which is not available without the VEX prefix.
emit->emitIns_SIMD_R_R_R_I(ins, attr, targetReg, targetReg, targetReg, 15);
}
else
{
emit->emitIns_SIMD_R_R_R(INS_pcmpeqd, attr, targetReg, targetReg, targetReg);
}
break;
case NI_Vector256_get_AllBitsSet:
if (varTypeIsIntegral(baseType) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
emit->emitIns_SIMD_R_R_R(ins, attr, targetReg, targetReg, targetReg);
}
else
{
assert(compiler->compIsaSupportedDebugOnly(InstructionSet_AVX));
// The following corresponds to vcmptrueps pseudo-op.
emit->emitIns_SIMD_R_R_R_I(INS_cmpps, attr, targetReg, targetReg, targetReg, 15);
}
break;
default:
{
unreached();
break;
}
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genX86BaseIntrinsic: Generates the code for an X86 base hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genX86BaseIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_X86Base_BitScanForward:
case NI_X86Base_BitScanReverse:
case NI_X86Base_X64_BitScanForward:
case NI_X86Base_X64_BitScanReverse:
{
GenTree* op1 = node->Op(1);
regNumber targetReg = node->GetRegNum();
var_types targetType = node->TypeGet();
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, targetType);
genHWIntrinsic_R_RM(node, ins, emitTypeSize(targetType), targetReg, op1);
break;
}
case NI_X86Base_Pause:
{
assert(node->GetSimdBaseType() == TYP_UNKNOWN);
GetEmitter()->emitIns(INS_pause);
break;
}
default:
unreached();
break;
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genSSEIntrinsic: Generates the code for an SSE hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genSSEIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
regNumber targetReg = node->GetRegNum();
var_types targetType = node->TypeGet();
var_types baseType = node->GetSimdBaseType();
emitter* emit = GetEmitter();
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_SSE_X64_ConvertToInt64:
case NI_SSE_X64_ConvertToInt64WithTruncation:
{
assert(targetType == TYP_LONG);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
genHWIntrinsic_R_RM(node, ins, EA_8BYTE, targetReg, node->Op(1));
break;
}
case NI_SSE_X64_ConvertScalarToVector128Single:
{
assert(baseType == TYP_LONG);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
genHWIntrinsic_R_R_RM(node, ins, EA_8BYTE);
break;
}
case NI_SSE_Prefetch0:
case NI_SSE_Prefetch1:
case NI_SSE_Prefetch2:
case NI_SSE_PrefetchNonTemporal:
{
assert(baseType == TYP_UBYTE);
// These do not support containment.
assert(!node->Op(1)->isContained());
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, node->GetSimdBaseType());
emit->emitIns_AR(ins, emitTypeSize(baseType), node->Op(1)->GetRegNum(), 0);
break;
}
case NI_SSE_StoreFence:
{
assert(baseType == TYP_UNKNOWN);
emit->emitIns(INS_sfence);
break;
}
default:
unreached();
break;
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genSSE2Intrinsic: Generates the code for an SSE2 hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genSSE2Intrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
regNumber targetReg = node->GetRegNum();
var_types targetType = node->TypeGet();
var_types baseType = node->GetSimdBaseType();
emitter* emit = GetEmitter();
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_SSE2_X64_ConvertScalarToVector128Double:
{
assert(baseType == TYP_LONG);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
genHWIntrinsic_R_R_RM(node, ins, EA_8BYTE);
break;
}
case NI_SSE2_X64_ConvertScalarToVector128Int64:
case NI_SSE2_X64_ConvertScalarToVector128UInt64:
{
assert(baseType == TYP_LONG || baseType == TYP_ULONG);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
genHWIntrinsic_R_RM(node, ins, emitTypeSize(baseType), targetReg, node->Op(1));
break;
}
case NI_SSE2_ConvertToInt32:
case NI_SSE2_ConvertToInt32WithTruncation:
case NI_SSE2_ConvertToUInt32:
case NI_SSE2_X64_ConvertToInt64:
case NI_SSE2_X64_ConvertToInt64WithTruncation:
case NI_SSE2_X64_ConvertToUInt64:
{
emitAttr attr;
if (varTypeIsIntegral(baseType))
{
assert(baseType == TYP_INT || baseType == TYP_UINT || baseType == TYP_LONG || baseType == TYP_ULONG);
attr = emitActualTypeSize(baseType);
}
else
{
assert(baseType == TYP_DOUBLE || baseType == TYP_FLOAT);
attr = emitTypeSize(targetType);
}
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
genHWIntrinsic_R_RM(node, ins, attr, targetReg, node->Op(1));
break;
}
case NI_SSE2_LoadFence:
{
assert(baseType == TYP_UNKNOWN);
emit->emitIns(INS_lfence);
break;
}
case NI_SSE2_MemoryFence:
{
assert(baseType == TYP_UNKNOWN);
emit->emitIns(INS_mfence);
break;
}
case NI_SSE2_StoreNonTemporal:
case NI_SSE2_X64_StoreNonTemporal:
{
assert(baseType == TYP_INT || baseType == TYP_UINT || baseType == TYP_LONG || baseType == TYP_ULONG);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
GenTreeStoreInd store = storeIndirForm(node->TypeGet(), node->Op(1), node->Op(2));
emit->emitInsStoreInd(ins, emitTypeSize(baseType), &store);
break;
}
default:
unreached();
break;
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genSSE41Intrinsic: Generates the code for an SSE4.1 hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genSSE41Intrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
GenTree* op1 = node->Op(1);
regNumber targetReg = node->GetRegNum();
var_types baseType = node->GetSimdBaseType();
emitter* emit = GetEmitter();
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_SSE41_ConvertToVector128Int16:
case NI_SSE41_ConvertToVector128Int32:
case NI_SSE41_ConvertToVector128Int64:
{
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
if (!varTypeIsSIMD(op1->gtType))
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir load = indirForm(node->TypeGet(), op1);
emit->emitInsLoadInd(ins, emitTypeSize(TYP_SIMD16), node->GetRegNum(), &load);
}
else
{
genHWIntrinsic_R_RM(node, ins, EA_16BYTE, targetReg, op1);
}
break;
}
case NI_SSE41_Extract:
case NI_SSE41_X64_Extract:
{
assert(!varTypeIsFloating(baseType));
GenTree* op2 = node->Op(2);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
emitAttr attr = emitActualTypeSize(node->TypeGet());
auto emitSwCase = [&](int8_t i) { inst_RV_TT_IV(ins, attr, targetReg, op1, i); };
if (op2->IsCnsIntOrI())
{
ssize_t ival = op2->AsIntCon()->IconValue();
assert((ival >= 0) && (ival <= 255));
emitSwCase((int8_t)ival);
}
else
{
// We emit a fallback case for the scenario when the imm-op is not a constant. This should
// normally happen when the intrinsic is called indirectly, such as via Reflection. However, it
// can also occur if the consumer calls it directly and just doesn't pass a constant value.
regNumber baseReg = node->ExtractTempReg();
regNumber offsReg = node->GetSingleTempReg();
genHWIntrinsicJumpTableFallback(intrinsicId, op2->GetRegNum(), baseReg, offsReg, emitSwCase);
}
break;
}
default:
unreached();
break;
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genSSE42Intrinsic: Generates the code for an SSE4.2 hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genSSE42Intrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
var_types baseType = node->GetSimdBaseType();
var_types targetType = node->TypeGet();
emitter* emit = GetEmitter();
genConsumeMultiOpOperands(node);
regNumber op1Reg = op1->GetRegNum();
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
assert(!node->OperIsCommutative());
switch (intrinsicId)
{
case NI_SSE42_Crc32:
case NI_SSE42_X64_Crc32:
{
assert((op2->GetRegNum() != targetReg) || (op1Reg == targetReg));
emit->emitIns_Mov(INS_mov, emitTypeSize(targetType), targetReg, op1Reg, /* canSkip */ true);
if ((baseType == TYP_UBYTE) || (baseType == TYP_USHORT)) // baseType is the type of the second argument
{
assert(targetType == TYP_INT);
genHWIntrinsic_R_RM(node, INS_crc32, emitTypeSize(baseType), targetReg, op2);
}
else
{
assert(op1->TypeGet() == op2->TypeGet());
assert((targetType == TYP_INT) || (targetType == TYP_LONG));
genHWIntrinsic_R_RM(node, INS_crc32, emitTypeSize(targetType), targetReg, op2);
}
break;
}
default:
{
unreached();
break;
}
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genAvxOrAvx2Intrinsic: Generates the code for an AVX/AVX2 hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genAvxOrAvx2Intrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
var_types baseType = node->GetSimdBaseType();
emitAttr attr = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
var_types targetType = node->TypeGet();
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
size_t numArgs = node->GetOperandCount();
GenTree* op1 = node->Op(1);
regNumber op1Reg = REG_NA;
regNumber targetReg = node->GetRegNum();
emitter* emit = GetEmitter();
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_AVX2_ConvertToInt32:
case NI_AVX2_ConvertToUInt32:
{
op1Reg = op1->GetRegNum();
assert((baseType == TYP_INT) || (baseType == TYP_UINT));
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
emit->emitIns_Mov(ins, emitActualTypeSize(baseType), targetReg, op1Reg, /* canSkip */ false);
break;
}
case NI_AVX2_ConvertToVector256Int16:
case NI_AVX2_ConvertToVector256Int32:
case NI_AVX2_ConvertToVector256Int64:
{
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
if (!varTypeIsSIMD(op1->gtType))
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir load = indirForm(node->TypeGet(), op1);
emit->emitInsLoadInd(ins, emitTypeSize(TYP_SIMD32), node->GetRegNum(), &load);
}
else
{
genHWIntrinsic_R_RM(node, ins, EA_32BYTE, targetReg, op1);
}
break;
}
case NI_AVX2_GatherVector128:
case NI_AVX2_GatherVector256:
case NI_AVX2_GatherMaskVector128:
case NI_AVX2_GatherMaskVector256:
{
GenTree* op2 = node->Op(2);
GenTree* op3 = node->Op(3);
GenTree* lastOp = nullptr;
GenTree* indexOp = nullptr;
op1Reg = op1->GetRegNum();
regNumber op2Reg = op2->GetRegNum();
regNumber addrBaseReg = REG_NA;
regNumber addrIndexReg = REG_NA;
regNumber maskReg = node->ExtractTempReg(RBM_ALLFLOAT);
if (numArgs == 5)
{
assert(intrinsicId == NI_AVX2_GatherMaskVector128 || intrinsicId == NI_AVX2_GatherMaskVector256);
GenTree* op4 = node->Op(4);
lastOp = node->Op(5);
regNumber op3Reg = op3->GetRegNum();
regNumber op4Reg = op4->GetRegNum();
addrBaseReg = op2Reg;
addrIndexReg = op3Reg;
indexOp = op3;
// copy op4Reg into the tmp mask register,
// the mask register will be cleared by gather instructions
emit->emitIns_Mov(INS_movaps, attr, maskReg, op4Reg, /* canSkip */ false);
// copy source vector to the target register for masking merge
emit->emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
}
else
{
assert(intrinsicId == NI_AVX2_GatherVector128 || intrinsicId == NI_AVX2_GatherVector256);
addrBaseReg = op1Reg;
addrIndexReg = op2Reg;
indexOp = op2;
lastOp = op3;
// generate all-one mask vector
emit->emitIns_SIMD_R_R_R(INS_pcmpeqd, attr, maskReg, maskReg, maskReg);
}
bool isVector128GatherWithVector256Index = (targetType == TYP_SIMD16) && (indexOp->TypeGet() == TYP_SIMD32);
// hwintrinsiclistxarch.h uses dword-index instructions by default
if (varTypeIsLong(node->GetAuxiliaryType()))
{
switch (ins)
{
case INS_vpgatherdd:
ins = INS_vpgatherqd;
if (isVector128GatherWithVector256Index)
{
// YMM index in address mode
attr = emitTypeSize(TYP_SIMD32);
}
break;
case INS_vpgatherdq:
ins = INS_vpgatherqq;
break;
case INS_vgatherdps:
ins = INS_vgatherqps;
if (isVector128GatherWithVector256Index)
{
// YMM index in address mode
attr = emitTypeSize(TYP_SIMD32);
}
break;
case INS_vgatherdpd:
ins = INS_vgatherqpd;
break;
default:
unreached();
}
}
assert(lastOp->IsCnsIntOrI());
ssize_t ival = lastOp->AsIntCon()->IconValue();
assert((ival >= 0) && (ival <= 255));
assert(targetReg != maskReg);
assert(targetReg != addrIndexReg);
assert(maskReg != addrIndexReg);
emit->emitIns_R_AR_R(ins, attr, targetReg, maskReg, addrBaseReg, addrIndexReg, (int8_t)ival, 0);
break;
}
default:
unreached();
break;
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genAESIntrinsic: Generates the code for an AES hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genAESIntrinsic(GenTreeHWIntrinsic* node)
{
NYI("Implement AES intrinsic code generation");
}
//------------------------------------------------------------------------
// genBMI1OrBMI2Intrinsic: Generates the code for a BMI1 and BMI2 hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genBMI1OrBMI2Intrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
regNumber targetReg = node->GetRegNum();
var_types targetType = node->TypeGet();
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, targetType);
emitter* emit = GetEmitter();
assert(targetReg != REG_NA);
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_BMI1_AndNot:
case NI_BMI1_X64_AndNot:
case NI_BMI1_BitFieldExtract:
case NI_BMI1_X64_BitFieldExtract:
case NI_BMI2_ParallelBitDeposit:
case NI_BMI2_ParallelBitExtract:
case NI_BMI2_X64_ParallelBitDeposit:
case NI_BMI2_X64_ParallelBitExtract:
case NI_BMI2_ZeroHighBits:
case NI_BMI2_X64_ZeroHighBits:
{
assert((targetType == TYP_INT) || (targetType == TYP_LONG));
genHWIntrinsic_R_R_RM(node, ins, emitTypeSize(node->TypeGet()));
break;
}
case NI_BMI1_ExtractLowestSetBit:
case NI_BMI1_GetMaskUpToLowestSetBit:
case NI_BMI1_ResetLowestSetBit:
case NI_BMI1_X64_ExtractLowestSetBit:
case NI_BMI1_X64_GetMaskUpToLowestSetBit:
case NI_BMI1_X64_ResetLowestSetBit:
{
assert((targetType == TYP_INT) || (targetType == TYP_LONG));
genHWIntrinsic_R_RM(node, ins, emitTypeSize(node->TypeGet()), targetReg, node->Op(1));
break;
}
case NI_BMI1_TrailingZeroCount:
case NI_BMI1_X64_TrailingZeroCount:
{
assert((targetType == TYP_INT) || (targetType == TYP_LONG));
genXCNTIntrinsic(node, ins);
break;
}
case NI_BMI2_MultiplyNoFlags:
case NI_BMI2_X64_MultiplyNoFlags:
{
size_t numArgs = node->GetOperandCount();
assert(numArgs == 2 || numArgs == 3);
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
regNumber op1Reg = op1->GetRegNum();
regNumber op2Reg = op2->GetRegNum();
regNumber op3Reg = REG_NA;
regNumber lowReg = REG_NA;
if (numArgs == 2)
{
lowReg = targetReg;
}
else
{
op3Reg = node->Op(3)->GetRegNum();
assert(!node->Op(3)->isContained());
assert(op3Reg != op1Reg);
assert(op3Reg != targetReg);
assert(op3Reg != REG_EDX);
lowReg = node->GetSingleTempReg();
assert(op3Reg != lowReg);
assert(lowReg != targetReg);
}
// These do not support containment
assert(!op2->isContained());
emitAttr attr = emitTypeSize(targetType);
// mov the first operand into implicit source operand EDX/RDX
assert((op2Reg != REG_EDX) || (op1Reg == REG_EDX));
emit->emitIns_Mov(INS_mov, attr, REG_EDX, op1Reg, /* canSkip */ true);
// generate code for MULX
genHWIntrinsic_R_R_RM(node, ins, attr, targetReg, lowReg, op2);
// If the lower half of the result is required, store it in the memory pointed to by op3
if (numArgs == 3)
{
emit->emitIns_AR_R(INS_mov, attr, lowReg, op3Reg, 0);
}
break;
}
default:
{
unreached();
break;
}
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genFMAIntrinsic: Generates the code for an FMA hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genFMAIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
var_types baseType = node->GetSimdBaseType();
emitAttr attr = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
instruction _213form = HWIntrinsicInfo::lookupIns(intrinsicId, baseType); // 213 form
instruction _132form = (instruction)(_213form - 1);
instruction _231form = (instruction)(_213form + 1);
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
GenTree* op3 = node->Op(3);
regNumber targetReg = node->GetRegNum();
genConsumeMultiOpOperands(node);
regNumber op1NodeReg = op1->GetRegNum();
regNumber op2NodeReg = op2->GetRegNum();
regNumber op3NodeReg = op3->GetRegNum();
GenTree* emitOp1 = op1;
GenTree* emitOp2 = op2;
GenTree* emitOp3 = op3;
const bool copiesUpperBits = HWIntrinsicInfo::CopiesUpperBits(intrinsicId);
// Intrinsics with CopyUpperBits semantics cannot have op1 be contained
assert(!copiesUpperBits || !op1->isContained());
// We need to keep this in sync with lsraxarch.cpp.
// Ideally we'd actually swap the operands in LSRA and simplify codegen,
// but it's a bit more complicated to do so for many operands, as well as
// being complicated to tell codegen how to pick the right instruction.
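// As a reminder, the three forms place the destination and the (possibly memory) last operand in
// different roles (shown for multiply-add; the other FMA flavors are analogous):
//    132 form: XMM1 = (XMM1 * [XMM3]) + XMM2
//    213 form: XMM1 = (XMM2 * XMM1) + [XMM3]
//    231 form: XMM1 = (XMM2 * [XMM3]) + XMM1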
instruction ins = INS_invalid;
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
// targetReg == op3NodeReg or targetReg == ?
// op3 = ([op1] * op2) + op3
// 231 form: XMM1 = (XMM2 * [XMM3]) + XMM1
ins = _231form;
std::swap(emitOp1, emitOp3);
if (targetReg == op2NodeReg)
{
// op2 = ([op1] * op2) + op3
// 132 form: XMM1 = (XMM1 * [XMM3]) + XMM2
ins = _132form;
std::swap(emitOp1, emitOp2);
}
}
else if (op3->isContained() || op3->isUsedFromSpillTemp())
{
// targetReg could be op1NodeReg, op2NodeReg, or not equal to any op
// op1 = (op1 * op2) + [op3] or op2 = (op1 * op2) + [op3]
// ? = (op1 * op2) + [op3] or ? = (op1 * op2) + op3
// 213 form: XMM1 = (XMM2 * XMM1) + [XMM3]
ins = _213form;
if (!copiesUpperBits && (targetReg == op2NodeReg))
{
// op2 = (op1 * op2) + [op3]
// 213 form: XMM1 = (XMM2 * XMM1) + [XMM3]
std::swap(emitOp1, emitOp2);
}
}
else if (op2->isContained() || op2->isUsedFromSpillTemp())
{
// targetReg == op1NodeReg or targetReg == ?
// op1 = (op1 * [op2]) + op3
// 132 form: XMM1 = (XMM1 * [XMM3]) + XMM2
ins = _132form;
std::swap(emitOp2, emitOp3);
if (!copiesUpperBits && (targetReg == op3NodeReg))
{
// op3 = (op1 * [op2]) + op3
// 231 form: XMM1 = (XMM2 * [XMM3]) + XMM1
ins = _231form;
std::swap(emitOp1, emitOp2);
}
}
else
{
// When we don't have a contained operand we still want to
// preference based on the target register if possible.
if (targetReg == op2NodeReg)
{
ins = _213form;
std::swap(emitOp1, emitOp2);
}
else if (targetReg == op3NodeReg)
{
ins = _231form;
std::swap(emitOp1, emitOp3);
}
else
{
ins = _213form;
}
}
assert(ins != INS_invalid);
genHWIntrinsic_R_R_R_RM(ins, attr, targetReg, emitOp1->GetRegNum(), emitOp2->GetRegNum(), emitOp3);
genProduceReg(node);
}
//------------------------------------------------------------------------
// genLZCNTIntrinsic: Generates the code for a LZCNT hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genLZCNTIntrinsic(GenTreeHWIntrinsic* node)
{
assert((node->GetHWIntrinsicId() == NI_LZCNT_LeadingZeroCount) ||
(node->GetHWIntrinsicId() == NI_LZCNT_X64_LeadingZeroCount));
genConsumeMultiOpOperands(node);
genXCNTIntrinsic(node, INS_lzcnt);
genProduceReg(node);
}
//------------------------------------------------------------------------
// genPCLMULQDQIntrinsic: Generates the code for a PCLMULQDQ hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genPCLMULQDQIntrinsic(GenTreeHWIntrinsic* node)
{
NYI("Implement PCLMULQDQ intrinsic code generation");
}
//------------------------------------------------------------------------
// genPOPCNTIntrinsic: Generates the code for a POPCNT hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genPOPCNTIntrinsic(GenTreeHWIntrinsic* node)
{
assert(node->GetHWIntrinsicId() == NI_POPCNT_PopCount || node->GetHWIntrinsicId() == NI_POPCNT_X64_PopCount);
genConsumeMultiOpOperands(node);
genXCNTIntrinsic(node, INS_popcnt);
genProduceReg(node);
}
//------------------------------------------------------------------------
// genXCNTIntrinsic: Generates the code for a lzcnt/tzcnt/popcnt hardware intrinsic node, breaks false dependencies on
// the target register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
//
void CodeGen::genXCNTIntrinsic(GenTreeHWIntrinsic* node, instruction ins)
{
// LZCNT/TZCNT/POPCNT have a false dependency on the target register on Intel Sandy Bridge, Haswell, and Skylake
// (POPCNT only) processors, so insert an `xor target, target` to break the dependency (the XOR triggers register
// renaming), but only if the target register is not an actual source of the instruction.
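// For example, for "popcnt eax, ecx" the affected CPUs treat eax as an input, so we emit
// "xor eax, eax" first unless eax is also one of the instruction's source registers.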
GenTree* op1 = node->Op(1);
regNumber sourceReg1 = REG_NA;
regNumber sourceReg2 = REG_NA;
if (!op1->isContained())
{
sourceReg1 = op1->GetRegNum();
}
else if (op1->isIndir())
{
GenTreeIndir* indir = op1->AsIndir();
GenTree* memBase = indir->Base();
if (memBase != nullptr)
{
sourceReg1 = memBase->GetRegNum();
}
if (indir->HasIndex())
{
sourceReg2 = indir->Index()->GetRegNum();
}
}
regNumber targetReg = node->GetRegNum();
if ((targetReg != sourceReg1) && (targetReg != sourceReg2))
{
GetEmitter()->emitIns_R_R(INS_xor, EA_4BYTE, targetReg, targetReg);
}
genHWIntrinsic_R_RM(node, ins, emitTypeSize(node->TypeGet()), targetReg, op1);
}
#endif // FEATURE_HW_INTRINSICS
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Intel hardware intrinsic Code Generator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef FEATURE_HW_INTRINSICS
#include "emit.h"
#include "codegen.h"
#include "sideeffects.h"
#include "lower.h"
#include "gcinfo.h"
#include "gcinfoencoder.h"
//------------------------------------------------------------------------
// assertIsContainableHWIntrinsicOp: Asserts that op is containable by node
//
// Arguments:
// lowering - The lowering phase from the compiler
// containingNode - The HWIntrinsic node that has the contained node
// containedNode - The node that is contained
//
static void assertIsContainableHWIntrinsicOp(Lowering* lowering,
GenTreeHWIntrinsic* containingNode,
GenTree* containedNode)
{
#if DEBUG
// The Lowering::IsContainableHWIntrinsicOp call is not quite right, since it follows pre-register allocation
// logic. However, this check is still important due to the various containment rules that SIMD intrinsics follow.
//
// We use isContainable to track the special HWIntrinsic node containment rules (for things like LoadAligned and
// LoadUnaligned) and we use the supportsRegOptional check to support general-purpose loads (both from stack
// spillage and for isUsedFromMemory contained nodes, in the case where the register allocator decided to not
// allocate a register in the first place).
GenTree* node = containedNode;
// Now that we are doing full memory containment safety checks, we can't properly check nodes that are not
// linked into an evaluation tree, like the special nodes we create in genHWIntrinsic.
// So, just say those are ok.
//
if (node->gtNext == nullptr)
{
return;
}
bool supportsRegOptional = false;
bool isContainable = lowering->TryGetContainableHWIntrinsicOp(containingNode, &node, &supportsRegOptional);
assert(isContainable || supportsRegOptional);
assert(node == containedNode);
#endif // DEBUG
}
//------------------------------------------------------------------------
// genIsTableDrivenHWIntrinsic:
//
// Arguments:
// category - category of a HW intrinsic
//
// Return Value:
// returns true if this category can be table-driven in CodeGen
//
static bool genIsTableDrivenHWIntrinsic(NamedIntrinsic intrinsicId, HWIntrinsicCategory category)
{
// TODO - move more categories into the table-driven framework
// HW_Category_Helper and HW_Flag_MultiIns/HW_Flag_SpecialCodeGen usually need manual codegen
const bool tableDrivenCategory =
(category != HW_Category_Special) && (category != HW_Category_Scalar) && (category != HW_Category_Helper);
const bool tableDrivenFlag =
!HWIntrinsicInfo::GeneratesMultipleIns(intrinsicId) && !HWIntrinsicInfo::HasSpecialCodegen(intrinsicId);
return tableDrivenCategory && tableDrivenFlag;
}
//------------------------------------------------------------------------
// genHWIntrinsic: Generates the code for a given hardware intrinsic node.
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsicId);
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
size_t numArgs = node->GetOperandCount();
// We need to validate that other phases of the compiler haven't introduced unsupported intrinsics
assert(compiler->compIsaSupportedDebugOnly(isa));
int ival = HWIntrinsicInfo::lookupIval(intrinsicId, compiler->compOpportunisticallyDependsOn(InstructionSet_AVX));
assert(HWIntrinsicInfo::RequiresCodegen(intrinsicId));
if (genIsTableDrivenHWIntrinsic(intrinsicId, category))
{
regNumber targetReg = node->GetRegNum();
var_types baseType = node->GetSimdBaseType();
GenTree* op1 = nullptr;
GenTree* op2 = nullptr;
GenTree* op3 = nullptr;
regNumber op1Reg = REG_NA;
regNumber op2Reg = REG_NA;
emitter* emit = GetEmitter();
assert(numArgs >= 0);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
assert(ins != INS_invalid);
emitAttr simdSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
assert(simdSize != 0);
switch (numArgs)
{
case 1:
{
op1 = node->Op(1);
if (node->OperIsMemoryLoad())
{
genConsumeAddress(op1);
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir load = indirForm(node->TypeGet(), op1);
emit->emitInsLoadInd(ins, simdSize, node->GetRegNum(), &load);
}
else
{
genConsumeRegs(op1);
op1Reg = op1->GetRegNum();
if ((ival != -1) && varTypeIsFloating(baseType))
{
assert((ival >= 0) && (ival <= 127));
if ((category == HW_Category_SIMDScalar) && HWIntrinsicInfo::CopiesUpperBits(intrinsicId))
{
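// For a SIMD scalar intrinsic that copies its upper bits (e.g. roundss), op1Reg is passed as both
// sources so that the upper elements of the result come from op1 itself.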
assert(!op1->isContained());
emit->emitIns_SIMD_R_R_R_I(ins, simdSize, targetReg, op1Reg, op1Reg,
static_cast<int8_t>(ival));
}
else
{
genHWIntrinsic_R_RM_I(node, ins, simdSize, static_cast<int8_t>(ival));
}
}
else if ((category == HW_Category_SIMDScalar) && HWIntrinsicInfo::CopiesUpperBits(intrinsicId))
{
emit->emitIns_SIMD_R_R_R(ins, simdSize, targetReg, op1Reg, op1Reg);
}
else
{
genHWIntrinsic_R_RM(node, ins, simdSize, targetReg, op1);
}
}
break;
}
case 2:
{
op1 = node->Op(1);
op2 = node->Op(2);
if (category == HW_Category_MemoryStore)
{
genConsumeAddress(op1);
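// When op2 is a contained AVX/AVX2 ExtractVector128, fold the extract and the store into a single
// vextractf128/vextracti128 with a memory destination.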
if (((intrinsicId == NI_SSE_Store) || (intrinsicId == NI_SSE2_Store)) && op2->isContained())
{
GenTreeHWIntrinsic* extract = op2->AsHWIntrinsic();
assert((extract->GetHWIntrinsicId() == NI_AVX_ExtractVector128) ||
(extract->GetHWIntrinsicId() == NI_AVX2_ExtractVector128));
regNumber regData = genConsumeReg(extract->Op(1));
ins = HWIntrinsicInfo::lookupIns(extract->GetHWIntrinsicId(), extract->GetSimdBaseType());
ival = static_cast<int>(extract->Op(2)->AsIntCon()->IconValue());
GenTreeIndir indir = indirForm(TYP_SIMD16, op1);
emit->emitIns_A_R_I(ins, EA_32BYTE, &indir, regData, ival);
}
else
{
genConsumeReg(op2);
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_STORE_IND to generate code with.
GenTreeStoreInd store = storeIndirForm(node->TypeGet(), op1, op2);
emit->emitInsStoreInd(ins, simdSize, &store);
}
break;
}
genConsumeRegs(op1);
genConsumeRegs(op2);
op1Reg = op1->GetRegNum();
op2Reg = op2->GetRegNum();
if ((op1Reg != targetReg) && (op2Reg == targetReg) && node->isRMWHWIntrinsic(compiler))
{
// We have "reg2 = reg1 op reg2" where "reg1 != reg2" on a RMW intrinsic.
//
// For non-commutative intrinsics, we should have ensured that op2 was marked
// delay free in order to prevent it from getting assigned the same register
// as target. However, for commutative intrinsics, we can just swap the operands
// in order to have "reg2 = reg2 op reg1" which will end up producing the right code.
noway_assert(node->OperIsCommutative());
op2Reg = op1Reg;
op1Reg = targetReg;
}
if ((ival != -1) && varTypeIsFloating(baseType))
{
assert((ival >= 0) && (ival <= 127));
genHWIntrinsic_R_R_RM_I(node, ins, simdSize, static_cast<int8_t>(ival));
}
else if (category == HW_Category_MemoryLoad)
{
// Get the address and the 'other' register.
GenTree* addr;
regNumber otherReg;
if (intrinsicId == NI_AVX_MaskLoad || intrinsicId == NI_AVX2_MaskLoad)
{
addr = op1;
otherReg = op2Reg;
}
else
{
addr = op2;
otherReg = op1Reg;
}
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir load = indirForm(node->TypeGet(), addr);
genHWIntrinsic_R_R_RM(node, ins, simdSize, targetReg, otherReg, &load);
}
else if (HWIntrinsicInfo::isImmOp(intrinsicId, op2))
{
assert(ival == -1);
auto emitSwCase = [&](int8_t i) { genHWIntrinsic_R_RM_I(node, ins, simdSize, i); };
if (op2->IsCnsIntOrI())
{
ssize_t ival = op2->AsIntCon()->IconValue();
assert((ival >= 0) && (ival <= 255));
emitSwCase((int8_t)ival);
}
else
{
// We emit a fallback case for the scenario when the imm-op is not a constant. This should
// normally happen when the intrinsic is called indirectly, such as via Reflection. However, it
// can also occur if the consumer calls it directly and just doesn't pass a constant value.
regNumber baseReg = node->ExtractTempReg();
regNumber offsReg = node->GetSingleTempReg();
genHWIntrinsicJumpTableFallback(intrinsicId, op2Reg, baseReg, offsReg, emitSwCase);
}
}
else if (node->TypeGet() == TYP_VOID)
{
genHWIntrinsic_R_RM(node, ins, simdSize, op1Reg, op2);
}
else
{
genHWIntrinsic_R_R_RM(node, ins, simdSize);
}
break;
}
case 3:
{
op1 = node->Op(1);
op2 = node->Op(2);
op3 = node->Op(3);
genConsumeRegs(op1);
op1Reg = op1->GetRegNum();
genConsumeRegs(op2);
op2Reg = op2->GetRegNum();
genConsumeRegs(op3);
regNumber op3Reg = op3->GetRegNum();
if (HWIntrinsicInfo::isImmOp(intrinsicId, op3))
{
assert(ival == -1);
auto emitSwCase = [&](int8_t i) { genHWIntrinsic_R_R_RM_I(node, ins, simdSize, i); };
if (op3->IsCnsIntOrI())
{
ssize_t ival = op3->AsIntCon()->IconValue();
assert((ival >= 0) && (ival <= 255));
emitSwCase((int8_t)ival);
}
else
{
// We emit a fallback case for the scenario when the imm-op is not a constant. This should
// normally happen when the intrinsic is called indirectly, such as via Reflection. However, it
// can also occur if the consumer calls it directly and just doesn't pass a constant value.
regNumber baseReg = node->ExtractTempReg();
regNumber offsReg = node->GetSingleTempReg();
genHWIntrinsicJumpTableFallback(intrinsicId, op3Reg, baseReg, offsReg, emitSwCase);
}
}
else if (category == HW_Category_MemoryStore)
{
// The Mask instructions do not currently support containment of the address.
assert(!op2->isContained());
if (intrinsicId == NI_AVX_MaskStore || intrinsicId == NI_AVX2_MaskStore)
{
emit->emitIns_AR_R_R(ins, simdSize, op2Reg, op3Reg, op1Reg, 0);
}
else
{
assert(intrinsicId == NI_SSE2_MaskMove);
assert(targetReg == REG_NA);
// SSE2 MaskMove hardcodes the destination (op3) in DI/EDI/RDI
emit->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_EDI, op3Reg, /* canSkip */ true);
emit->emitIns_R_R(ins, simdSize, op1Reg, op2Reg);
}
}
else
{
switch (intrinsicId)
{
case NI_SSE41_BlendVariable:
case NI_AVX_BlendVariable:
case NI_AVX2_BlendVariable:
{
genHWIntrinsic_R_R_RM_R(node, ins, simdSize);
break;
}
case NI_AVXVNNI_MultiplyWideningAndAdd:
case NI_AVXVNNI_MultiplyWideningAndAddSaturate:
{
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
assert(op2Reg != REG_NA);
genHWIntrinsic_R_R_R_RM(ins, simdSize, targetReg, op1Reg, op2Reg, op3);
break;
}
default:
{
unreached();
break;
};
}
}
break;
}
default:
unreached();
break;
}
genProduceReg(node);
return;
}
switch (isa)
{
case InstructionSet_Vector128:
case InstructionSet_Vector256:
genBaseIntrinsic(node);
break;
case InstructionSet_X86Base:
case InstructionSet_X86Base_X64:
genX86BaseIntrinsic(node);
break;
case InstructionSet_SSE:
case InstructionSet_SSE_X64:
genSSEIntrinsic(node);
break;
case InstructionSet_SSE2:
case InstructionSet_SSE2_X64:
genSSE2Intrinsic(node);
break;
case InstructionSet_SSE41:
case InstructionSet_SSE41_X64:
genSSE41Intrinsic(node);
break;
case InstructionSet_SSE42:
case InstructionSet_SSE42_X64:
genSSE42Intrinsic(node);
break;
case InstructionSet_AVX:
case InstructionSet_AVX2:
genAvxOrAvx2Intrinsic(node);
break;
case InstructionSet_AES:
genAESIntrinsic(node);
break;
case InstructionSet_BMI1:
case InstructionSet_BMI1_X64:
case InstructionSet_BMI2:
case InstructionSet_BMI2_X64:
genBMI1OrBMI2Intrinsic(node);
break;
case InstructionSet_FMA:
genFMAIntrinsic(node);
break;
case InstructionSet_LZCNT:
case InstructionSet_LZCNT_X64:
genLZCNTIntrinsic(node);
break;
case InstructionSet_PCLMULQDQ:
genPCLMULQDQIntrinsic(node);
break;
case InstructionSet_POPCNT:
case InstructionSet_POPCNT_X64:
genPOPCNTIntrinsic(node);
break;
default:
unreached();
break;
}
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_RM: Generates code for a hardware intrinsic node that takes a
// register operand and a register/memory operand.
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
// attr - The emit attribute for the instruction being generated
// reg - The register
// rmOp - The register/memory operand node
//
void CodeGen::genHWIntrinsic_R_RM(
GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber reg, GenTree* rmOp)
{
emitter* emit = GetEmitter();
OperandDesc rmOpDesc = genOperandDesc(rmOp);
if (rmOpDesc.IsContained())
{
assert(HWIntrinsicInfo::SupportsContainment(node->GetHWIntrinsicId()));
assertIsContainableHWIntrinsicOp(compiler->m_pLowering, node, rmOp);
}
switch (rmOpDesc.GetKind())
{
case OperandKind::ClsVar:
emit->emitIns_R_C(ins, attr, reg, rmOpDesc.GetFieldHnd(), 0);
break;
case OperandKind::Local:
emit->emitIns_R_S(ins, attr, reg, rmOpDesc.GetVarNum(), rmOpDesc.GetLclOffset());
break;
case OperandKind::Indir:
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir indirForm;
GenTreeIndir* indir = rmOpDesc.GetIndirForm(&indirForm);
emit->emitIns_R_A(ins, attr, reg, indir);
}
break;
case OperandKind::Reg:
if (emit->IsMovInstruction(ins))
{
emit->emitIns_Mov(ins, attr, reg, rmOp->GetRegNum(), /* canSkip */ false);
}
else
{
emit->emitIns_R_R(ins, attr, reg, rmOp->GetRegNum());
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_RM_I: Generates the code for a hardware intrinsic node that takes a register/memory operand,
// an immediate operand, and that returns a value in register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
// ival - The immediate value
//
void CodeGen::genHWIntrinsic_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr simdSize, int8_t ival)
{
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->Op(1);
// TODO-XArch-CQ: Commutative operations can have op1 be contained
// TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained
assert(targetReg != REG_NA);
assert(!node->OperIsCommutative()); // One operand intrinsics cannot be commutative
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
assert(HWIntrinsicInfo::SupportsContainment(node->GetHWIntrinsicId()));
assertIsContainableHWIntrinsicOp(compiler->m_pLowering, node, op1);
}
inst_RV_TT_IV(ins, simdSize, targetReg, op1, ival);
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_R_RM: Generates the code for a hardware intrinsic node that takes a register operand, a
// register/memory operand, and that returns a value in register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
// attr - The emit attribute for the instruction being generated
//
void CodeGen::genHWIntrinsic_R_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr)
{
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
regNumber op1Reg = op1->GetRegNum();
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
genHWIntrinsic_R_R_RM(node, ins, attr, targetReg, op1Reg, op2);
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_R_RM: Generates the code for a hardware intrinsic node that takes a register operand, a
// register/memory operand, and that returns a value in register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
// attr - The emit attribute for the instruction being generated
// targetReg - The register allocated to the result
// op1Reg - The register allocated to the first operand
// op2 - Another operand that may be in a register or memory
//
void CodeGen::genHWIntrinsic_R_R_RM(
GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, GenTree* op2)
{
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
if (op2->isContained() || op2->isUsedFromSpillTemp())
{
assert(HWIntrinsicInfo::SupportsContainment(node->GetHWIntrinsicId()));
assertIsContainableHWIntrinsicOp(compiler->m_pLowering, node, op2);
}
bool isRMW = node->isRMWHWIntrinsic(compiler);
inst_RV_RV_TT(ins, attr, targetReg, op1Reg, op2, isRMW);
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_R_RM_I: Generates the code for a hardware intrinsic node that takes a register operand, a
// register/memory operand, an immediate operand, and that returns a value in register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
// ival - The immediate value
//
void CodeGen::genHWIntrinsic_R_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr simdSize, int8_t ival)
{
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
emitter* emit = GetEmitter();
// TODO-XArch-CQ: Commutative operations can have op1 be contained
// TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained
regNumber op1Reg = op1->GetRegNum();
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
OperandDesc op2Desc = genOperandDesc(op2);
if (op2Desc.IsContained())
{
assert(HWIntrinsicInfo::SupportsContainment(node->GetHWIntrinsicId()));
assertIsContainableHWIntrinsicOp(compiler->m_pLowering, node, op2);
}
switch (op2Desc.GetKind())
{
case OperandKind::ClsVar:
emit->emitIns_SIMD_R_R_C_I(ins, simdSize, targetReg, op1Reg, op2Desc.GetFieldHnd(), 0, ival);
break;
case OperandKind::Local:
emit->emitIns_SIMD_R_R_S_I(ins, simdSize, targetReg, op1Reg, op2Desc.GetVarNum(), op2Desc.GetLclOffset(),
ival);
break;
case OperandKind::Indir:
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir indirForm;
GenTreeIndir* indir = op2Desc.GetIndirForm(&indirForm);
emit->emitIns_SIMD_R_R_A_I(ins, simdSize, targetReg, op1Reg, indir, ival);
}
break;
case OperandKind::Reg:
{
regNumber op2Reg = op2->GetRegNum();
if ((op1Reg != targetReg) && (op2Reg == targetReg) && node->isRMWHWIntrinsic(compiler))
{
// We have "reg2 = reg1 op reg2" where "reg1 != reg2" on a RMW intrinsic.
//
// For non-commutative intrinsics, we should have ensured that op2 was marked
// delay free in order to prevent it from getting assigned the same register
// as target. However, for commutative intrinsics, we can just swap the operands
// in order to have "reg2 = reg2 op reg1" which will end up producing the right code.
noway_assert(node->OperIsCommutative());
op2Reg = op1Reg;
op1Reg = targetReg;
}
emit->emitIns_SIMD_R_R_R_I(ins, simdSize, targetReg, op1Reg, op2Reg, ival);
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_R_RM_R: Generates the code for a hardware intrinsic node that takes a register operand, a
// register/memory operand, another register operand, and that returns a value in register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
//
void CodeGen::genHWIntrinsic_R_R_RM_R(GenTreeHWIntrinsic* node, instruction ins, emitAttr simdSize)
{
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
GenTree* op3 = node->Op(3);
emitter* emit = GetEmitter();
regNumber op1Reg = op1->GetRegNum();
regNumber op3Reg = op3->GetRegNum();
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
assert(op3Reg != REG_NA);
OperandDesc op2Desc = genOperandDesc(op2);
if (op2Desc.IsContained())
{
assert(HWIntrinsicInfo::SupportsContainment(node->GetHWIntrinsicId()));
assertIsContainableHWIntrinsicOp(compiler->m_pLowering, node, op2);
}
switch (op2Desc.GetKind())
{
case OperandKind::ClsVar:
emit->emitIns_SIMD_R_R_C_R(ins, simdSize, targetReg, op1Reg, op3Reg, op2Desc.GetFieldHnd(), 0);
break;
case OperandKind::Local:
emit->emitIns_SIMD_R_R_S_R(ins, simdSize, targetReg, op1Reg, op3Reg, op2Desc.GetVarNum(),
op2Desc.GetLclOffset());
break;
case OperandKind::Indir:
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir indirForm;
GenTreeIndir* indir = op2Desc.GetIndirForm(&indirForm);
emit->emitIns_SIMD_R_R_A_R(ins, simdSize, targetReg, op1Reg, op3Reg, indir);
}
break;
case OperandKind::Reg:
emit->emitIns_SIMD_R_R_R_R(ins, simdSize, targetReg, op1Reg, op2->GetRegNum(), op3Reg);
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genHWIntrinsic_R_R_R_RM: Generates the code for a hardware intrinsic node that takes two register operands,
// a register/memory operand, and that returns a value in register
//
// Arguments:
// ins - The instruction being generated
// attr - The emit attribute
// targetReg - The target register
// op1Reg - The register of the first operand
// op2Reg - The register of the second operand
// op3 - The third operand
//
void CodeGen::genHWIntrinsic_R_R_R_RM(
instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, GenTree* op3)
{
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
assert(op2Reg != REG_NA);
emitter* emit = GetEmitter();
OperandDesc op3Desc = genOperandDesc(op3);
switch (op3Desc.GetKind())
{
case OperandKind::ClsVar:
emit->emitIns_SIMD_R_R_R_C(ins, attr, targetReg, op1Reg, op2Reg, op3Desc.GetFieldHnd(), 0);
break;
case OperandKind::Local:
emit->emitIns_SIMD_R_R_R_S(ins, attr, targetReg, op1Reg, op2Reg, op3Desc.GetVarNum(),
op3Desc.GetLclOffset());
break;
case OperandKind::Indir:
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir indirForm;
GenTreeIndir* indir = op3Desc.GetIndirForm(&indirForm);
emit->emitIns_SIMD_R_R_R_A(ins, attr, targetReg, op1Reg, op2Reg, indir);
}
break;
case OperandKind::Reg:
emit->emitIns_SIMD_R_R_R_R(ins, attr, targetReg, op1Reg, op2Reg, op3->GetRegNum());
break;
default:
unreached();
}
}
// genHWIntrinsicJumpTableFallback : generate the jump-table fallback for imm-intrinsics
// with non-constant argument
//
// Arguments:
// intrinsic - intrinsic ID
// nonConstImmReg - the register containing the non-constant imm8 argument
// baseReg - a register for the start of the switch table
// offsReg - a register for the offset into the switch table
// emitSwCase - the lambda to generate a switch case
//
// Return Value:
// generate the jump-table fallback for imm-intrinsics with non-constant argument.
// Note:
// This function can be used for all imm-intrinsics (whether full-range or not),
//    The compiler front-end (i.e. importer) is responsible for inserting a range-check IR
// (GT_BOUNDS_CHECK) for imm8 argument, so this function does not need to do range-check.
//
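// Example usage (mirroring the call site in genSSE41Intrinsic below): the caller wraps the
// per-immediate codegen in a lambda and supplies two temporary registers for the table dispatch:
//
//    auto emitSwCase = [&](int8_t i) { inst_RV_TT_IV(ins, attr, targetReg, op1, i); };
//    regNumber baseReg = node->ExtractTempReg();
//    regNumber offsReg = node->GetSingleTempReg();
//    genHWIntrinsicJumpTableFallback(intrinsicId, op2->GetRegNum(), baseReg, offsReg, emitSwCase);
//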
template <typename HWIntrinsicSwitchCaseBody>
void CodeGen::genHWIntrinsicJumpTableFallback(NamedIntrinsic intrinsic,
regNumber nonConstImmReg,
regNumber baseReg,
regNumber offsReg,
HWIntrinsicSwitchCaseBody emitSwCase)
{
assert(nonConstImmReg != REG_NA);
    // AVX2 Gather intrinsics use the managed non-const fallback since they have a discrete imm8 value range
    // that does not work with the current compiler-generated jump-table fallback.
assert(!HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic));
emitter* emit = GetEmitter();
const unsigned maxByte = (unsigned)HWIntrinsicInfo::lookupImmUpperBound(intrinsic) + 1;
assert(maxByte <= 256);
BasicBlock* jmpTable[256];
unsigned jmpTableBase = emit->emitBBTableDataGenBeg(maxByte, true);
// Emit the jump table
for (unsigned i = 0; i < maxByte; i++)
{
jmpTable[i] = genCreateTempLabel();
emit->emitDataGenData(i, jmpTable[i]);
}
emit->emitDataGenEnd();
// Compute and jump to the appropriate offset in the switch table
emit->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), offsReg, compiler->eeFindJitDataOffs(jmpTableBase), 0);
emit->emitIns_R_ARX(INS_mov, EA_4BYTE, offsReg, offsReg, nonConstImmReg, 4, 0);
emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, baseReg);
emit->emitIns_R_R(INS_add, EA_PTRSIZE, offsReg, baseReg);
emit->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), offsReg);
// Emit the switch table entries
BasicBlock* switchTableBeg = genCreateTempLabel();
BasicBlock* switchTableEnd = genCreateTempLabel();
genDefineTempLabel(switchTableBeg);
for (unsigned i = 0; i < maxByte; i++)
{
genDefineTempLabel(jmpTable[i]);
emitSwCase((int8_t)i);
emit->emitIns_J(INS_jmp, switchTableEnd);
}
genDefineTempLabel(switchTableEnd);
}
//------------------------------------------------------------------------
// genBaseIntrinsic: Generates the code for a base hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
// Note:
//    We currently assume that all base intrinsics have at most two operands.
//
void CodeGen::genBaseIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
regNumber targetReg = node->GetRegNum();
var_types baseType = node->GetSimdBaseType();
assert(compiler->compIsaSupportedDebugOnly(InstructionSet_SSE));
assert((baseType >= TYP_BYTE) && (baseType <= TYP_DOUBLE));
GenTree* op1 = (node->GetOperandCount() >= 1) ? node->Op(1) : nullptr;
GenTree* op2 = (node->GetOperandCount() >= 2) ? node->Op(2) : nullptr;
genConsumeMultiOpOperands(node);
regNumber op1Reg = (op1 == nullptr) ? REG_NA : op1->GetRegNum();
emitter* emit = GetEmitter();
var_types simdType = Compiler::getSIMDTypeForSize(node->GetSimdSize());
emitAttr attr = emitActualTypeSize(simdType);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
switch (intrinsicId)
{
case NI_Vector128_CreateScalarUnsafe:
case NI_Vector256_CreateScalarUnsafe:
{
if (varTypeIsIntegral(baseType))
{
genHWIntrinsic_R_RM(node, ins, emitActualTypeSize(baseType), targetReg, op1);
}
else
{
assert(varTypeIsFloating(baseType));
attr = emitTypeSize(baseType);
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
genHWIntrinsic_R_RM(node, ins, attr, targetReg, op1);
}
else
{
// Just use movaps for reg->reg moves as it has zero-latency on modern CPUs
emit->emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
}
}
break;
}
case NI_Vector128_GetElement:
case NI_Vector256_GetElement:
{
if (simdType == TYP_SIMD12)
{
// op1 of TYP_SIMD12 should be considered as TYP_SIMD16
simdType = TYP_SIMD16;
}
            // Optimize the case where op1 is in memory and we are trying to access the i'th element.
if (!op1->isUsedFromReg())
{
assert(op1->isContained());
regNumber baseReg;
regNumber indexReg;
int offset = 0;
if (op1->OperIsLocal())
{
// There are three parts to the total offset here:
// {offset of local} + {offset of vector field (lclFld only)} + {offset of element within vector}.
bool isEBPbased;
unsigned varNum = op1->AsLclVarCommon()->GetLclNum();
offset += compiler->lvaFrameAddress(varNum, &isEBPbased);
#if !FEATURE_FIXED_OUT_ARGS
if (!isEBPbased)
{
// Adjust the offset by the amount currently pushed on the CPU stack
offset += genStackLevel;
}
#else
assert(genStackLevel == 0);
#endif // !FEATURE_FIXED_OUT_ARGS
if (op1->OperIs(GT_LCL_FLD))
{
offset += op1->AsLclFld()->GetLclOffs();
}
baseReg = (isEBPbased) ? REG_EBP : REG_ESP;
}
else
{
// Require GT_IND addr to be not contained.
assert(op1->OperIs(GT_IND));
GenTree* addr = op1->AsIndir()->Addr();
assert(!addr->isContained());
baseReg = addr->GetRegNum();
}
if (op2->OperIsConst())
{
assert(op2->isContained());
indexReg = REG_NA;
offset += (int)op2->AsIntCon()->IconValue() * genTypeSize(baseType);
}
else
{
indexReg = op2->GetRegNum();
assert(genIsValidIntReg(indexReg));
}
// Now, load the desired element.
GetEmitter()->emitIns_R_ARX(ins_Move_Extend(baseType, false), // Load
emitTypeSize(baseType), // Of the vector baseType
targetReg, // To targetReg
baseReg, // Base Reg
indexReg, // Indexed
genTypeSize(baseType), // by the size of the baseType
offset);
}
else if (op2->OperIsConst())
{
assert(intrinsicId == NI_Vector128_GetElement);
assert(varTypeIsFloating(baseType));
assert(op1Reg != REG_NA);
ssize_t ival = op2->AsIntCon()->IconValue();
if (baseType == TYP_FLOAT)
{
if (ival == 1)
{
if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE3))
{
emit->emitIns_R_R(INS_movshdup, attr, targetReg, op1Reg);
}
else
{
emit->emitIns_SIMD_R_R_R_I(INS_shufps, attr, targetReg, op1Reg, op1Reg,
static_cast<int8_t>(0x55));
}
}
else if (ival == 2)
{
emit->emitIns_SIMD_R_R_R(INS_unpckhps, attr, targetReg, op1Reg, op1Reg);
}
else
{
assert(ival == 3);
emit->emitIns_SIMD_R_R_R_I(INS_shufps, attr, targetReg, op1Reg, op1Reg,
static_cast<int8_t>(0xFF));
}
}
else
{
assert(baseType == TYP_DOUBLE);
assert(ival == 1);
emit->emitIns_SIMD_R_R_R(INS_unpckhpd, attr, targetReg, op1Reg, op1Reg);
}
}
else
{
// We don't have an instruction to implement this intrinsic if the index is not a constant.
                // So we will use the SIMD temp location to store the vector, and then load the desired element.
// The range check will already have been performed, so at this point we know we have an index
// within the bounds of the vector.
unsigned simdInitTempVarNum = compiler->lvaSIMDInitTempVarNum;
noway_assert(simdInitTempVarNum != BAD_VAR_NUM);
bool isEBPbased;
unsigned offs = compiler->lvaFrameAddress(simdInitTempVarNum, &isEBPbased);
#if !FEATURE_FIXED_OUT_ARGS
if (!isEBPbased)
{
// Adjust the offset by the amount currently pushed on the CPU stack
offs += genStackLevel;
}
#else
assert(genStackLevel == 0);
#endif // !FEATURE_FIXED_OUT_ARGS
regNumber indexReg = op2->GetRegNum();
// Store the vector to the temp location.
GetEmitter()->emitIns_S_R(ins_Store(simdType, compiler->isSIMDTypeLocalAligned(simdInitTempVarNum)),
emitTypeSize(simdType), op1Reg, simdInitTempVarNum, 0);
// Now, load the desired element.
GetEmitter()->emitIns_R_ARX(ins_Move_Extend(baseType, false), // Load
emitTypeSize(baseType), // Of the vector baseType
targetReg, // To targetReg
(isEBPbased) ? REG_EBP : REG_ESP, // Stack-based
indexReg, // Indexed
genTypeSize(baseType), // by the size of the baseType
offs);
}
break;
}
case NI_Vector128_ToScalar:
case NI_Vector256_ToScalar:
{
assert(varTypeIsFloating(baseType));
attr = emitTypeSize(TYP_SIMD16);
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
genHWIntrinsic_R_RM(node, ins, attr, targetReg, op1);
}
else
{
// Just use movaps for reg->reg moves as it has zero-latency on modern CPUs
emit->emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
}
break;
}
case NI_Vector128_ToVector256:
{
// ToVector256 has zero-extend semantics in order to ensure it is deterministic
// We always emit a move to the target register, even when op1Reg == targetReg,
// in order to ensure that Bits MAXVL-1:128 are zeroed.
attr = emitTypeSize(TYP_SIMD16);
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
genHWIntrinsic_R_RM(node, ins, attr, targetReg, op1);
}
else
{
// Just use movaps for reg->reg moves as it has zero-latency on modern CPUs
emit->emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ false);
}
break;
}
case NI_Vector128_ToVector256Unsafe:
case NI_Vector256_GetLower:
{
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
genHWIntrinsic_R_RM(node, ins, attr, targetReg, op1);
}
else
{
// Just use movaps for reg->reg moves as it has zero-latency on modern CPUs
emit->emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
}
break;
}
case NI_Vector128_get_Zero:
case NI_Vector256_get_Zero:
{
emit->emitIns_SIMD_R_R_R(ins, attr, targetReg, targetReg, targetReg);
break;
}
case NI_Vector128_get_AllBitsSet:
if (varTypeIsFloating(baseType) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX))
{
                // The following corresponds to the vcmptrueps pseudo-op; it is not available without the VEX prefix.
emit->emitIns_SIMD_R_R_R_I(ins, attr, targetReg, targetReg, targetReg, 15);
}
else
{
emit->emitIns_SIMD_R_R_R(INS_pcmpeqd, attr, targetReg, targetReg, targetReg);
}
break;
case NI_Vector256_get_AllBitsSet:
if (varTypeIsIntegral(baseType) && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
emit->emitIns_SIMD_R_R_R(ins, attr, targetReg, targetReg, targetReg);
}
else
{
assert(compiler->compIsaSupportedDebugOnly(InstructionSet_AVX));
// The following corresponds to vcmptrueps pseudo-op.
emit->emitIns_SIMD_R_R_R_I(INS_cmpps, attr, targetReg, targetReg, targetReg, 15);
}
break;
default:
{
unreached();
break;
}
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genX86BaseIntrinsic: Generates the code for an X86 base hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genX86BaseIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_X86Base_BitScanForward:
case NI_X86Base_BitScanReverse:
case NI_X86Base_X64_BitScanForward:
case NI_X86Base_X64_BitScanReverse:
{
GenTree* op1 = node->Op(1);
regNumber targetReg = node->GetRegNum();
var_types targetType = node->TypeGet();
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, targetType);
genHWIntrinsic_R_RM(node, ins, emitTypeSize(targetType), targetReg, op1);
break;
}
case NI_X86Base_Pause:
{
assert(node->GetSimdBaseType() == TYP_UNKNOWN);
GetEmitter()->emitIns(INS_pause);
break;
}
default:
unreached();
break;
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genSSEIntrinsic: Generates the code for an SSE hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genSSEIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
regNumber targetReg = node->GetRegNum();
var_types targetType = node->TypeGet();
var_types baseType = node->GetSimdBaseType();
emitter* emit = GetEmitter();
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_SSE_X64_ConvertToInt64:
case NI_SSE_X64_ConvertToInt64WithTruncation:
{
assert(targetType == TYP_LONG);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
genHWIntrinsic_R_RM(node, ins, EA_8BYTE, targetReg, node->Op(1));
break;
}
case NI_SSE_X64_ConvertScalarToVector128Single:
{
assert(baseType == TYP_LONG);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
genHWIntrinsic_R_R_RM(node, ins, EA_8BYTE);
break;
}
case NI_SSE_Prefetch0:
case NI_SSE_Prefetch1:
case NI_SSE_Prefetch2:
case NI_SSE_PrefetchNonTemporal:
{
assert(baseType == TYP_UBYTE);
// These do not support containment.
assert(!node->Op(1)->isContained());
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, node->GetSimdBaseType());
emit->emitIns_AR(ins, emitTypeSize(baseType), node->Op(1)->GetRegNum(), 0);
break;
}
case NI_SSE_StoreFence:
{
assert(baseType == TYP_UNKNOWN);
emit->emitIns(INS_sfence);
break;
}
default:
unreached();
break;
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genSSE2Intrinsic: Generates the code for an SSE2 hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genSSE2Intrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
regNumber targetReg = node->GetRegNum();
var_types targetType = node->TypeGet();
var_types baseType = node->GetSimdBaseType();
emitter* emit = GetEmitter();
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_SSE2_X64_ConvertScalarToVector128Double:
{
assert(baseType == TYP_LONG);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
genHWIntrinsic_R_R_RM(node, ins, EA_8BYTE);
break;
}
case NI_SSE2_X64_ConvertScalarToVector128Int64:
case NI_SSE2_X64_ConvertScalarToVector128UInt64:
{
assert(baseType == TYP_LONG || baseType == TYP_ULONG);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
genHWIntrinsic_R_RM(node, ins, emitTypeSize(baseType), targetReg, node->Op(1));
break;
}
case NI_SSE2_ConvertToInt32:
case NI_SSE2_ConvertToInt32WithTruncation:
case NI_SSE2_ConvertToUInt32:
case NI_SSE2_X64_ConvertToInt64:
case NI_SSE2_X64_ConvertToInt64WithTruncation:
case NI_SSE2_X64_ConvertToUInt64:
{
emitAttr attr;
if (varTypeIsIntegral(baseType))
{
assert(baseType == TYP_INT || baseType == TYP_UINT || baseType == TYP_LONG || baseType == TYP_ULONG);
attr = emitActualTypeSize(baseType);
}
else
{
assert(baseType == TYP_DOUBLE || baseType == TYP_FLOAT);
attr = emitTypeSize(targetType);
}
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
genHWIntrinsic_R_RM(node, ins, attr, targetReg, node->Op(1));
break;
}
case NI_SSE2_LoadFence:
{
assert(baseType == TYP_UNKNOWN);
emit->emitIns(INS_lfence);
break;
}
case NI_SSE2_MemoryFence:
{
assert(baseType == TYP_UNKNOWN);
emit->emitIns(INS_mfence);
break;
}
case NI_SSE2_StoreNonTemporal:
case NI_SSE2_X64_StoreNonTemporal:
{
assert(baseType == TYP_INT || baseType == TYP_UINT || baseType == TYP_LONG || baseType == TYP_ULONG);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
GenTreeStoreInd store = storeIndirForm(node->TypeGet(), node->Op(1), node->Op(2));
emit->emitInsStoreInd(ins, emitTypeSize(baseType), &store);
break;
}
default:
unreached();
break;
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genSSE41Intrinsic: Generates the code for an SSE4.1 hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genSSE41Intrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
GenTree* op1 = node->Op(1);
regNumber targetReg = node->GetRegNum();
var_types baseType = node->GetSimdBaseType();
emitter* emit = GetEmitter();
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_SSE41_ConvertToVector128Int16:
case NI_SSE41_ConvertToVector128Int32:
case NI_SSE41_ConvertToVector128Int64:
{
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
if (!varTypeIsSIMD(op1->gtType))
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir load = indirForm(node->TypeGet(), op1);
emit->emitInsLoadInd(ins, emitTypeSize(TYP_SIMD16), node->GetRegNum(), &load);
}
else
{
genHWIntrinsic_R_RM(node, ins, EA_16BYTE, targetReg, op1);
}
break;
}
case NI_SSE41_Extract:
case NI_SSE41_X64_Extract:
{
assert(!varTypeIsFloating(baseType));
GenTree* op2 = node->Op(2);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
emitAttr attr = emitActualTypeSize(node->TypeGet());
auto emitSwCase = [&](int8_t i) { inst_RV_TT_IV(ins, attr, targetReg, op1, i); };
if (op2->IsCnsIntOrI())
{
ssize_t ival = op2->AsIntCon()->IconValue();
assert((ival >= 0) && (ival <= 255));
emitSwCase((int8_t)ival);
}
else
{
                // We emit a fallback case for the scenario when the imm-op is not a constant. This would
                // normally only happen when the intrinsic is called indirectly, such as via Reflection. However, it
// can also occur if the consumer calls it directly and just doesn't pass a constant value.
regNumber baseReg = node->ExtractTempReg();
regNumber offsReg = node->GetSingleTempReg();
genHWIntrinsicJumpTableFallback(intrinsicId, op2->GetRegNum(), baseReg, offsReg, emitSwCase);
}
break;
}
default:
unreached();
break;
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genSSE42Intrinsic: Generates the code for an SSE4.2 hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genSSE42Intrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
var_types baseType = node->GetSimdBaseType();
var_types targetType = node->TypeGet();
emitter* emit = GetEmitter();
genConsumeMultiOpOperands(node);
regNumber op1Reg = op1->GetRegNum();
assert(targetReg != REG_NA);
assert(op1Reg != REG_NA);
assert(!node->OperIsCommutative());
switch (intrinsicId)
{
case NI_SSE42_Crc32:
case NI_SSE42_X64_Crc32:
{
assert((op2->GetRegNum() != targetReg) || (op1Reg == targetReg));
emit->emitIns_Mov(INS_mov, emitTypeSize(targetType), targetReg, op1Reg, /* canSkip */ true);
if ((baseType == TYP_UBYTE) || (baseType == TYP_USHORT)) // baseType is the type of the second argument
{
assert(targetType == TYP_INT);
genHWIntrinsic_R_RM(node, INS_crc32, emitTypeSize(baseType), targetReg, op2);
}
else
{
assert(op1->TypeGet() == op2->TypeGet());
assert((targetType == TYP_INT) || (targetType == TYP_LONG));
genHWIntrinsic_R_RM(node, INS_crc32, emitTypeSize(targetType), targetReg, op2);
}
break;
}
default:
{
unreached();
break;
}
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genAvxOrAvx2Intrinsic: Generates the code for an AVX/AVX2 hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genAvxOrAvx2Intrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
var_types baseType = node->GetSimdBaseType();
emitAttr attr = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
var_types targetType = node->TypeGet();
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
size_t numArgs = node->GetOperandCount();
GenTree* op1 = node->Op(1);
regNumber op1Reg = REG_NA;
regNumber targetReg = node->GetRegNum();
emitter* emit = GetEmitter();
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_AVX2_ConvertToInt32:
case NI_AVX2_ConvertToUInt32:
{
op1Reg = op1->GetRegNum();
assert((baseType == TYP_INT) || (baseType == TYP_UINT));
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
emit->emitIns_Mov(ins, emitActualTypeSize(baseType), targetReg, op1Reg, /* canSkip */ false);
break;
}
case NI_AVX2_ConvertToVector256Int16:
case NI_AVX2_ConvertToVector256Int32:
case NI_AVX2_ConvertToVector256Int64:
{
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
if (!varTypeIsSIMD(op1->gtType))
{
// Until we improve the handling of addressing modes in the emitter, we'll create a
// temporary GT_IND to generate code with.
GenTreeIndir load = indirForm(node->TypeGet(), op1);
emit->emitInsLoadInd(ins, emitTypeSize(TYP_SIMD32), node->GetRegNum(), &load);
}
else
{
genHWIntrinsic_R_RM(node, ins, EA_32BYTE, targetReg, op1);
}
break;
}
case NI_AVX2_GatherVector128:
case NI_AVX2_GatherVector256:
case NI_AVX2_GatherMaskVector128:
case NI_AVX2_GatherMaskVector256:
{
GenTree* op2 = node->Op(2);
GenTree* op3 = node->Op(3);
GenTree* lastOp = nullptr;
GenTree* indexOp = nullptr;
op1Reg = op1->GetRegNum();
regNumber op2Reg = op2->GetRegNum();
regNumber addrBaseReg = REG_NA;
regNumber addrIndexReg = REG_NA;
regNumber maskReg = node->ExtractTempReg(RBM_ALLFLOAT);
if (numArgs == 5)
{
assert(intrinsicId == NI_AVX2_GatherMaskVector128 || intrinsicId == NI_AVX2_GatherMaskVector256);
GenTree* op4 = node->Op(4);
lastOp = node->Op(5);
regNumber op3Reg = op3->GetRegNum();
regNumber op4Reg = op4->GetRegNum();
addrBaseReg = op2Reg;
addrIndexReg = op3Reg;
indexOp = op3;
                // Copy op4Reg into the temporary mask register; the mask register
                // will be cleared by the gather instruction.
emit->emitIns_Mov(INS_movaps, attr, maskReg, op4Reg, /* canSkip */ false);
// copy source vector to the target register for masking merge
emit->emitIns_Mov(INS_movaps, attr, targetReg, op1Reg, /* canSkip */ true);
}
else
{
assert(intrinsicId == NI_AVX2_GatherVector128 || intrinsicId == NI_AVX2_GatherVector256);
addrBaseReg = op1Reg;
addrIndexReg = op2Reg;
indexOp = op2;
lastOp = op3;
// generate all-one mask vector
emit->emitIns_SIMD_R_R_R(INS_pcmpeqd, attr, maskReg, maskReg, maskReg);
}
bool isVector128GatherWithVector256Index = (targetType == TYP_SIMD16) && (indexOp->TypeGet() == TYP_SIMD32);
            // hwintrinsiclistxarch.h uses dword index instructions by default
if (varTypeIsLong(node->GetAuxiliaryType()))
{
switch (ins)
{
case INS_vpgatherdd:
ins = INS_vpgatherqd;
if (isVector128GatherWithVector256Index)
{
// YMM index in address mode
attr = emitTypeSize(TYP_SIMD32);
}
break;
case INS_vpgatherdq:
ins = INS_vpgatherqq;
break;
case INS_vgatherdps:
ins = INS_vgatherqps;
if (isVector128GatherWithVector256Index)
{
// YMM index in address mode
attr = emitTypeSize(TYP_SIMD32);
}
break;
case INS_vgatherdpd:
ins = INS_vgatherqpd;
break;
default:
unreached();
}
}
assert(lastOp->IsCnsIntOrI());
ssize_t ival = lastOp->AsIntCon()->IconValue();
assert((ival >= 0) && (ival <= 255));
assert(targetReg != maskReg);
assert(targetReg != addrIndexReg);
assert(maskReg != addrIndexReg);
emit->emitIns_R_AR_R(ins, attr, targetReg, maskReg, addrBaseReg, addrIndexReg, (int8_t)ival, 0);
break;
}
default:
unreached();
break;
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genAESIntrinsic: Generates the code for an AES hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genAESIntrinsic(GenTreeHWIntrinsic* node)
{
NYI("Implement AES intrinsic code generation");
}
//------------------------------------------------------------------------
// genBMI1OrBMI2Intrinsic: Generates the code for a BMI1 and BMI2 hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genBMI1OrBMI2Intrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
regNumber targetReg = node->GetRegNum();
var_types targetType = node->TypeGet();
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, targetType);
emitter* emit = GetEmitter();
assert(targetReg != REG_NA);
genConsumeMultiOpOperands(node);
switch (intrinsicId)
{
case NI_BMI1_AndNot:
case NI_BMI1_X64_AndNot:
case NI_BMI1_BitFieldExtract:
case NI_BMI1_X64_BitFieldExtract:
case NI_BMI2_ParallelBitDeposit:
case NI_BMI2_ParallelBitExtract:
case NI_BMI2_X64_ParallelBitDeposit:
case NI_BMI2_X64_ParallelBitExtract:
case NI_BMI2_ZeroHighBits:
case NI_BMI2_X64_ZeroHighBits:
{
assert((targetType == TYP_INT) || (targetType == TYP_LONG));
genHWIntrinsic_R_R_RM(node, ins, emitTypeSize(node->TypeGet()));
break;
}
case NI_BMI1_ExtractLowestSetBit:
case NI_BMI1_GetMaskUpToLowestSetBit:
case NI_BMI1_ResetLowestSetBit:
case NI_BMI1_X64_ExtractLowestSetBit:
case NI_BMI1_X64_GetMaskUpToLowestSetBit:
case NI_BMI1_X64_ResetLowestSetBit:
{
assert((targetType == TYP_INT) || (targetType == TYP_LONG));
genHWIntrinsic_R_RM(node, ins, emitTypeSize(node->TypeGet()), targetReg, node->Op(1));
break;
}
case NI_BMI1_TrailingZeroCount:
case NI_BMI1_X64_TrailingZeroCount:
{
assert((targetType == TYP_INT) || (targetType == TYP_LONG));
genXCNTIntrinsic(node, ins);
break;
}
case NI_BMI2_MultiplyNoFlags:
case NI_BMI2_X64_MultiplyNoFlags:
{
size_t numArgs = node->GetOperandCount();
assert(numArgs == 2 || numArgs == 3);
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
regNumber op1Reg = op1->GetRegNum();
regNumber op2Reg = op2->GetRegNum();
regNumber op3Reg = REG_NA;
regNumber lowReg = REG_NA;
if (numArgs == 2)
{
lowReg = targetReg;
}
else
{
op3Reg = node->Op(3)->GetRegNum();
assert(!node->Op(3)->isContained());
assert(op3Reg != op1Reg);
assert(op3Reg != targetReg);
assert(op3Reg != REG_EDX);
lowReg = node->GetSingleTempReg();
assert(op3Reg != lowReg);
assert(lowReg != targetReg);
}
// These do not support containment
assert(!op2->isContained());
emitAttr attr = emitTypeSize(targetType);
// mov the first operand into implicit source operand EDX/RDX
assert((op2Reg != REG_EDX) || (op1Reg == REG_EDX));
emit->emitIns_Mov(INS_mov, attr, REG_EDX, op1Reg, /* canSkip */ true);
// generate code for MULX
genHWIntrinsic_R_R_RM(node, ins, attr, targetReg, lowReg, op2);
            // If the lower half of the result is required, store it to the memory pointed to by op3
if (numArgs == 3)
{
emit->emitIns_AR_R(INS_mov, attr, lowReg, op3Reg, 0);
}
break;
}
default:
{
unreached();
break;
}
}
genProduceReg(node);
}
//------------------------------------------------------------------------
// genFMAIntrinsic: Generates the code for an FMA hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genFMAIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
var_types baseType = node->GetSimdBaseType();
emitAttr attr = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
instruction _213form = HWIntrinsicInfo::lookupIns(intrinsicId, baseType); // 213 form
instruction _132form = (instruction)(_213form - 1);
instruction _231form = (instruction)(_213form + 1);
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
GenTree* op3 = node->Op(3);
regNumber targetReg = node->GetRegNum();
genConsumeMultiOpOperands(node);
regNumber op1NodeReg = op1->GetRegNum();
regNumber op2NodeReg = op2->GetRegNum();
regNumber op3NodeReg = op3->GetRegNum();
GenTree* emitOp1 = op1;
GenTree* emitOp2 = op2;
GenTree* emitOp3 = op3;
const bool copiesUpperBits = HWIntrinsicInfo::CopiesUpperBits(intrinsicId);
// Intrinsics with CopyUpperBits semantics cannot have op1 be contained
assert(!copiesUpperBits || !op1->isContained());
    // We need to keep this in sync with lsraxarch.cpp.
    // Ideally we'd actually swap the operands in LSRA and simplify codegen,
    // but it's a bit more complicated to do so for many operands, as well
    // as being complicated to tell codegen how to pick the right instruction.
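    // As a reminder of the three VEX encodings referenced in the per-case comments below:
    //    213 form: dst = (src2 * dst)  + src3
    //    132 form: dst = (dst  * src3) + src2
    //    231 form: dst = (src2 * src3) + dst
    // where dst is the first operand and the third source position is the one that may be a memory operand.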
instruction ins = INS_invalid;
if (op1->isContained() || op1->isUsedFromSpillTemp())
{
// targetReg == op3NodeReg or targetReg == ?
// op3 = ([op1] * op2) + op3
// 231 form: XMM1 = (XMM2 * [XMM3]) + XMM1
ins = _231form;
std::swap(emitOp1, emitOp3);
if (targetReg == op2NodeReg)
{
// op2 = ([op1] * op2) + op3
// 132 form: XMM1 = (XMM1 * [XMM3]) + XMM2
ins = _132form;
std::swap(emitOp1, emitOp2);
}
}
else if (op3->isContained() || op3->isUsedFromSpillTemp())
{
// targetReg could be op1NodeReg, op2NodeReg, or not equal to any op
// op1 = (op1 * op2) + [op3] or op2 = (op1 * op2) + [op3]
// ? = (op1 * op2) + [op3] or ? = (op1 * op2) + op3
// 213 form: XMM1 = (XMM2 * XMM1) + [XMM3]
ins = _213form;
if (!copiesUpperBits && (targetReg == op2NodeReg))
{
// op2 = (op1 * op2) + [op3]
// 213 form: XMM1 = (XMM2 * XMM1) + [XMM3]
std::swap(emitOp1, emitOp2);
}
}
else if (op2->isContained() || op2->isUsedFromSpillTemp())
{
// targetReg == op1NodeReg or targetReg == ?
// op1 = (op1 * [op2]) + op3
// 132 form: XMM1 = (XMM1 * [XMM3]) + XMM2
ins = _132form;
std::swap(emitOp2, emitOp3);
if (!copiesUpperBits && (targetReg == op3NodeReg))
{
// op3 = (op1 * [op2]) + op3
// 231 form: XMM1 = (XMM2 * [XMM3]) + XMM1
ins = _231form;
std::swap(emitOp1, emitOp2);
}
}
else
{
// When we don't have a contained operand we still want to
// preference based on the target register if possible.
if (targetReg == op2NodeReg)
{
ins = _213form;
std::swap(emitOp1, emitOp2);
}
else if (targetReg == op3NodeReg)
{
ins = _231form;
std::swap(emitOp1, emitOp3);
}
else
{
ins = _213form;
}
}
assert(ins != INS_invalid);
genHWIntrinsic_R_R_R_RM(ins, attr, targetReg, emitOp1->GetRegNum(), emitOp2->GetRegNum(), emitOp3);
genProduceReg(node);
}
//------------------------------------------------------------------------
// genLZCNTIntrinsic: Generates the code for a LZCNT hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genLZCNTIntrinsic(GenTreeHWIntrinsic* node)
{
assert((node->GetHWIntrinsicId() == NI_LZCNT_LeadingZeroCount) ||
(node->GetHWIntrinsicId() == NI_LZCNT_X64_LeadingZeroCount));
genConsumeMultiOpOperands(node);
genXCNTIntrinsic(node, INS_lzcnt);
genProduceReg(node);
}
//------------------------------------------------------------------------
// genPCLMULQDQIntrinsic: Generates the code for a PCLMULQDQ hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genPCLMULQDQIntrinsic(GenTreeHWIntrinsic* node)
{
NYI("Implement PCLMULQDQ intrinsic code generation");
}
//------------------------------------------------------------------------
// genPOPCNTIntrinsic: Generates the code for a POPCNT hardware intrinsic node
//
// Arguments:
// node - The hardware intrinsic node
//
void CodeGen::genPOPCNTIntrinsic(GenTreeHWIntrinsic* node)
{
assert(node->GetHWIntrinsicId() == NI_POPCNT_PopCount || node->GetHWIntrinsicId() == NI_POPCNT_X64_PopCount);
genConsumeMultiOpOperands(node);
genXCNTIntrinsic(node, INS_popcnt);
genProduceReg(node);
}
//------------------------------------------------------------------------
// genXCNTIntrinsic: Generates the code for a lzcnt/tzcnt/popcnt hardware intrinsic node, breaks false dependencies on
// the target register
//
// Arguments:
// node - The hardware intrinsic node
// ins - The instruction being generated
//
void CodeGen::genXCNTIntrinsic(GenTreeHWIntrinsic* node, instruction ins)
{
// LZCNT/TZCNT/POPCNT have a false dependency on the target register on Intel Sandy Bridge, Haswell, and Skylake
// (POPCNT only) processors, so insert a `XOR target, target` to break the dependency via XOR triggering register
// renaming, but only if it's not an actual dependency.
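    // For example, when the target register differs from every source register, the emitted sequence is
    // roughly "xor targetReg, targetReg" followed by e.g. "popcnt targetReg, sourceReg"; when the target
    // is also a source register, the xor is skipped since it would zero the source value.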
GenTree* op1 = node->Op(1);
regNumber sourceReg1 = REG_NA;
regNumber sourceReg2 = REG_NA;
if (!op1->isContained())
{
sourceReg1 = op1->GetRegNum();
}
else if (op1->isIndir())
{
GenTreeIndir* indir = op1->AsIndir();
GenTree* memBase = indir->Base();
if (memBase != nullptr)
{
sourceReg1 = memBase->GetRegNum();
}
if (indir->HasIndex())
{
sourceReg2 = indir->Index()->GetRegNum();
}
}
regNumber targetReg = node->GetRegNum();
if ((targetReg != sourceReg1) && (targetReg != sourceReg2))
{
GetEmitter()->emitIns_R_R(INS_xor, EA_4BYTE, targetReg, targetReg);
}
genHWIntrinsic_R_RM(node, ins, emitTypeSize(node->TypeGet()), targetReg, op1);
}
#endif // FEATURE_HW_INTRINSICS
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem: when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
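To make the failure mode concrete, here is a small, self-contained sketch (illustrative only: the types
and names below are hypothetical stand-ins, not the JIT's actual GenTree/gtHasReg code). A "has a
register?" query that only inspects the first slot of a multi-reg value answers "no" when only a later
slot is enregistered, so a GC pointer held in that later slot's register never enters the tracking set.

// Hypothetical model of the bug; not the actual JIT data structures.
#include <cstdio>

constexpr int NoReg = -1;

struct MultiRegLocalModel
{
    int  regs[2];        // register assigned to each slot, NoReg if the slot lives on the stack
    bool slotIsGcRef[2]; // whether the slot holds a GC pointer

    // Buggy query: only consults the first slot.
    bool hasRegFirstSlotOnly() const { return regs[0] != NoReg; }

    // Corrected query: any enregistered slot counts.
    bool hasRegAnySlot() const { return (regs[0] != NoReg) || (regs[1] != NoReg); }
};

int main()
{
    // First slot spilled to the stack, second slot enregistered and holding a GC pointer.
    MultiRegLocalModel lcl = {{NoReg, 3}, {false, true}};

    // A GC-reporting path gated on the buggy query would skip register 3 entirely,
    // ending that register's reported GC lifetime too early.
    printf("first-slot-only says enregistered: %d\n", lcl.hasRegFirstSlotOnly()); // prints 0
    printf("any-slot says enregistered: %d\n", lcl.hasRegAnySlot());              // prints 1
    return 0;
}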
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a |
| ./src/coreclr/jit/lsra.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
Linear Scan Register Allocation
a.k.a. LSRA
Preconditions
- All register requirements are expressed in the code stream, either as destination
registers of tree nodes, or as internal registers. These requirements are
expressed in the RefPositions built for each node by BuildNode(), which includes:
- The register uses and definitions.
- The register restrictions (candidates) of the target register, both from itself,
as producer of the value (dstCandidates), and from its consuming node (srcCandidates).
Note that when we talk about srcCandidates we are referring to the destination register
(not any of its sources).
- The number (internalCount) of registers required, and their register restrictions (internalCandidates).
These are neither inputs nor outputs of the node, but used in the sequence of code generated for the tree.
"Internal registers" are registers used during the code sequence generated for the node.
The register lifetimes must obey the following lifetime model:
- First, any internal registers are defined.
- Next, any source registers are used (and are then freed if they are last use and are not identified as
"delayRegFree").
- Next, the internal registers are used (and are then freed).
- Next, any registers in the kill set for the instruction are killed.
- Next, the destination register(s) are defined (multiple destination registers are only supported on ARM)
- Finally, any "delayRegFree" source registers are freed.
There are several things to note about this order:
- The internal registers will never overlap any use, but they may overlap a destination register.
- Internal registers are never live beyond the node.
- The "delayRegFree" annotation is used for instructions that are only available in a Read-Modify-Write form.
That is, the destination register is one of the sources. In this case, we must not use the same register for
the non-RMW operand as for the destination.
Overview (doLinearScan):
- Walk all blocks, building intervals and RefPositions (buildIntervals)
- Allocate registers (allocateRegisters)
- Annotate nodes with register assignments (resolveRegisters)
- Add move nodes as needed to resolve conflicting register
assignments across non-adjacent edges. (resolveEdges, called from resolveRegisters)
Postconditions:
Tree nodes (GenTree):
- GenTree::GetRegNum() (and gtRegPair for ARM) is annotated with the register
assignment for a node. If the node does not require a register, it is
annotated as such (GetRegNum() = REG_NA). For a variable definition or interior
tree node (an "implicit" definition), this is the register to put the result.
For an expression use, this is the place to find the value that has previously
been computed.
- In most cases, this register must satisfy the constraints specified for the RefPosition.
- In some cases, this is difficult:
- If a lclVar node currently lives in some register, it may not be desirable to move it
(i.e. its current location may be desirable for future uses, e.g. if it's a callee save register,
but needs to be in a specific arg register for a call).
- In other cases there may be conflicts on the restrictions placed by the defining node and the node which
consumes it
- If such a node is constrained to a single fixed register (e.g. an arg register, or a return from a call),
then LSRA is free to annotate the node with a different register. The code generator must issue the appropriate
move.
- However, if such a node is constrained to a set of registers, and its current location does not satisfy that
requirement, LSRA must insert a GT_COPY node between the node and its parent. The GetRegNum() on the GT_COPY
node must satisfy the register requirement of the parent.
- GenTree::gtRsvdRegs has a set of registers used for internal temps.
- A tree node is marked GTF_SPILL if the tree node must be spilled by the code generator after it has been
evaluated.
- LSRA currently does not set GTF_SPILLED on such nodes, because it caused problems in the old code generator.
In the new backend perhaps this should change (see also the note below under CodeGen).
- A tree node is marked GTF_SPILLED if it is a lclVar that must be reloaded prior to use.
- The register (GetRegNum()) on the node indicates the register to which it must be reloaded.
- For lclVar nodes, since the uses and defs are distinct tree nodes, it is always possible to annotate the node
with the register to which the variable must be reloaded.
- For other nodes, since they represent both the def and use, if the value must be reloaded to a different
register, LSRA must insert a GT_RELOAD node in order to specify the register to which it should be reloaded.
Local variable table (LclVarDsc):
- LclVarDsc::lvRegister is set to true if a local variable has the
same register assignment for its entire lifetime.
- LclVarDsc::lvRegNum / GetOtherReg(): these are initialized to their
first value at the end of LSRA (it looks like GetOtherReg() isn't?
This is probably a bug (ARM)). Codegen will set them to their current value
as it processes the trees, since a variable can (now) be assigned different
registers over its lifetimes.
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
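// Example of the per-node lifetime model described above (illustrative): for a Read-Modify-Write
// instruction such as x86 "add", the uses of both sources come before the definition of the
// destination, and the non-RMW source is marked "delayRegFree" so that it is not freed until after
// that definition, which is what keeps it from sharing a register with the destination.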
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "lsra.h"
#ifdef DEBUG
const char* LinearScan::resolveTypeName[] = {"Split", "Join", "Critical", "SharedCritical"};
#endif // DEBUG
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Small Helper functions XX
XX XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
//--------------------------------------------------------------
// lsraAssignRegToTree: Assign the given reg to tree node.
//
// Arguments:
// tree - Gentree node
// reg - register to be assigned
// regIdx - register idx, if tree is a multi-reg call node.
// regIdx will be zero for single-reg result producing tree nodes.
//
// Return Value:
// None
//
void lsraAssignRegToTree(GenTree* tree, regNumber reg, unsigned regIdx)
{
if (regIdx == 0)
{
tree->SetRegNum(reg);
}
#if !defined(TARGET_64BIT)
else if (tree->OperIsMultiRegOp())
{
assert(regIdx == 1);
GenTreeMultiRegOp* mul = tree->AsMultiRegOp();
mul->gtOtherReg = reg;
}
#endif // TARGET_64BIT
#if FEATURE_MULTIREG_RET
else if (tree->OperGet() == GT_COPY)
{
assert(regIdx == 1);
GenTreeCopyOrReload* copy = tree->AsCopyOrReload();
copy->gtOtherRegs[0] = (regNumberSmall)reg;
}
#endif // FEATURE_MULTIREG_RET
#if FEATURE_ARG_SPLIT
else if (tree->OperIsPutArgSplit())
{
GenTreePutArgSplit* putArg = tree->AsPutArgSplit();
putArg->SetRegNumByIdx(reg, regIdx);
}
#endif // FEATURE_ARG_SPLIT
#ifdef FEATURE_HW_INTRINSICS
else if (tree->OperIs(GT_HWINTRINSIC))
{
assert(regIdx == 1);
// TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers.
tree->AsHWIntrinsic()->SetOtherReg(reg);
}
#endif // FEATURE_HW_INTRINSICS
else if (tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
tree->AsLclVar()->SetRegNumByIdx(reg, regIdx);
}
else
{
assert(tree->IsMultiRegCall());
GenTreeCall* call = tree->AsCall();
call->SetRegNumByIdx(reg, regIdx);
}
}
//-------------------------------------------------------------
// getWeight: Returns the weight of the RefPosition.
//
// Arguments:
// refPos - ref position
//
// Returns:
// Weight of ref position.
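// Notes:
//    For example, a candidate local with a weighted ref count of 8 whose interval was already spilled
//    as an EH var is weighted 4 (only the reload is needed), while a plain tree temp in a block of
//    weight 2 is weighted 2 * 2 * 2 = 8 (two refs, doubled for tree temps, times the block weight).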
weight_t LinearScan::getWeight(RefPosition* refPos)
{
weight_t weight;
GenTree* treeNode = refPos->treeNode;
if (treeNode != nullptr)
{
if (isCandidateLocalRef(treeNode))
{
// Tracked locals: use weighted ref cnt as the weight of the
// ref position.
const LclVarDsc* varDsc = compiler->lvaGetDesc(treeNode->AsLclVarCommon());
weight = varDsc->lvRefCntWtd();
if (refPos->getInterval()->isSpilled)
{
// Decrease the weight if the interval has already been spilled.
if (varDsc->lvLiveInOutOfHndlr || refPos->getInterval()->firstRefPosition->singleDefSpill)
{
// An EH-var/single-def is always spilled at defs, and we'll decrease the weight by half,
// since only the reload is needed.
weight = weight / 2;
}
else
{
weight -= BB_UNITY_WEIGHT;
}
}
}
else
{
// Non-candidate local ref or non-lcl tree node.
// These are considered to have two references in the basic block:
// a def and a use and hence weighted ref count would be 2 times
// the basic block weight in which they appear.
// However, it is generally more harmful to spill tree temps, so we
// double that.
const unsigned TREE_TEMP_REF_COUNT = 2;
const unsigned TREE_TEMP_BOOST_FACTOR = 2;
weight = TREE_TEMP_REF_COUNT * TREE_TEMP_BOOST_FACTOR * blockInfo[refPos->bbNum].weight;
}
}
else
{
// Non-tree node ref positions. These will have a single
// reference in the basic block and hence their weighted
// refcount is equal to the block weight in which they
// appear.
weight = blockInfo[refPos->bbNum].weight;
}
return weight;
}
// allRegs represents a set of registers that can
// be used to allocate the specified type at any point
// in time (more of a 'bank' of registers).
regMaskTP LinearScan::allRegs(RegisterType rt)
{
assert((rt != TYP_UNDEF) && (rt != TYP_STRUCT));
if (rt == TYP_FLOAT)
{
return availableFloatRegs;
}
else if (rt == TYP_DOUBLE)
{
return availableDoubleRegs;
}
#ifdef FEATURE_SIMD
// TODO-Cleanup: Add an RBM_ALLSIMD
else if (varTypeIsSIMD(rt))
{
return availableDoubleRegs;
}
#endif // FEATURE_SIMD
else
{
return availableIntRegs;
}
}
regMaskTP LinearScan::allByteRegs()
{
#ifdef TARGET_X86
return availableIntRegs & RBM_BYTE_REGS;
#else
return availableIntRegs;
#endif
}
regMaskTP LinearScan::allSIMDRegs()
{
return availableFloatRegs;
}
void LinearScan::updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition)
{
LsraLocation nextLocation;
if (nextRefPosition == nullptr)
{
nextLocation = MaxLocation;
fixedRegs &= ~genRegMask(regRecord->regNum);
}
else
{
nextLocation = nextRefPosition->nodeLocation;
fixedRegs |= genRegMask(regRecord->regNum);
}
nextFixedRef[regRecord->regNum] = nextLocation;
}
regMaskTP LinearScan::getMatchingConstants(regMaskTP mask, Interval* currentInterval, RefPosition* refPosition)
{
assert(currentInterval->isConstant && RefTypeIsDef(refPosition->refType));
regMaskTP candidates = (mask & m_RegistersWithConstants);
regMaskTP result = RBM_NONE;
while (candidates != RBM_NONE)
{
regMaskTP candidateBit = genFindLowestBit(candidates);
candidates &= ~candidateBit;
regNumber regNum = genRegNumFromMask(candidateBit);
RegRecord* physRegRecord = getRegisterRecord(regNum);
if (isMatchingConstant(physRegRecord, refPosition))
{
result |= candidateBit;
}
}
return result;
}
void LinearScan::clearNextIntervalRef(regNumber reg, var_types regType)
{
nextIntervalRef[reg] = MaxLocation;
#ifdef TARGET_ARM
if (regType == TYP_DOUBLE)
{
assert(genIsValidDoubleReg(reg));
regNumber otherReg = REG_NEXT(reg);
nextIntervalRef[otherReg] = MaxLocation;
}
#endif
}
void LinearScan::clearSpillCost(regNumber reg, var_types regType)
{
spillCost[reg] = 0;
#ifdef TARGET_ARM
if (regType == TYP_DOUBLE)
{
assert(genIsValidDoubleReg(reg));
regNumber otherReg = REG_NEXT(reg);
spillCost[otherReg] = 0;
}
#endif
}
void LinearScan::updateNextIntervalRef(regNumber reg, Interval* interval)
{
LsraLocation nextRefLocation = interval->getNextRefLocation();
nextIntervalRef[reg] = nextRefLocation;
#ifdef TARGET_ARM
if (interval->registerType == TYP_DOUBLE)
{
regNumber otherReg = REG_NEXT(reg);
nextIntervalRef[otherReg] = nextRefLocation;
}
#endif
}
void LinearScan::updateSpillCost(regNumber reg, Interval* interval)
{
// An interval can have no recentRefPosition if this is the initial assignment
// of a parameter to its home register.
weight_t cost = (interval->recentRefPosition != nullptr) ? getWeight(interval->recentRefPosition) : 0;
spillCost[reg] = cost;
#ifdef TARGET_ARM
if (interval->registerType == TYP_DOUBLE)
{
regNumber otherReg = REG_NEXT(reg);
spillCost[otherReg] = cost;
}
#endif
}
//------------------------------------------------------------------------
// internalFloatRegCandidates: Return the set of registers that are appropriate
// for use as internal float registers.
//
// Return Value:
// The set of registers (as a regMaskTP).
//
// Notes:
// compFloatingPointUsed is only required to be set if it is possible that we
// will use floating point callee-save registers.
// It is unlikely, if an internal register is the only use of floating point,
// that it will select a callee-save register. But to be safe, we restrict
// the set of candidates if compFloatingPointUsed is not already set.
regMaskTP LinearScan::internalFloatRegCandidates()
{
if (compiler->compFloatingPointUsed)
{
return allRegs(TYP_FLOAT);
}
else
{
return RBM_FLT_CALLEE_TRASH;
}
}
bool LinearScan::isFree(RegRecord* regRecord)
{
return ((regRecord->assignedInterval == nullptr || !regRecord->assignedInterval->isActive) &&
!isRegBusy(regRecord->regNum, regRecord->registerType));
}
RegRecord* LinearScan::getRegisterRecord(regNumber regNum)
{
assert((unsigned)regNum < ArrLen(physRegs));
return &physRegs[regNum];
}
#ifdef DEBUG
//----------------------------------------------------------------------------
// getConstrainedRegMask: Returns new regMask which is the intersection of
// regMaskActual and regMaskConstraint if the new regMask has at least
// minRegCount registers, otherwise returns regMaskActual.
//
// Arguments:
// regMaskActual - regMask that needs to be constrained
// regMaskConstraint - regMask constraint that needs to be
// applied to regMaskActual
//      minRegCount         -       Minimum number of regs that should
// be present in new regMask.
//
// Return Value:
//      New regMask that has at least minRegCount registers after intersection.
// Otherwise returns regMaskActual.
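//      For example, on x64 with regMaskActual = (RBM_RAX | RBM_RCX | RBM_RBX), regMaskConstraint =
//      RBM_CALLEE_SAVED and minRegCount = 2, the intersection is just RBM_RBX; since that is fewer
//      than two registers, the original regMaskActual is returned unchanged.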
regMaskTP LinearScan::getConstrainedRegMask(regMaskTP regMaskActual, regMaskTP regMaskConstraint, unsigned minRegCount)
{
regMaskTP newMask = regMaskActual & regMaskConstraint;
if (genCountBits(newMask) >= minRegCount)
{
return newMask;
}
return regMaskActual;
}
//------------------------------------------------------------------------
// stressLimitRegs: Given a set of registers, expressed as a register mask, reduce
// them based on the current stress options.
//
// Arguments:
// mask - The current mask of register candidates for a node
//
// Return Value:
// A possibly-modified mask, based on the value of COMPlus_JitStressRegs.
//
// Notes:
// This is the method used to implement the stress options that limit
// the set of registers considered for allocation.
regMaskTP LinearScan::stressLimitRegs(RefPosition* refPosition, regMaskTP mask)
{
if (getStressLimitRegs() != LSRA_LIMIT_NONE)
{
// The refPosition could be null, for example when called
// by getTempRegForResolution().
int minRegCount = (refPosition != nullptr) ? refPosition->minRegCandidateCount : 1;
switch (getStressLimitRegs())
{
case LSRA_LIMIT_CALLEE:
if (!compiler->opts.compDbgEnC)
{
mask = getConstrainedRegMask(mask, RBM_CALLEE_SAVED, minRegCount);
}
break;
case LSRA_LIMIT_CALLER:
{
mask = getConstrainedRegMask(mask, RBM_CALLEE_TRASH, minRegCount);
}
break;
case LSRA_LIMIT_SMALL_SET:
if ((mask & LsraLimitSmallIntSet) != RBM_NONE)
{
mask = getConstrainedRegMask(mask, LsraLimitSmallIntSet, minRegCount);
}
else if ((mask & LsraLimitSmallFPSet) != RBM_NONE)
{
mask = getConstrainedRegMask(mask, LsraLimitSmallFPSet, minRegCount);
}
break;
default:
unreached();
}
if (refPosition != nullptr && refPosition->isFixedRegRef)
{
mask |= refPosition->registerAssignment;
}
}
return mask;
}
#endif // DEBUG
//------------------------------------------------------------------------
// conflictingFixedRegReference: Determine whether the 'reg' has a
// fixed register use that conflicts with 'refPosition'
//
// Arguments:
// regNum - The register of interest
// refPosition - The RefPosition of interest
//
// Return Value:
// Returns true iff the given RefPosition is NOT a fixed use of this register,
// AND either:
// - there is a RefPosition on this RegRecord at the nodeLocation of the given RefPosition, or
// - the given RefPosition has a delayRegFree, and there is a RefPosition on this RegRecord at
// the nodeLocation just past the given RefPosition.
//
// Assumptions:
//    'refPosition' is non-null.
bool LinearScan::conflictingFixedRegReference(regNumber regNum, RefPosition* refPosition)
{
// Is this a fixed reference of this register? If so, there is no conflict.
if (refPosition->isFixedRefOfRegMask(genRegMask(regNum)))
{
return false;
}
// Otherwise, check for conflicts.
// There is a conflict if:
// 1. There is a recent RefPosition on this RegRecord that is at this location, OR
// 2. There is an upcoming RefPosition at this location, or at the next location
// if refPosition is a delayed use (i.e. must be kept live through the next/def location).
LsraLocation refLocation = refPosition->nodeLocation;
RegRecord* regRecord = getRegisterRecord(regNum);
if (isRegInUse(regNum, refPosition->getInterval()->registerType) &&
(regRecord->assignedInterval != refPosition->getInterval()))
{
return true;
}
LsraLocation nextPhysRefLocation = nextFixedRef[regNum];
if (nextPhysRefLocation == refLocation || (refPosition->delayRegFree && nextPhysRefLocation == (refLocation + 1)))
{
return true;
}
return false;
}
/*****************************************************************************
* Inline functions for Interval
*****************************************************************************/
RefPosition* Referenceable::getNextRefPosition()
{
if (recentRefPosition == nullptr)
{
return firstRefPosition;
}
else
{
return recentRefPosition->nextRefPosition;
}
}
LsraLocation Referenceable::getNextRefLocation()
{
RefPosition* nextRefPosition = getNextRefPosition();
if (nextRefPosition == nullptr)
{
return MaxLocation;
}
else
{
return nextRefPosition->nodeLocation;
}
}
#ifdef DEBUG
void LinearScan::dumpVarToRegMap(VarToRegMap map)
{
bool anyPrinted = false;
for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
{
if (map[varIndex] != REG_STK)
{
printf("V%02u=%s ", compiler->lvaTrackedIndexToLclNum(varIndex), getRegName(map[varIndex]));
anyPrinted = true;
}
}
if (!anyPrinted)
{
printf("none");
}
printf("\n");
}
void LinearScan::dumpInVarToRegMap(BasicBlock* block)
{
printf("Var=Reg beg of " FMT_BB ": ", block->bbNum);
VarToRegMap map = getInVarToRegMap(block->bbNum);
dumpVarToRegMap(map);
}
void LinearScan::dumpOutVarToRegMap(BasicBlock* block)
{
printf("Var=Reg end of " FMT_BB ": ", block->bbNum);
VarToRegMap map = getOutVarToRegMap(block->bbNum);
dumpVarToRegMap(map);
}
#endif // DEBUG
LinearScanInterface* getLinearScanAllocator(Compiler* comp)
{
return new (comp, CMK_LSRA) LinearScan(comp);
}
//------------------------------------------------------------------------
// LSRA constructor
//
// Arguments:
// theCompiler
//
// Notes:
// The constructor takes care of initializing the data structures that are used
// during Lowering, including (in DEBUG) getting the stress environment variables,
// as they may affect the block ordering.
LinearScan::LinearScan(Compiler* theCompiler)
: compiler(theCompiler)
, intervals(theCompiler->getAllocator(CMK_LSRA_Interval))
, allocationPassComplete(false)
, refPositions(theCompiler->getAllocator(CMK_LSRA_RefPosition))
, listNodePool(theCompiler)
{
regSelector = new (theCompiler, CMK_LSRA) RegisterSelection(this);
firstColdLoc = MaxLocation;
#ifdef DEBUG
maxNodeLocation = 0;
activeRefPosition = nullptr;
// Get the value of the environment variable that controls stress for register allocation
lsraStressMask = JitConfig.JitStressRegs();
#if 0
if (lsraStressMask != 0)
{
// The code in this #if can be used to debug JitStressRegs issues according to
// method hash or method count.
// To use, simply set environment variables:
// JitStressRegsHashLo and JitStressRegsHashHi to set the range of method hash, or
// JitStressRegsStart and JitStressRegsEnd to set the range of method count
// (Compiler::jitTotalMethodCount as reported by COMPlus_DumpJittedMethods).
unsigned methHash = compiler->info.compMethodHash();
char* lostr = getenv("JitStressRegsHashLo");
unsigned methHashLo = 0;
bool dump = false;
if (lostr != nullptr)
{
sscanf_s(lostr, "%x", &methHashLo);
dump = true;
}
char* histr = getenv("JitStressRegsHashHi");
unsigned methHashHi = UINT32_MAX;
if (histr != nullptr)
{
sscanf_s(histr, "%x", &methHashHi);
dump = true;
}
if (methHash < methHashLo || methHash > methHashHi)
{
lsraStressMask = 0;
}
// Check method count
unsigned count = Compiler::jitTotalMethodCompiled;
unsigned start = 0;
unsigned end = UINT32_MAX;
char* startStr = getenv("JitStressRegsStart");
char* endStr = getenv("JitStressRegsEnd");
if (startStr != nullptr)
{
sscanf_s(startStr, "%d", &start);
dump = true;
}
if (endStr != nullptr)
{
sscanf_s(endStr, "%d", &end);
dump = true;
}
if (count < start || (count > end))
{
lsraStressMask = 0;
}
if ((lsraStressMask != 0) && (dump == true))
{
printf("JitStressRegs = %x for method %d: %s, hash = 0x%x.\n",
lsraStressMask, Compiler::jitTotalMethodCompiled, compiler->info.compFullName, compiler->info.compMethodHash());
printf(""); // flush
}
}
#endif // 0
#endif // DEBUG
// Assume that we will enregister local variables if it's not disabled. We'll reset it if we
// have no tracked locals when we start allocating. Note that new tracked lclVars may be added
// after the first liveness analysis - either by optimizations or by Lowering, and the tracked
// set won't be recomputed until after Lowering (and this constructor is called prior to Lowering),
// so we don't want to check that yet.
enregisterLocalVars = compiler->compEnregLocals();
#ifdef TARGET_ARM64
availableIntRegs = (RBM_ALLINT & ~(RBM_PR | RBM_FP | RBM_LR) & ~compiler->codeGen->regSet.rsMaskResvd);
#else
availableIntRegs = (RBM_ALLINT & ~compiler->codeGen->regSet.rsMaskResvd);
#endif
#if ETW_EBP_FRAMED
availableIntRegs &= ~RBM_FPBASE;
#endif // ETW_EBP_FRAMED
availableFloatRegs = RBM_ALLFLOAT;
availableDoubleRegs = RBM_ALLDOUBLE;
#ifdef TARGET_AMD64
if (compiler->opts.compDbgEnC)
{
// On x64 when the EnC option is set, we always save exactly RBP, RSI and RDI.
// RBP is not available to the register allocator, so RSI and RDI are the only
// callee-save registers available.
availableIntRegs &= ~RBM_CALLEE_SAVED | RBM_RSI | RBM_RDI;
availableFloatRegs &= ~RBM_CALLEE_SAVED;
availableDoubleRegs &= ~RBM_CALLEE_SAVED;
}
#endif // TARGET_AMD64
compiler->rpFrameType = FT_NOT_SET;
compiler->rpMustCreateEBPCalled = false;
compiler->codeGen->intRegState.rsIsFloat = false;
compiler->codeGen->floatRegState.rsIsFloat = true;
// Block sequencing (the order in which we schedule).
// Note that we don't initialize the bbVisitedSet until we do the first traversal
// This is so that any blocks that are added during the first traversal
// are accounted for (and we don't have BasicBlockEpoch issues).
blockSequencingDone = false;
blockSequence = nullptr;
blockSequenceWorkList = nullptr;
curBBSeqNum = 0;
bbSeqCount = 0;
// Information about each block, including predecessor blocks used for variable locations at block entry.
blockInfo = nullptr;
pendingDelayFree = false;
tgtPrefUse = nullptr;
}
//------------------------------------------------------------------------
// getNextCandidateFromWorkList: Get the next candidate for block sequencing
//
// Arguments:
// None.
//
// Return Value:
// The next block to be placed in the sequence.
//
// Notes:
// This method currently always returns the next block in the list, and relies on having
// blocks added to the list only when they are "ready", and on the
// addToBlockSequenceWorkList() method to insert them in the proper order.
// However, a block may be in the list and already selected, if it was subsequently
// encountered as both a flow and layout successor of the most recently selected
// block.
BasicBlock* LinearScan::getNextCandidateFromWorkList()
{
BasicBlockList* nextWorkList = nullptr;
for (BasicBlockList* workList = blockSequenceWorkList; workList != nullptr; workList = nextWorkList)
{
nextWorkList = workList->next;
BasicBlock* candBlock = workList->block;
removeFromBlockSequenceWorkList(workList, nullptr);
if (!isBlockVisited(candBlock))
{
return candBlock;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// setBlockSequence: Determine the block order for register allocation.
//
// Arguments:
// None
//
// Return Value:
// None
//
// Notes:
// On return, the blockSequence array contains the blocks, in the order in which they
// will be allocated.
// This method clears the bbVisitedSet on LinearScan, and when it returns the set
// contains the bbNums of all the blocks.
void LinearScan::setBlockSequence()
{
assert(!blockSequencingDone); // The method should be called only once.
compiler->EnsureBasicBlockEpoch();
#ifdef DEBUG
blockEpoch = compiler->GetCurBasicBlockEpoch();
#endif // DEBUG
// Initialize the "visited" blocks set.
bbVisitedSet = BlockSetOps::MakeEmpty(compiler);
BlockSet readySet(BlockSetOps::MakeEmpty(compiler));
BlockSet predSet(BlockSetOps::MakeEmpty(compiler));
assert(blockSequence == nullptr && bbSeqCount == 0);
blockSequence = new (compiler, CMK_LSRA) BasicBlock*[compiler->fgBBcount];
bbNumMaxBeforeResolution = compiler->fgBBNumMax;
blockInfo = new (compiler, CMK_LSRA) LsraBlockInfo[bbNumMaxBeforeResolution + 1];
assert(blockSequenceWorkList == nullptr);
verifiedAllBBs = false;
hasCriticalEdges = false;
BasicBlock* nextBlock;
// We use a bbNum of 0 for entry RefPositions.
// The other information in blockInfo[0] will never be used.
blockInfo[0].weight = BB_UNITY_WEIGHT;
#if TRACK_LSRA_STATS
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
blockInfo[0].stats[statIndex] = 0;
}
#endif // TRACK_LSRA_STATS
JITDUMP("Start LSRA Block Sequence: \n");
for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = nextBlock)
{
JITDUMP("Current block: " FMT_BB "\n", block->bbNum);
blockSequence[bbSeqCount] = block;
markBlockVisited(block);
bbSeqCount++;
nextBlock = nullptr;
// Initialize the blockInfo.
// predBBNum will be set later.
// 0 is never used as a bbNum, but is used in blockInfo to designate an exception entry block.
blockInfo[block->bbNum].predBBNum = 0;
// We check for critical edges below, but initialize to false.
blockInfo[block->bbNum].hasCriticalInEdge = false;
blockInfo[block->bbNum].hasCriticalOutEdge = false;
blockInfo[block->bbNum].weight = block->getBBWeight(compiler);
blockInfo[block->bbNum].hasEHBoundaryIn = block->hasEHBoundaryIn();
blockInfo[block->bbNum].hasEHBoundaryOut = block->hasEHBoundaryOut();
blockInfo[block->bbNum].hasEHPred = false;
#if TRACK_LSRA_STATS
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
blockInfo[block->bbNum].stats[statIndex] = 0;
}
#endif // TRACK_LSRA_STATS
// We treat BBCallAlwaysPairTail blocks as having EH flow, since we can't
// insert resolution moves into those blocks.
if (block->isBBCallAlwaysPairTail())
{
blockInfo[block->bbNum].hasEHBoundaryIn = true;
blockInfo[block->bbNum].hasEHBoundaryOut = true;
}
bool hasUniquePred = (block->GetUniquePred(compiler) != nullptr);
for (BasicBlock* const predBlock : block->PredBlocks())
{
if (!hasUniquePred)
{
if (predBlock->NumSucc(compiler) > 1)
{
blockInfo[block->bbNum].hasCriticalInEdge = true;
hasCriticalEdges = true;
}
else if (predBlock->bbJumpKind == BBJ_SWITCH)
{
assert(!"Switch with single successor");
}
}
if (!block->isBBCallAlwaysPairTail() &&
(predBlock->hasEHBoundaryOut() || predBlock->isBBCallAlwaysPairTail()))
{
assert(!block->isBBCallAlwaysPairTail());
if (hasUniquePred)
{
// A unique pred with an EH out edge won't allow us to keep any variables enregistered.
blockInfo[block->bbNum].hasEHBoundaryIn = true;
}
else
{
blockInfo[block->bbNum].hasEHPred = true;
}
}
}
// Determine which block to schedule next.
// First, update the NORMAL successors of the current block, adding them to the worklist
// according to the desired order. We will handle the EH successors below.
const unsigned numSuccs = block->NumSucc(compiler);
bool checkForCriticalOutEdge = (numSuccs > 1);
if (!checkForCriticalOutEdge && block->bbJumpKind == BBJ_SWITCH)
{
assert(!"Switch with single successor");
}
for (unsigned succIndex = 0; succIndex < numSuccs; succIndex++)
{
BasicBlock* succ = block->GetSucc(succIndex, compiler);
if (checkForCriticalOutEdge && succ->GetUniquePred(compiler) == nullptr)
{
blockInfo[block->bbNum].hasCriticalOutEdge = true;
hasCriticalEdges = true;
// We can stop checking now.
checkForCriticalOutEdge = false;
}
if (isTraversalLayoutOrder() || isBlockVisited(succ))
{
continue;
}
// We've now seen a predecessor, so add it to the work list and the "readySet".
// It will be inserted in the worklist according to the specified traversal order
// (i.e. pred-first or random, since layout order is handled above).
if (!BlockSetOps::IsMember(compiler, readySet, succ->bbNum))
{
JITDUMP("\tSucc block: " FMT_BB, succ->bbNum);
addToBlockSequenceWorkList(readySet, succ, predSet);
BlockSetOps::AddElemD(compiler, readySet, succ->bbNum);
}
}
// For layout order, simply use bbNext
if (isTraversalLayoutOrder())
{
nextBlock = block->bbNext;
continue;
}
while (nextBlock == nullptr)
{
nextBlock = getNextCandidateFromWorkList();
// TODO-Throughput: We would like to bypass this traversal if we know we've handled all
// the blocks - but fgBBcount does not appear to be updated when blocks are removed.
if (nextBlock == nullptr /* && bbSeqCount != compiler->fgBBcount*/ && !verifiedAllBBs)
{
// If we don't encounter all blocks by traversing the regular successor links, do a full
// traversal of all the blocks, and add them in layout order.
// This may include:
// - internal-only blocks which may not be in the flow graph
// - blocks that have become unreachable due to optimizations, but that are strongly
// connected (these are not removed)
// - EH blocks
for (BasicBlock* const seqBlock : compiler->Blocks())
{
if (!isBlockVisited(seqBlock))
{
JITDUMP("\tUnvisited block: " FMT_BB, seqBlock->bbNum);
addToBlockSequenceWorkList(readySet, seqBlock, predSet);
BlockSetOps::AddElemD(compiler, readySet, seqBlock->bbNum);
}
}
verifiedAllBBs = true;
}
else
{
break;
}
}
}
blockSequencingDone = true;
#ifdef DEBUG
// Make sure that we've visited all the blocks.
for (BasicBlock* const block : compiler->Blocks())
{
assert(isBlockVisited(block));
}
JITDUMP("Final LSRA Block Sequence: \n");
int i = 1;
for (BasicBlock *block = startBlockSequence(); block != nullptr; ++i, block = moveToNextBlock())
{
JITDUMP(FMT_BB, block->bbNum);
JITDUMP("(%6s) ", refCntWtd2str(block->getBBWeight(compiler)));
if (blockInfo[block->bbNum].hasEHBoundaryIn)
{
JITDUMP(" EH-in");
}
if (blockInfo[block->bbNum].hasEHBoundaryOut)
{
JITDUMP(" EH-out");
}
if (blockInfo[block->bbNum].hasEHPred)
{
JITDUMP(" has EH pred");
}
JITDUMP("\n");
}
JITDUMP("\n");
#endif
}
//------------------------------------------------------------------------
// compareBlocksForSequencing: Compare two basic blocks for sequencing order.
//
// Arguments:
// block1 - the first block for comparison
// block2 - the second block for comparison
// useBlockWeights - whether to use block weights for comparison
//
// Return Value:
// -1 if block1 is preferred.
// 0 if the blocks are equivalent.
// 1 if block2 is preferred.
//
// Notes:
// See addToBlockSequenceWorkList.
int LinearScan::compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights)
{
if (useBlockWeights)
{
weight_t weight1 = block1->getBBWeight(compiler);
weight_t weight2 = block2->getBBWeight(compiler);
if (weight1 > weight2)
{
return -1;
}
else if (weight1 < weight2)
{
return 1;
}
}
// If the weights are the same, prefer the LOWER bbNum
if (block1->bbNum < block2->bbNum)
{
return -1;
}
else if (block1->bbNum == block2->bbNum)
{
return 0;
}
else
{
return 1;
}
}
//------------------------------------------------------------------------
// addToBlockSequenceWorkList: Add a BasicBlock to the work list for sequencing.
//
// Arguments:
// sequencedBlockSet - the set of blocks that are already sequenced
// block - the new block to be added
// predSet - a block set allocated by the caller, used here as a temporary set for
// constructing the predecessor set of 'block'; it is passed in to avoid
// allocating a new block set on every call to this function
//
// Return Value:
// None.
//
// Notes:
// The first block in the list will be the next one to be sequenced, as soon
// as we encounter a block whose successors have all been sequenced, in pred-first
// order, or the very next block if we are traversing in random order (once implemented).
// This method uses a comparison method to determine the order in which to place
// the blocks in the list. This method queries whether all predecessors of the
// block are sequenced at the time it is added to the list and if so uses block weights
// for inserting the block. A block is never inserted ahead of its predecessors.
// A block at the time of insertion may not have all its predecessors sequenced, in
// which case it will be sequenced based on its block number. Once a block is inserted,
// its priority/order will not be changed later once its remaining predecessors are
// sequenced. This means that the work list may not be sorted entirely based on
// block weights alone.
//
// Note also that, when random traversal order is implemented, this method
// should insert the blocks into the list in random order, so that we can always
// simply select the first block in the list.
void LinearScan::addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet)
{
// The block that is being added is not already sequenced
assert(!BlockSetOps::IsMember(compiler, sequencedBlockSet, block->bbNum));
// Get predSet of block
BlockSetOps::ClearD(compiler, predSet);
for (BasicBlock* const predBlock : block->PredBlocks())
{
BlockSetOps::AddElemD(compiler, predSet, predBlock->bbNum);
}
// If the block is rarely run, or all its preds are already sequenced, use the block's weight for sequencing
bool useBlockWeight = block->isRunRarely() || BlockSetOps::IsSubset(compiler, sequencedBlockSet, predSet);
JITDUMP(", Criteria: %s", useBlockWeight ? "weight" : "bbNum");
BasicBlockList* prevNode = nullptr;
BasicBlockList* nextNode = blockSequenceWorkList;
while (nextNode != nullptr)
{
int seqResult;
if (nextNode->block->isRunRarely())
{
// If the block that is yet to be sequenced is a rarely run block, always use block weights for sequencing
seqResult = compareBlocksForSequencing(nextNode->block, block, true);
}
else if (BlockSetOps::IsMember(compiler, predSet, nextNode->block->bbNum))
{
// always prefer unsequenced pred blocks
seqResult = -1;
}
else
{
seqResult = compareBlocksForSequencing(nextNode->block, block, useBlockWeight);
}
if (seqResult > 0)
{
break;
}
prevNode = nextNode;
nextNode = nextNode->next;
}
BasicBlockList* newListNode = new (compiler, CMK_LSRA) BasicBlockList(block, nextNode);
if (prevNode == nullptr)
{
blockSequenceWorkList = newListNode;
}
else
{
prevNode->next = newListNode;
}
#ifdef DEBUG
nextNode = blockSequenceWorkList;
JITDUMP(", Worklist: [");
while (nextNode != nullptr)
{
JITDUMP(FMT_BB " ", nextNode->block->bbNum);
nextNode = nextNode->next;
}
JITDUMP("]\n");
#endif
}
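//------------------------------------------------------------------------
// removeFromBlockSequenceWorkList: Remove a node from the block sequencing work list.
//
// Arguments:
//    listNode - the BasicBlockList node to remove
//    prevNode - the node preceding 'listNode' in the work list, or nullptr if
//               'listNode' is at the head of the list
//
// Return Value:
//    None.
//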
void LinearScan::removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode)
{
if (listNode == blockSequenceWorkList)
{
assert(prevNode == nullptr);
blockSequenceWorkList = listNode->next;
}
else
{
assert(prevNode != nullptr && prevNode->next == listNode);
prevNode->next = listNode->next;
}
// TODO-Cleanup: consider merging Compiler::BlockListNode and BasicBlockList
// compiler->FreeBlockListNode(listNode);
}
//------------------------------------------------------------------------
// startBlockSequence: Initialize the block order for allocation (called each time a new traversal begins).
//
// Return Value:
//    The first block to allocate, which is always compiler->fgFirstBB.
//
BasicBlock* LinearScan::startBlockSequence()
{
if (!blockSequencingDone)
{
setBlockSequence();
}
else
{
clearVisitedBlocks();
}
BasicBlock* curBB = compiler->fgFirstBB;
curBBSeqNum = 0;
curBBNum = curBB->bbNum;
assert(blockSequence[0] == compiler->fgFirstBB);
markBlockVisited(curBB);
return curBB;
}
//------------------------------------------------------------------------
// moveToNextBlock: Move to the next block in order for allocation or resolution.
//
// Arguments:
// None
//
// Return Value:
// The next block.
//
// Notes:
// This method is used when the next block is actually going to be handled.
// It changes curBBNum.
BasicBlock* LinearScan::moveToNextBlock()
{
BasicBlock* nextBlock = getNextBlock();
curBBSeqNum++;
if (nextBlock != nullptr)
{
curBBNum = nextBlock->bbNum;
}
return nextBlock;
}
//------------------------------------------------------------------------
// getNextBlock: Get the next block in order for allocation or resolution.
//
// Arguments:
// None
//
// Return Value:
// The next block.
//
// Notes:
// This method does not actually change the current block - it is used simply
// to determine which block will be next.
BasicBlock* LinearScan::getNextBlock()
{
assert(blockSequencingDone);
unsigned int nextBBSeqNum = curBBSeqNum + 1;
if (nextBBSeqNum < bbSeqCount)
{
return blockSequence[nextBBSeqNum];
}
return nullptr;
}
//------------------------------------------------------------------------
// doLinearScan: The main method for register allocation.
//
// Arguments:
// None
//
// Return Value:
// None.
//
void LinearScan::doLinearScan()
{
// Check to see whether we have any local variables to enregister.
// We initialize this in the constructor based on opt settings,
// but we don't want to spend time on the lclVar parts of LinearScan
// if we have no tracked locals.
if (enregisterLocalVars && (compiler->lvaTrackedCount == 0))
{
enregisterLocalVars = false;
}
splitBBNumToTargetBBNumMap = nullptr;
// This is complicated by the fact that physical registers have refs associated
// with locations where they are killed (e.g. calls), but we don't want to
// count these as being touched.
compiler->codeGen->regSet.rsClearRegsModified();
initMaxSpill();
buildIntervals();
DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_REFPOS));
compiler->EndPhase(PHASE_LINEAR_SCAN_BUILD);
DBEXEC(VERBOSE, lsraDumpIntervals("after buildIntervals"));
initVarRegMaps();
allocateRegisters();
allocationPassComplete = true;
compiler->EndPhase(PHASE_LINEAR_SCAN_ALLOC);
resolveRegisters();
compiler->EndPhase(PHASE_LINEAR_SCAN_RESOLVE);
assert(blockSequencingDone); // Should do at least one traversal.
assert(blockEpoch == compiler->GetCurBasicBlockEpoch());
#if TRACK_LSRA_STATS
if ((JitConfig.DisplayLsraStats() == 1)
#ifdef DEBUG
|| VERBOSE
#endif
)
{
dumpLsraStats(jitstdout);
}
#endif // TRACK_LSRA_STATS
DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_POST));
#ifdef DEBUG
compiler->fgDebugCheckLinks();
#endif
compiler->compLSRADone = true;
}
//------------------------------------------------------------------------
// recordVarLocationsAtStartOfBB: Update live-in LclVarDscs with the appropriate
// register location at the start of a block, during codegen.
//
// Arguments:
// bb - the block for which code is about to be generated.
//
// Return Value:
// None.
//
// Assumptions:
// CodeGen will take care of updating the reg masks and the current var liveness,
// after calling this method.
// This is because we need to kill off the dead registers before setting the newly live ones.
void LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb)
{
if (!enregisterLocalVars)
{
return;
}
JITDUMP("Recording Var Locations at start of " FMT_BB "\n", bb->bbNum);
VarToRegMap map = getInVarToRegMap(bb->bbNum);
unsigned count = 0;
VarSetOps::AssignNoCopy(compiler, currentLiveVars,
VarSetOps::Intersection(compiler, registerCandidateVars, bb->bbLiveIn));
VarSetOps::Iter iter(compiler, currentLiveVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned varNum = compiler->lvaTrackedIndexToLclNum(varIndex);
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
regNumber oldRegNum = varDsc->GetRegNum();
regNumber newRegNum = getVarReg(map, varIndex);
if (oldRegNum != newRegNum)
{
JITDUMP(" V%02u(%s->%s)", varNum, compiler->compRegVarName(oldRegNum),
compiler->compRegVarName(newRegNum));
varDsc->SetRegNum(newRegNum);
count++;
#ifdef USING_VARIABLE_LIVE_RANGE
BasicBlock* prevReportedBlock = bb->bbPrev;
if (bb->bbPrev != nullptr && bb->bbPrev->isBBCallAlwaysPairTail())
{
// For callf+always pair we generate the code for the always
// block in genCallFinally and skip it, so we don't report
// anything for it (it has only trivial instructions, so that
// does not matter much). So whether we need to rehome or not
// depends on what we reported at the end of the callf block.
prevReportedBlock = bb->bbPrev->bbPrev;
}
if (prevReportedBlock != nullptr && VarSetOps::IsMember(compiler, prevReportedBlock->bbLiveOut, varIndex))
{
// varDsc was alive on previous block end so it has an open
// "VariableLiveRange" which should change to be according to
// "getInVarToRegMap"
compiler->codeGen->getVariableLiveKeeper()->siUpdateVariableLiveRange(varDsc, varNum);
}
#endif // USING_VARIABLE_LIVE_RANGE
}
else if (newRegNum != REG_STK)
{
JITDUMP(" V%02u(%s)", varNum, compiler->compRegVarName(newRegNum));
count++;
}
}
if (count == 0)
{
JITDUMP(" <none>\n");
}
JITDUMP("\n");
}
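//------------------------------------------------------------------------
// Interval::setLocalNumber: Associate this Interval with a tracked local variable.
//
// Arguments:
//    compiler - the compiler instance
//    lclNum   - the local variable number
//    linScan  - the LinearScan instance; its mapping from tracked variable index
//               to Interval is updated to point at this Interval
//
// Return Value:
//    None.
//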
void Interval::setLocalNumber(Compiler* compiler, unsigned lclNum, LinearScan* linScan)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
assert(varDsc->lvTracked);
assert(varDsc->lvVarIndex < compiler->lvaTrackedCount);
linScan->localVarIntervals[varDsc->lvVarIndex] = this;
assert(linScan->getIntervalForLocalVar(varDsc->lvVarIndex) == this);
this->isLocalVar = true;
this->varNum = lclNum;
}
//------------------------------------------------------------------------
// LinearScan:identifyCandidatesExceptionDataflow: Build the set of variables exposed on EH flow edges
//
// Notes:
// This logic was originally cloned from fgInterBlockLocalVarLiveness.
//
void LinearScan::identifyCandidatesExceptionDataflow()
{
for (BasicBlock* const block : compiler->Blocks())
{
if (block->hasEHBoundaryIn())
{
// live on entry to handler
VarSetOps::UnionD(compiler, exceptVars, block->bbLiveIn);
}
if (block->hasEHBoundaryOut())
{
VarSetOps::UnionD(compiler, exceptVars, block->bbLiveOut);
if (block->bbJumpKind == BBJ_EHFINALLYRET)
{
// Live on exit from finally.
// We track these separately because, in addition to having EH live-out semantics,
// we need to mark them must-init.
VarSetOps::UnionD(compiler, finallyVars, block->bbLiveOut);
}
}
}
#ifdef DEBUG
if (VERBOSE)
{
JITDUMP("EH Vars: ");
INDEBUG(dumpConvertedVarSet(compiler, exceptVars));
JITDUMP("\nFinally Vars: ");
INDEBUG(dumpConvertedVarSet(compiler, finallyVars));
JITDUMP("\n\n");
}
// All variables live on exit from a 'finally' block should be marked lvLiveInOutOfHndlr,
// and as 'explicitly initialized' (must-init) for GC-ref types.
VarSetOps::Iter iter(compiler, exceptVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned varNum = compiler->lvaTrackedIndexToLclNum(varIndex);
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(varDsc->lvLiveInOutOfHndlr);
if (varTypeIsGC(varDsc) && VarSetOps::IsMember(compiler, finallyVars, varIndex) && !varDsc->lvIsParam)
{
assert(varDsc->lvMustInit);
}
}
#endif
}
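//------------------------------------------------------------------------
// isRegCandidate: Determine whether the given lclVar is a candidate for enregistration.
//
// Arguments:
//    varDsc - the LclVarDsc of interest
//
// Return Value:
//    true iff the variable may be allocated to a register by LSRA.
//
// Notes:
//    As a side effect, this may mark the variable as DoNotEnregister (recording the
//    reason), or clear its 'lvTracked' bit for pinned variables.
//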
bool LinearScan::isRegCandidate(LclVarDsc* varDsc)
{
if (!enregisterLocalVars)
{
return false;
}
assert(compiler->compEnregLocals());
if (!varDsc->lvTracked)
{
return false;
}
#if !defined(TARGET_64BIT)
if (varDsc->lvType == TYP_LONG)
{
// Long variables should not be register candidates.
// Lowering will have split any candidate lclVars into lo/hi vars.
return false;
}
#endif // !defined(TARGET_64BIT)
// If we have JMP, reg args must be put on the stack
if (compiler->compJmpOpUsed && varDsc->lvIsRegArg)
{
return false;
}
// Don't allocate registers for dependently promoted struct fields
if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
return false;
}
// Don't enregister if the ref count is zero.
if (varDsc->lvRefCnt() == 0)
{
varDsc->setLvRefCntWtd(0);
return false;
}
// Variables that are address-exposed are never enregistered, or tracked.
// A struct may be promoted, and a struct that fits in a register may be fully enregistered.
// Pinned variables may not be tracked (a condition of the GCInfo representation)
// or enregistered, on x86 -- it is believed that we can enregister pinned (more properly, "pinning")
// references when using the general GC encoding.
unsigned lclNum = compiler->lvaGetLclNum(varDsc);
if (varDsc->IsAddressExposed() || !varDsc->IsEnregisterableType() ||
(!compiler->compEnregStructLocals() && (varDsc->lvType == TYP_STRUCT)))
{
#ifdef DEBUG
DoNotEnregisterReason dner;
if (varDsc->IsAddressExposed())
{
dner = DoNotEnregisterReason::AddrExposed;
}
else if (!varDsc->IsEnregisterableType())
{
dner = DoNotEnregisterReason::NotRegSizeStruct;
}
else
{
dner = DoNotEnregisterReason::DontEnregStructs;
}
#endif // DEBUG
compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(dner));
return false;
}
else if (varDsc->lvPinned)
{
varDsc->lvTracked = 0;
#ifdef JIT32_GCENCODER
compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::PinningRef));
#endif // JIT32_GCENCODER
return false;
}
// Are we not optimizing and we have exception handlers?
// If so, mark all args and locals as volatile, so that they
// won't ever get enregistered.
//
if (compiler->opts.MinOpts() && compiler->compHndBBtabCount > 0)
{
compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler));
}
if (varDsc->lvDoNotEnregister)
{
return false;
}
switch (genActualType(varDsc->TypeGet()))
{
case TYP_FLOAT:
case TYP_DOUBLE:
return !compiler->opts.compDbgCode;
case TYP_INT:
case TYP_LONG:
case TYP_REF:
case TYP_BYREF:
break;
#ifdef FEATURE_SIMD
case TYP_SIMD8:
case TYP_SIMD12:
case TYP_SIMD16:
case TYP_SIMD32:
return !varDsc->lvPromoted;
#endif // FEATURE_SIMD
case TYP_STRUCT:
// TODO-1stClassStructs: support vars with GC pointers. The issue is that such
// vars will have `lvMustInit` set, because the emitter has poor support for struct liveness,
// but if the variable is tracked the prolog generator would expect it to be in liveIn set,
// so an assert in `genFnProlog` will fire.
return compiler->compEnregStructLocals() && !varDsc->HasGCPtr();
case TYP_UNDEF:
case TYP_UNKNOWN:
noway_assert(!"lvType not set correctly");
varDsc->lvType = TYP_INT;
return false;
default:
return false;
}
return true;
}
//------------------------------------------------------------------------
// identifyCandidates: Identify locals & compiler temps that are register candidates.
//
// Notes:
//    TODO-Cleanup: This was cloned from Compiler::lvaSortByRefCount() in lclvars.cpp in order
//    to avoid perturbation, but should be merged.
//
void LinearScan::identifyCandidates()
{
if (enregisterLocalVars)
{
// Initialize the set of lclVars that are candidates for register allocation.
VarSetOps::AssignNoCopy(compiler, registerCandidateVars, VarSetOps::MakeEmpty(compiler));
// Initialize the sets of lclVars that are used to determine whether, and for which lclVars,
// we need to perform resolution across basic blocks.
// Note that we can't do this in the constructor because the number of tracked lclVars may
// change between the constructor and the actual allocation.
VarSetOps::AssignNoCopy(compiler, resolutionCandidateVars, VarSetOps::MakeEmpty(compiler));
VarSetOps::AssignNoCopy(compiler, splitOrSpilledVars, VarSetOps::MakeEmpty(compiler));
// We set enregisterLocalVars to true only if there are tracked lclVars
assert(compiler->lvaCount != 0);
}
else if (compiler->lvaCount == 0)
{
// Nothing to do. Note that even if enregisterLocalVars is false, we still need to set the
// lvLRACandidate field on all the lclVars to false if we have any.
return;
}
VarSetOps::AssignNoCopy(compiler, exceptVars, VarSetOps::MakeEmpty(compiler));
VarSetOps::AssignNoCopy(compiler, finallyVars, VarSetOps::MakeEmpty(compiler));
if (compiler->compHndBBtabCount > 0)
{
identifyCandidatesExceptionDataflow();
}
unsigned lclNum;
LclVarDsc* varDsc;
// While we build intervals for the candidate lclVars, we will determine the floating point
// lclVars, if any, to consider for callee-save register preferencing.
// We maintain two sets of FP vars - those that meet the first threshold of weighted ref Count,
// and those that meet the second.
// The first threshold is used for methods that are heuristically deemed either to have light
// fp usage, or other factors that encourage conservative use of callee-save registers, such
// as multiple exits (where there might be an early exit that would be excessively penalized by
// lots of prolog/epilog saves & restores).
// The second threshold is used where there are factors deemed to make it more likely that
// fp callee save registers will be needed, such as loops or many fp vars.
// We keep two sets of vars, since we collect some of the information to determine which set to
// use as we iterate over the vars.
// When we are generating AVX code on non-Unix (FEATURE_PARTIAL_SIMD_CALLEE_SAVE), we maintain an
// additional set of LargeVectorType vars, and there is a separate threshold defined for those.
// It is assumed that if we encounter these, that we should consider this a "high use" scenario,
// so we don't maintain two sets of these vars.
// This is defined as thresholdLargeVectorRefCntWtd, as we are likely to use the same mechanism
// for vectors on Arm64, though the actual value may differ.
unsigned int floatVarCount = 0;
weight_t thresholdFPRefCntWtd = 4 * BB_UNITY_WEIGHT;
weight_t maybeFPRefCntWtd = 2 * BB_UNITY_WEIGHT;
VARSET_TP fpMaybeCandidateVars(VarSetOps::UninitVal());
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
unsigned int largeVectorVarCount = 0;
weight_t thresholdLargeVectorRefCntWtd = 4 * BB_UNITY_WEIGHT;
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (enregisterLocalVars)
{
VarSetOps::AssignNoCopy(compiler, fpCalleeSaveCandidateVars, VarSetOps::MakeEmpty(compiler));
VarSetOps::AssignNoCopy(compiler, fpMaybeCandidateVars, VarSetOps::MakeEmpty(compiler));
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
VarSetOps::AssignNoCopy(compiler, largeVectorVars, VarSetOps::MakeEmpty(compiler));
VarSetOps::AssignNoCopy(compiler, largeVectorCalleeSaveCandidateVars, VarSetOps::MakeEmpty(compiler));
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
}
#if DOUBLE_ALIGN
unsigned refCntStk = 0;
unsigned refCntReg = 0;
weight_t refCntWtdReg = 0;
unsigned refCntStkParam = 0; // sum of ref counts for all stack based parameters
weight_t refCntWtdStkDbl = 0; // sum of wtd ref counts for stack based doubles
doDoubleAlign = false;
bool checkDoubleAlign = true;
if (compiler->codeGen->isFramePointerRequired() || compiler->opts.MinOpts())
{
checkDoubleAlign = false;
}
else
{
switch (compiler->getCanDoubleAlign())
{
case MUST_DOUBLE_ALIGN:
doDoubleAlign = true;
checkDoubleAlign = false;
break;
case CAN_DOUBLE_ALIGN:
break;
case CANT_DOUBLE_ALIGN:
doDoubleAlign = false;
checkDoubleAlign = false;
break;
default:
unreached();
}
}
#endif // DOUBLE_ALIGN
// Check whether register variables are permitted.
if (!enregisterLocalVars)
{
localVarIntervals = nullptr;
}
else if (compiler->lvaTrackedCount > 0)
{
// initialize mapping from tracked local to interval
localVarIntervals = new (compiler, CMK_LSRA) Interval*[compiler->lvaTrackedCount];
}
INTRACK_STATS(regCandidateVarCount = 0);
for (lclNum = 0, varDsc = compiler->lvaTable; lclNum < compiler->lvaCount; lclNum++, varDsc++)
{
// Initialize all variables to REG_STK
varDsc->SetRegNum(REG_STK);
#ifndef TARGET_64BIT
varDsc->SetOtherReg(REG_STK);
#endif // TARGET_64BIT
if (!enregisterLocalVars)
{
varDsc->lvLRACandidate = false;
continue;
}
#if DOUBLE_ALIGN
if (checkDoubleAlign)
{
if (varDsc->lvIsParam && !varDsc->lvIsRegArg)
{
refCntStkParam += varDsc->lvRefCnt();
}
else if (!isRegCandidate(varDsc) || varDsc->lvDoNotEnregister)
{
refCntStk += varDsc->lvRefCnt();
if ((varDsc->lvType == TYP_DOUBLE) ||
((varTypeIsStruct(varDsc) && varDsc->lvStructDoubleAlign &&
(compiler->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT))))
{
refCntWtdStkDbl += varDsc->lvRefCntWtd();
}
}
else
{
refCntReg += varDsc->lvRefCnt();
refCntWtdReg += varDsc->lvRefCntWtd();
}
}
#endif // DOUBLE_ALIGN
// Start with the assumption that it's a candidate.
varDsc->lvLRACandidate = 1;
// Start with lvRegister as false - set it true only if the variable gets
// the same register assignment throughout
varDsc->lvRegister = false;
if (!isRegCandidate(varDsc))
{
varDsc->lvLRACandidate = 0;
if (varDsc->lvTracked)
{
localVarIntervals[varDsc->lvVarIndex] = nullptr;
}
// The current implementation of multi-reg structs that are referenced collectively
// (i.e. by referring to the parent lclVar rather than each field separately) relies
// on all or none of the fields being candidates.
if (varDsc->lvIsStructField)
{
LclVarDsc* parentVarDsc = compiler->lvaGetDesc(varDsc->lvParentLcl);
if (parentVarDsc->lvIsMultiRegRet && !parentVarDsc->lvDoNotEnregister)
{
JITDUMP("Setting multi-reg struct V%02u as not enregisterable:", varDsc->lvParentLcl);
compiler->lvaSetVarDoNotEnregister(varDsc->lvParentLcl DEBUGARG(DoNotEnregisterReason::BlockOp));
for (unsigned int i = 0; i < parentVarDsc->lvFieldCnt; i++)
{
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(parentVarDsc->lvFieldLclStart + i);
JITDUMP(" V%02u", parentVarDsc->lvFieldLclStart + i);
if (fieldVarDsc->lvTracked)
{
fieldVarDsc->lvLRACandidate = 0;
localVarIntervals[fieldVarDsc->lvVarIndex] = nullptr;
VarSetOps::RemoveElemD(compiler, registerCandidateVars, fieldVarDsc->lvVarIndex);
JITDUMP("*");
}
// This is not accurate, but we need a non-zero refCnt for the parent so that it will
// be allocated to the stack.
parentVarDsc->setLvRefCnt(parentVarDsc->lvRefCnt() + fieldVarDsc->lvRefCnt());
}
JITDUMP("\n");
}
}
continue;
}
if (varDsc->lvLRACandidate)
{
var_types type = varDsc->GetActualRegisterType();
if (varTypeUsesFloatReg(type))
{
compiler->compFloatingPointUsed = true;
}
Interval* newInt = newInterval(type);
newInt->setLocalNumber(compiler, lclNum, this);
VarSetOps::AddElemD(compiler, registerCandidateVars, varDsc->lvVarIndex);
// we will set this later when we have determined liveness
varDsc->lvMustInit = false;
if (varDsc->lvIsStructField)
{
newInt->isStructField = true;
}
if (varDsc->lvLiveInOutOfHndlr)
{
newInt->isWriteThru = varDsc->lvSingleDefRegCandidate;
setIntervalAsSpilled(newInt);
}
INTRACK_STATS(regCandidateVarCount++);
// We maintain two sets of FP vars - those that meet the first threshold of weighted ref Count,
// and those that meet the second (see the definitions of thresholdFPRefCntWtd and maybeFPRefCntWtd
// above).
CLANG_FORMAT_COMMENT_ANCHOR;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Additionally, when we are generating code for a target with partial SIMD callee-save
// (AVX on non-UNIX amd64 and 16-byte vectors on arm64), we keep a separate set of the
// LargeVectorType vars.
if (Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()))
{
largeVectorVarCount++;
VarSetOps::AddElemD(compiler, largeVectorVars, varDsc->lvVarIndex);
weight_t refCntWtd = varDsc->lvRefCntWtd();
if (refCntWtd >= thresholdLargeVectorRefCntWtd)
{
VarSetOps::AddElemD(compiler, largeVectorCalleeSaveCandidateVars, varDsc->lvVarIndex);
}
}
else
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (regType(type) == FloatRegisterType)
{
floatVarCount++;
weight_t refCntWtd = varDsc->lvRefCntWtd();
if (varDsc->lvIsRegArg)
{
// Don't count the initial reference for register params. In those cases,
// using a callee-save causes an extra copy.
refCntWtd -= BB_UNITY_WEIGHT;
}
if (refCntWtd >= thresholdFPRefCntWtd)
{
VarSetOps::AddElemD(compiler, fpCalleeSaveCandidateVars, varDsc->lvVarIndex);
}
else if (refCntWtd >= maybeFPRefCntWtd)
{
VarSetOps::AddElemD(compiler, fpMaybeCandidateVars, varDsc->lvVarIndex);
}
}
JITDUMP(" ");
DBEXEC(VERBOSE, newInt->dump());
}
else
{
localVarIntervals[varDsc->lvVarIndex] = nullptr;
}
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Create Intervals to use for the save & restore of the upper halves of large vector lclVars.
if (enregisterLocalVars)
{
VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars);
unsigned largeVectorVarIndex = 0;
while (largeVectorVarsIter.NextElem(&largeVectorVarIndex))
{
makeUpperVectorInterval(largeVectorVarIndex);
}
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#if DOUBLE_ALIGN
if (checkDoubleAlign)
{
// TODO-CQ: Fine-tune this:
// In the legacy reg predictor, this runs after allocation, and then demotes any lclVars
// allocated to the frame pointer, which is probably the wrong order.
// However, because it runs after allocation, it can determine the impact of demoting
// the lclVars allocated to the frame pointer.
// => Here, estimate of the EBP refCnt and weighted refCnt is a wild guess.
//
unsigned refCntEBP = refCntReg / 8;
weight_t refCntWtdEBP = refCntWtdReg / 8;
doDoubleAlign =
compiler->shouldDoubleAlign(refCntStk, refCntEBP, refCntWtdEBP, refCntStkParam, refCntWtdStkDbl);
}
#endif // DOUBLE_ALIGN
// The factors we consider to determine which set of fp vars to use as candidates for callee save
// registers currently include the number of fp vars, whether there are loops, and whether there are
// multiple exits. These have been selected somewhat empirically, but there is probably room for
// more tuning.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (VERBOSE)
{
printf("\nFP callee save candidate vars: ");
if (enregisterLocalVars && !VarSetOps::IsEmpty(compiler, fpCalleeSaveCandidateVars))
{
dumpConvertedVarSet(compiler, fpCalleeSaveCandidateVars);
printf("\n");
}
else
{
printf("None\n\n");
}
}
#endif
JITDUMP("floatVarCount = %d; hasLoops = %s, singleExit = %s\n", floatVarCount, dspBool(compiler->fgHasLoops),
dspBool(compiler->fgReturnBlocks == nullptr || compiler->fgReturnBlocks->next == nullptr));
// Determine whether to use the 2nd, more aggressive, threshold for fp callee saves.
if (floatVarCount > 6 && compiler->fgHasLoops &&
(compiler->fgReturnBlocks == nullptr || compiler->fgReturnBlocks->next == nullptr))
{
assert(enregisterLocalVars);
#ifdef DEBUG
if (VERBOSE)
{
printf("Adding additional fp callee save candidates: \n");
if (!VarSetOps::IsEmpty(compiler, fpMaybeCandidateVars))
{
dumpConvertedVarSet(compiler, fpMaybeCandidateVars);
printf("\n");
}
else
{
printf("None\n\n");
}
}
#endif
VarSetOps::UnionD(compiler, fpCalleeSaveCandidateVars, fpMaybeCandidateVars);
}
// From here on, we're only interested in the exceptVars that are candidates.
if (enregisterLocalVars && (compiler->compHndBBtabCount > 0))
{
VarSetOps::IntersectionD(compiler, exceptVars, registerCandidateVars);
}
#ifdef TARGET_ARM
#ifdef DEBUG
if (VERBOSE)
{
// Frame layout is only pre-computed for ARM
printf("\nlvaTable after IdentifyCandidates\n");
compiler->lvaTableDump(Compiler::FrameLayoutState::PRE_REGALLOC_FRAME_LAYOUT);
}
#endif // DEBUG
#endif // TARGET_ARM
}
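//------------------------------------------------------------------------
// initVarRegMaps: Allocate and initialize the per-block in/out VarToRegMaps.
//
// Notes:
//    Each tracked variable starts out mapped to REG_STK in every block. If local
//    variables are not being enregistered, the maps are not allocated.
//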
// TODO-Throughput: This mapping can surely be more efficiently done
void LinearScan::initVarRegMaps()
{
if (!enregisterLocalVars)
{
inVarToRegMaps = nullptr;
outVarToRegMaps = nullptr;
return;
}
assert(compiler->lvaTrackedFixed); // We should have already set this to prevent us from adding any new tracked
// variables.
// The compiler memory allocator requires that the allocation be an
// even multiple of int-sized objects
unsigned int varCount = compiler->lvaTrackedCount;
regMapCount = roundUp(varCount, (unsigned)sizeof(int));
// Not sure why blocks aren't numbered from zero, but they don't appear to be.
// So, if we want to index by bbNum we have to know the maximum value.
unsigned int bbCount = compiler->fgBBNumMax + 1;
inVarToRegMaps = new (compiler, CMK_LSRA) regNumberSmall*[bbCount];
outVarToRegMaps = new (compiler, CMK_LSRA) regNumberSmall*[bbCount];
if (varCount > 0)
{
// This VarToRegMap is used during the resolution of critical edges.
sharedCriticalVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount];
for (unsigned int i = 0; i < bbCount; i++)
{
VarToRegMap inVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount];
VarToRegMap outVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount];
for (unsigned int j = 0; j < regMapCount; j++)
{
inVarToRegMap[j] = REG_STK;
outVarToRegMap[j] = REG_STK;
}
inVarToRegMaps[i] = inVarToRegMap;
outVarToRegMaps[i] = outVarToRegMap;
}
}
else
{
sharedCriticalVarToRegMap = nullptr;
for (unsigned int i = 0; i < bbCount; i++)
{
inVarToRegMaps[i] = nullptr;
outVarToRegMaps[i] = nullptr;
}
}
}
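//------------------------------------------------------------------------
// setInVarRegForBB: Record the register (or REG_STK) for 'varNum' on entry to block 'bbNum'.
//
// Arguments:
//    bbNum  - the block of interest
//    varNum - the lclVar number (not the tracked index)
//    reg    - the register (or REG_STK) to record
//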
void LinearScan::setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg)
{
assert(enregisterLocalVars);
assert(reg < UCHAR_MAX && varNum < compiler->lvaCount);
inVarToRegMaps[bbNum][compiler->lvaTable[varNum].lvVarIndex] = (regNumberSmall)reg;
}
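//------------------------------------------------------------------------
// setOutVarRegForBB: Record the register (or REG_STK) for 'varNum' on exit from block 'bbNum'.
//
// Arguments:
//    bbNum  - the block of interest
//    varNum - the lclVar number (not the tracked index)
//    reg    - the register (or REG_STK) to record
//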
void LinearScan::setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg)
{
assert(enregisterLocalVars);
assert(reg < UCHAR_MAX && varNum < compiler->lvaCount);
outVarToRegMaps[bbNum][compiler->lvaTable[varNum].lvVarIndex] = (regNumberSmall)reg;
}
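//------------------------------------------------------------------------
// getSplitEdgeInfo: Get the SplitEdgeInfo for a block that was created during resolution
//                   to split a critical edge.
//
// Arguments:
//    bbNum - the block number; must be greater than bbNumMaxBeforeResolution
//
// Return Value:
//    The SplitEdgeInfo giving the 'from' and 'to' block numbers of the split edge.
//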
LinearScan::SplitEdgeInfo LinearScan::getSplitEdgeInfo(unsigned int bbNum)
{
assert(enregisterLocalVars);
SplitEdgeInfo splitEdgeInfo;
assert(bbNum <= compiler->fgBBNumMax);
assert(bbNum > bbNumMaxBeforeResolution);
assert(splitBBNumToTargetBBNumMap != nullptr);
splitBBNumToTargetBBNumMap->Lookup(bbNum, &splitEdgeInfo);
assert(splitEdgeInfo.toBBNum <= bbNumMaxBeforeResolution);
assert(splitEdgeInfo.fromBBNum <= bbNumMaxBeforeResolution);
return splitEdgeInfo;
}
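//------------------------------------------------------------------------
// getInVarToRegMap: Get the incoming VarToRegMap for the given block.
//
// Arguments:
//    bbNum - the block of interest
//
// Return Value:
//    The map of tracked variable indices to their locations on entry to the block.
//    For a block inserted to split a critical edge, this is the outgoing map of the
//    'from' block (or the incoming map of the 'to' block for an empty split block).
//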
VarToRegMap LinearScan::getInVarToRegMap(unsigned int bbNum)
{
assert(enregisterLocalVars);
assert(bbNum <= compiler->fgBBNumMax);
// For the blocks inserted to split critical edges, the inVarToRegMap is
// equal to the outVarToRegMap at the "from" block.
if (bbNum > bbNumMaxBeforeResolution)
{
SplitEdgeInfo splitEdgeInfo = getSplitEdgeInfo(bbNum);
unsigned fromBBNum = splitEdgeInfo.fromBBNum;
if (fromBBNum == 0)
{
assert(splitEdgeInfo.toBBNum != 0);
return inVarToRegMaps[splitEdgeInfo.toBBNum];
}
else
{
return outVarToRegMaps[fromBBNum];
}
}
return inVarToRegMaps[bbNum];
}
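//------------------------------------------------------------------------
// getOutVarToRegMap: Get the outgoing VarToRegMap for the given block.
//
// Arguments:
//    bbNum - the block of interest
//
// Return Value:
//    The map of tracked variable indices to their locations on exit from the block,
//    or nullptr for bbNum 0. For a block inserted to split a critical edge, this is
//    the incoming map of the 'to' block (or the outgoing map of the 'from' block for
//    an empty split block).
//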
VarToRegMap LinearScan::getOutVarToRegMap(unsigned int bbNum)
{
assert(enregisterLocalVars);
assert(bbNum <= compiler->fgBBNumMax);
if (bbNum == 0)
{
return nullptr;
}
// For the blocks inserted to split critical edges, the outVarToRegMap is
// equal to the inVarToRegMap at the target.
if (bbNum > bbNumMaxBeforeResolution)
{
// If this is an empty block, its in and out maps are both the same.
// We identify this case by setting fromBBNum or toBBNum to 0, and using only the other.
SplitEdgeInfo splitEdgeInfo = getSplitEdgeInfo(bbNum);
unsigned toBBNum = splitEdgeInfo.toBBNum;
if (toBBNum == 0)
{
assert(splitEdgeInfo.fromBBNum != 0);
return outVarToRegMaps[splitEdgeInfo.fromBBNum];
}
else
{
return inVarToRegMaps[toBBNum];
}
}
return outVarToRegMaps[bbNum];
}
//------------------------------------------------------------------------
// setVarReg: Set the register associated with a variable in the given 'bbVarToRegMap'.
//
// Arguments:
// bbVarToRegMap - the map of interest
// trackedVarIndex - the lvVarIndex for the variable
// reg - the register to which it is being mapped
//
// Return Value:
// None
//
void LinearScan::setVarReg(VarToRegMap bbVarToRegMap, unsigned int trackedVarIndex, regNumber reg)
{
assert(trackedVarIndex < compiler->lvaTrackedCount);
regNumberSmall regSmall = (regNumberSmall)reg;
assert((regNumber)regSmall == reg);
bbVarToRegMap[trackedVarIndex] = regSmall;
}
//------------------------------------------------------------------------
// getVarReg: Get the register associated with a variable in the given 'bbVarToRegMap'.
//
// Arguments:
// bbVarToRegMap - the map of interest
// trackedVarIndex - the lvVarIndex for the variable
//
// Return Value:
// The register to which 'trackedVarIndex' is mapped
//
regNumber LinearScan::getVarReg(VarToRegMap bbVarToRegMap, unsigned int trackedVarIndex)
{
assert(enregisterLocalVars);
assert(trackedVarIndex < compiler->lvaTrackedCount);
return (regNumber)bbVarToRegMap[trackedVarIndex];
}
//------------------------------------------------------------------------
// setInVarToRegMap: Initialize the incoming VarToRegMap for the given block to the given
//                   map values (generally from a predecessor of the block).
//
// Arguments:
//    bbNum          - the block whose incoming map is being initialized
//    srcVarToRegMap - the map whose values are copied
//
// Return Value:
//    The updated incoming VarToRegMap for 'bbNum'.
//
VarToRegMap LinearScan::setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap)
{
assert(enregisterLocalVars);
VarToRegMap inVarToRegMap = inVarToRegMaps[bbNum];
memcpy(inVarToRegMap, srcVarToRegMap, (regMapCount * sizeof(regNumber)));
return inVarToRegMap;
}
//------------------------------------------------------------------------
// checkLastUses: Check correctness of last use flags
//
// Arguments:
// The block for which we are checking last uses.
//
// Notes:
// This does a backward walk of the RefPositions, starting from the liveOut set.
// This method was previously used to set the last uses, which were computed by
// liveness, but were not created in some cases of multiple lclVar references in the
// same tree. However, now that last uses are computed as RefPositions are created,
// that is no longer necessary, and this method is simply retained as a check.
// The exception to the check-only behavior is when LSRA_EXTEND_LIFETIMES is set via
// COMPlus_JitStressRegs. In that case, this method is required, because even though
// the RefPositions will not be marked lastUse in that case, we still need to correctly
// mark the last uses on the tree nodes, which is done by this method.
//
#ifdef DEBUG
void LinearScan::checkLastUses(BasicBlock* block)
{
if (VERBOSE)
{
JITDUMP("\n\nCHECKING LAST USES for " FMT_BB ", liveout=", block->bbNum);
dumpConvertedVarSet(compiler, block->bbLiveOut);
JITDUMP("\n==============================\n");
}
unsigned keepAliveVarNum = BAD_VAR_NUM;
if (compiler->lvaKeepAliveAndReportThis())
{
keepAliveVarNum = compiler->info.compThisArg;
assert(compiler->info.compIsStatic == false);
}
// find which uses are lastUses
// Work backwards starting with live out.
// 'computedLive' is updated to include any exposed use (including those in this
// block that we've already seen). When we encounter a use, if it's
// not in that set, then it's a last use.
VARSET_TP computedLive(VarSetOps::MakeCopy(compiler, block->bbLiveOut));
bool foundDiff = false;
RefPositionReverseIterator reverseIterator = refPositions.rbegin();
RefPosition* currentRefPosition;
for (currentRefPosition = &reverseIterator; currentRefPosition->refType != RefTypeBB;
reverseIterator++, currentRefPosition = &reverseIterator)
{
// We should never see ParamDefs or ZeroInits within a basic block.
assert(currentRefPosition->refType != RefTypeParamDef && currentRefPosition->refType != RefTypeZeroInit);
if (currentRefPosition->isIntervalRef() && currentRefPosition->getInterval()->isLocalVar)
{
unsigned varNum = currentRefPosition->getInterval()->varNum;
unsigned varIndex = currentRefPosition->getInterval()->getVarIndex(compiler);
LsraLocation loc = currentRefPosition->nodeLocation;
// We should always have a tree node for a localVar, except for the "special" RefPositions.
GenTree* tree = currentRefPosition->treeNode;
assert(tree != nullptr || currentRefPosition->refType == RefTypeExpUse ||
currentRefPosition->refType == RefTypeDummyDef);
if (!VarSetOps::IsMember(compiler, computedLive, varIndex) && varNum != keepAliveVarNum)
{
// There was no exposed use, so this is a "last use" (and we mark it thus even if it's a def)
if (extendLifetimes())
{
// NOTE: this is a bit of a hack. When extending lifetimes, the "last use" bit will be clear.
// This bit, however, would normally be used during resolveLocalRef to set the value of
// LastUse on the node for a ref position. If this bit is not set correctly even when
// extending lifetimes, the code generator will assert as it expects to have accurate last
// use information. To avoid these asserts, set the LastUse bit here.
// Note also that extendLifetimes() is an LSRA stress mode, so it will only be true for
// Checked or Debug builds, for which this method will be executed.
if (tree != nullptr)
{
tree->AsLclVar()->SetLastUse(currentRefPosition->multiRegIdx);
}
}
else if (!currentRefPosition->lastUse)
{
JITDUMP("missing expected last use of V%02u @%u\n", compiler->lvaTrackedIndexToLclNum(varIndex),
loc);
foundDiff = true;
}
VarSetOps::AddElemD(compiler, computedLive, varIndex);
}
else if (currentRefPosition->lastUse)
{
JITDUMP("unexpected last use of V%02u @%u\n", compiler->lvaTrackedIndexToLclNum(varIndex), loc);
foundDiff = true;
}
else if (extendLifetimes() && tree != nullptr)
{
// NOTE: see the comment above re: the extendLifetimes hack.
tree->AsLclVar()->ClearLastUse(currentRefPosition->multiRegIdx);
}
if (currentRefPosition->refType == RefTypeDef || currentRefPosition->refType == RefTypeDummyDef)
{
VarSetOps::RemoveElemD(compiler, computedLive, varIndex);
}
}
assert(reverseIterator != refPositions.rend());
}
VARSET_TP liveInNotComputedLive(VarSetOps::Diff(compiler, block->bbLiveIn, computedLive));
// We may have exception vars in the liveIn set of exception blocks that are not computed live.
if (compiler->ehBlockHasExnFlowDsc(block))
{
VarSetOps::DiffD(compiler, liveInNotComputedLive, compiler->fgGetHandlerLiveVars(block));
}
VarSetOps::Iter liveInNotComputedLiveIter(compiler, liveInNotComputedLive);
unsigned liveInNotComputedLiveIndex = 0;
while (liveInNotComputedLiveIter.NextElem(&liveInNotComputedLiveIndex))
{
LclVarDsc* varDesc = compiler->lvaGetDescByTrackedIndex(liveInNotComputedLiveIndex);
if (varDesc->lvLRACandidate)
{
JITDUMP(FMT_BB ": V%02u is in LiveIn set, but not computed live.\n", block->bbNum,
compiler->lvaTrackedIndexToLclNum(liveInNotComputedLiveIndex));
foundDiff = true;
}
}
VarSetOps::DiffD(compiler, computedLive, block->bbLiveIn);
const VARSET_TP& computedLiveNotLiveIn(computedLive); // reuse the buffer.
VarSetOps::Iter computedLiveNotLiveInIter(compiler, computedLiveNotLiveIn);
unsigned computedLiveNotLiveInIndex = 0;
while (computedLiveNotLiveInIter.NextElem(&computedLiveNotLiveInIndex))
{
LclVarDsc* varDesc = compiler->lvaGetDescByTrackedIndex(computedLiveNotLiveInIndex);
if (varDesc->lvLRACandidate)
{
JITDUMP(FMT_BB ": V%02u is computed live, but not in LiveIn set.\n", block->bbNum,
compiler->lvaTrackedIndexToLclNum(computedLiveNotLiveInIndex));
foundDiff = true;
}
}
assert(!foundDiff);
}
#endif // DEBUG
//------------------------------------------------------------------------
// findPredBlockForLiveIn: Determine which block should be used for the register locations of the live-in variables.
//
// Arguments:
// block - The block for which we're selecting a predecessor.
// prevBlock - The previous block in allocation order.
// pPredBlockIsAllocated - A debug-only argument that indicates whether any of the predecessors have been seen
// in allocation order.
//
// Return Value:
// The selected predecessor.
//
// Assumptions:
// in DEBUG, caller initializes *pPredBlockIsAllocated to false, and it will be set to true if the block
// returned is in fact a predecessor.
//
// Notes:
// This will select a predecessor based on the heuristics obtained by getLsraBlockBoundaryLocations(), which can be
// one of:
// LSRA_BLOCK_BOUNDARY_PRED - Use the register locations of a predecessor block (default)
// LSRA_BLOCK_BOUNDARY_LAYOUT - Use the register locations of the previous block in layout order.
// This is the only case where this actually returns a different block.
// LSRA_BLOCK_BOUNDARY_ROTATE - Rotate the register locations from a predecessor.
// For this case, the block returned is the same as for LSRA_BLOCK_BOUNDARY_PRED, but
// the register locations will be "rotated" to stress the resolution and allocation
// code.
BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block,
BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated))
{
BasicBlock* predBlock = nullptr;
assert(*pPredBlockIsAllocated == false);
// Blocks with exception flow on entry use no predecessor blocks, as all incoming vars
// are on the stack.
if (blockInfo[block->bbNum].hasEHBoundaryIn)
{
JITDUMP("\n\nIncoming EH boundary; ");
return nullptr;
}
if (block == compiler->fgFirstBB)
{
return nullptr;
}
if (block->bbPreds == nullptr)
{
assert((block != compiler->fgFirstBB) || (prevBlock != nullptr));
JITDUMP("\n\nNo predecessor; ");
// Some throw blocks do not have a predecessor. For such blocks, we want to return
// null rather than prevBlock. Returning prevBlock would be wrong, because LSRA would
// then think that the variable is live in registers based on the lexical flow, but
// that won't be true according to the control flow.
// Example:
//
// IG05:
// ... ; V01 is in 'rdi'
// JNE IG07
// ...
// IG06:
// ...
// ... ; V01 is in 'rbx'
// JMP IG08
// IG07:
// ... ; LSRA thinks V01 is in 'rbx' if IG06 is set as previous block of IG07.
// ....
// CALL CORINFO_HELP_RNGCHKFAIL
// ...
// IG08:
// ...
// ...
if (block->bbJumpKind == BBJ_THROW)
{
JITDUMP(" - throw block; ");
return nullptr;
}
// We may have unreachable blocks, due to optimization.
// We don't want to set the predecessor as null in this case, since that will result in
// unnecessary DummyDefs, and possibly result in inconsistencies requiring resolution
// (since these unreachable blocks can have reachable successors).
return prevBlock;
}
#ifdef DEBUG
if (getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_LAYOUT)
{
if (prevBlock != nullptr)
{
predBlock = prevBlock;
}
}
else
#endif // DEBUG
{
predBlock = block->GetUniquePred(compiler);
if (predBlock != nullptr)
{
// We should already have returned null if this block has a single incoming EH boundary edge.
assert(!predBlock->hasEHBoundaryOut());
if (isBlockVisited(predBlock))
{
if (predBlock->bbJumpKind == BBJ_COND)
{
// Special handling to improve matching on backedges.
BasicBlock* otherBlock = (block == predBlock->bbNext) ? predBlock->bbJumpDest : predBlock->bbNext;
noway_assert(otherBlock != nullptr);
if (isBlockVisited(otherBlock) && !blockInfo[otherBlock->bbNum].hasEHBoundaryIn)
{
// This is the case when we have a conditional branch where one target has already
// been visited. It would be best to use the same incoming regs as that block,
// so that we have less likelihood of having to move registers.
// For example, in determining the block to use for the starting register locations for
// "block" in the following example, we'd like to use the same predecessor for "block"
// as for "otherBlock", so that both successors of predBlock have the same locations, reducing
// the likelihood of needing a split block on a backedge:
//
// otherPred
// |
// otherBlock <-+
// . . . |
// |
// predBlock----+
// |
// block
//
if (blockInfo[otherBlock->bbNum].hasEHBoundaryIn)
{
return nullptr;
}
else
{
for (BasicBlock* const otherPred : otherBlock->PredBlocks())
{
if (otherPred->bbNum == blockInfo[otherBlock->bbNum].predBBNum)
{
predBlock = otherPred;
break;
}
}
}
}
}
}
else
{
predBlock = nullptr;
}
}
else
{
for (BasicBlock* const candidatePredBlock : block->PredBlocks())
{
if (isBlockVisited(candidatePredBlock))
{
if ((predBlock == nullptr) || (predBlock->bbWeight < candidatePredBlock->bbWeight))
{
predBlock = candidatePredBlock;
INDEBUG(*pPredBlockIsAllocated = true;)
}
}
}
}
if (predBlock == nullptr)
{
predBlock = prevBlock;
assert(predBlock != nullptr);
JITDUMP("\n\nNo allocated predecessor; ");
}
}
return predBlock;
}
#ifdef DEBUG
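//------------------------------------------------------------------------
// dumpVarRefPositions: Dump the RefPositions of every register-candidate lclVar.
//
// Arguments:
//    title - a heading to print before the dump
//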
void LinearScan::dumpVarRefPositions(const char* title)
{
if (enregisterLocalVars)
{
printf("\nVAR REFPOSITIONS %s\n", title);
for (unsigned i = 0; i < compiler->lvaCount; i++)
{
printf("--- V%02u", i);
const LclVarDsc* varDsc = compiler->lvaGetDesc(i);
if (varDsc->lvIsRegCandidate())
{
Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex);
printf(" (Interval %d)\n", interval->intervalIndex);
for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
{
ref->dump(this);
}
}
else
{
printf("\n");
}
}
printf("\n");
}
}
#endif // DEBUG
// Set the default rpFrameType based upon codeGen->isFramePointerRequired()
// This was lifted from the register predictor
//
void LinearScan::setFrameType()
{
FrameType frameType = FT_NOT_SET;
#if DOUBLE_ALIGN
compiler->codeGen->setDoubleAlign(false);
if (doDoubleAlign)
{
frameType = FT_DOUBLE_ALIGN_FRAME;
compiler->codeGen->setDoubleAlign(true);
}
else
#endif // DOUBLE_ALIGN
if (compiler->codeGen->isFramePointerRequired())
{
frameType = FT_EBP_FRAME;
}
else
{
if (compiler->rpMustCreateEBPCalled == false)
{
#ifdef DEBUG
const char* reason;
#endif // DEBUG
compiler->rpMustCreateEBPCalled = true;
if (compiler->rpMustCreateEBPFrame(INDEBUG(&reason)))
{
JITDUMP("; Decided to create an EBP based frame for ETW stackwalking (%s)\n", reason);
compiler->codeGen->setFrameRequired(true);
}
}
if (compiler->codeGen->isFrameRequired())
{
frameType = FT_EBP_FRAME;
}
else
{
frameType = FT_ESP_FRAME;
}
}
switch (frameType)
{
case FT_ESP_FRAME:
noway_assert(!compiler->codeGen->isFramePointerRequired());
noway_assert(!compiler->codeGen->isFrameRequired());
compiler->codeGen->setFramePointerUsed(false);
break;
case FT_EBP_FRAME:
compiler->codeGen->setFramePointerUsed(true);
break;
#if DOUBLE_ALIGN
case FT_DOUBLE_ALIGN_FRAME:
noway_assert(!compiler->codeGen->isFramePointerRequired());
compiler->codeGen->setFramePointerUsed(false);
break;
#endif // DOUBLE_ALIGN
default:
noway_assert(!"rpFrameType not set correctly!");
break;
}
// If we are using FPBASE as the frame register, we cannot also use it for
// a local var.
regMaskTP removeMask = RBM_NONE;
if (frameType == FT_EBP_FRAME)
{
removeMask |= RBM_FPBASE;
}
compiler->rpFrameType = frameType;
#ifdef TARGET_ARMARCH
// Determine whether we need to reserve a register for large lclVar offsets.
if (compiler->compRsvdRegCheck(Compiler::REGALLOC_FRAME_LAYOUT))
{
// We reserve R10/IP1 in this case to hold the offsets in load/store instructions
compiler->codeGen->regSet.rsMaskResvd |= RBM_OPT_RSVD;
assert(REG_OPT_RSVD != REG_FP);
JITDUMP(" Reserved REG_OPT_RSVD (%s) due to large frame\n", getRegName(REG_OPT_RSVD));
removeMask |= RBM_OPT_RSVD;
}
#endif // TARGET_ARMARCH
if ((removeMask != RBM_NONE) && ((availableIntRegs & removeMask) != 0))
{
// We know that we're already in "read mode" for availableIntRegs. However,
// we need to remove these registers, so subsequent users (like callers
// to allRegs()) get the right thing. The RemoveRegistersFromMasks() code
// fixes up everything that already took a dependency on the value that was
// previously read, so this completes the picture.
availableIntRegs.OverrideAssign(availableIntRegs & ~removeMask);
}
}
//------------------------------------------------------------------------
// copyOrMoveRegInUse: Is 'ref' a copyReg/moveReg that is still busy at the given location?
//
// Arguments:
// ref: The RefPosition of interest
// loc: The LsraLocation at which we're determining whether it's busy.
//
// Return Value:
// true iff 'ref' is active at the given location
//
bool copyOrMoveRegInUse(RefPosition* ref, LsraLocation loc)
{
if (!ref->copyReg && !ref->moveReg)
{
return false;
}
if (ref->getRefEndLocation() >= loc)
{
return true;
}
Interval* interval = ref->getInterval();
RefPosition* nextRef = interval->getNextRefPosition();
if (nextRef != nullptr && nextRef->treeNode == ref->treeNode && nextRef->getRefEndLocation() >= loc)
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// getRegisterType: Get the RegisterType to use for the given RefPosition
//
// Arguments:
// currentInterval: The interval for the current allocation
// refPosition: The RefPosition of the current Interval for which a register is being allocated
//
// Return Value:
// The RegisterType that should be allocated for this RefPosition
//
// Notes:
// This will nearly always be identical to the registerType of the interval, except in the case
// of SIMD types of 8 bytes (currently only Vector2) when they are passed and returned in integer
// registers, or copied to a return temp.
// This method need only be called in situations where we may be dealing with the register requirements
// of a RefTypeUse RefPosition (i.e. not when we are only looking at the type of an interval, nor when
// we are interested in the "defining" type of the interval). This is because the situation of interest
// only happens at the use (where it must be copied to an integer register).
RegisterType LinearScan::getRegisterType(Interval* currentInterval, RefPosition* refPosition)
{
assert(refPosition->getInterval() == currentInterval);
RegisterType regType = currentInterval->registerType;
regMaskTP candidates = refPosition->registerAssignment;
assert((candidates & allRegs(regType)) != RBM_NONE);
return regType;
}
//------------------------------------------------------------------------
// isMatchingConstant: Check to see whether a given register contains the constant referenced
// by the given RefPosition
//
// Arguments:
// physRegRecord: The RegRecord for the register we're interested in.
// refPosition: The RefPosition for a constant interval.
//
// Return Value:
// True iff the register was defined by an identical constant node as the current interval.
//
bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPosition)
{
if ((physRegRecord->assignedInterval == nullptr) || !physRegRecord->assignedInterval->isConstant ||
(refPosition->refType != RefTypeDef))
{
return false;
}
Interval* interval = refPosition->getInterval();
if (!interval->isConstant || !isRegConstant(physRegRecord->regNum, interval->registerType))
{
return false;
}
noway_assert(refPosition->treeNode != nullptr);
GenTree* otherTreeNode = physRegRecord->assignedInterval->firstRefPosition->treeNode;
noway_assert(otherTreeNode != nullptr);
if (refPosition->treeNode->OperGet() != otherTreeNode->OperGet())
{
return false;
}
switch (otherTreeNode->OperGet())
{
case GT_CNS_INT:
{
ssize_t v1 = refPosition->treeNode->AsIntCon()->IconValue();
ssize_t v2 = otherTreeNode->AsIntCon()->IconValue();
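// Reuse requires identical values and, unless the value is zero, matching GC-ness
// (a zero/null constant has the same encoding regardless of GC type).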
if ((v1 == v2) && (varTypeGCtype(refPosition->treeNode) == varTypeGCtype(otherTreeNode) || v1 == 0))
{
#ifdef TARGET_64BIT
// If the constant is negative, only reuse registers of the same type.
// This is because, on a 64-bit system, we do not sign-extend immediates in registers to
// 64-bits unless they are actually longs, as this requires a longer instruction.
// This doesn't apply to a 32-bit system, on which long values occupy multiple registers.
// (We could sign-extend, but we would have to always sign-extend, because if we reuse more
// than once, we won't have access to the instruction that originally defines the constant).
if ((refPosition->treeNode->TypeGet() == otherTreeNode->TypeGet()) || (v1 >= 0))
#endif // TARGET_64BIT
{
return true;
}
}
break;
}
case GT_CNS_DBL:
{
// For floating point constants, the values must be identical, not simply compare
// equal. So we compare the bits.
if (refPosition->treeNode->AsDblCon()->isBitwiseEqual(otherTreeNode->AsDblCon()) &&
(refPosition->treeNode->TypeGet() == otherTreeNode->TypeGet()))
{
return true;
}
break;
}
default:
break;
}
return false;
}
//------------------------------------------------------------------------
// allocateReg: Find a register that satisfies the requirements for refPosition,
// taking into account the preferences for the given Interval,
// and possibly spilling a lower weight Interval.
//
// Arguments:
// currentInterval: The interval for the current allocation
// refPosition: The RefPosition of the current Interval for which a register is being allocated
// Return Value:
// The regNumber, if any, allocated to the RefPosition.
// Returns REG_NA only if 'refPosition->RegOptional()' is true, and there are
// no free registers and no registers containing lower-weight Intervals that can be spilled.
//
// Notes:
// This method will prefer to allocate a free register, but if none are available,
// it will look for a lower-weight Interval to spill.
// Weight and farthest distance of next reference are used to determine whether an Interval
// currently occupying a register should be spilled. It will be spilled either:
// - At its most recent RefPosition, if that is within the current block, OR
// - At the boundary between the previous block and this one
//
// The RefPosition to spill is selected as follows:
// - If refPosition->RegOptional() == false
// The RefPosition chosen for spilling will be the lowest weight
// of all, and if there is more than one ref position with the
// same lowest weight, among them we choose the one with the farthest
// distance to its next reference.
//
// - If refPosition->RegOptional() == true
// The ref position chosen for spilling will not only be lowest weight
// of all but also has a weight lower than 'refPosition'. If there is
// no such ref position, no register will be allocated.
//
regNumber LinearScan::allocateReg(Interval* currentInterval,
RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore))
{
regMaskTP foundRegBit = regSelector->select(currentInterval, refPosition DEBUG_ARG(registerScore));
if (foundRegBit == RBM_NONE)
{
return REG_NA;
}
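// The selector returns a single-register mask; if another interval currently occupies that
// register, it must be unassigned (and possibly spilled) before we can assign it here.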
regNumber foundReg = genRegNumFromMask(foundRegBit);
RegRecord* availablePhysRegRecord = getRegisterRecord(foundReg);
Interval* assignedInterval = availablePhysRegRecord->assignedInterval;
if ((assignedInterval != currentInterval) &&
isAssigned(availablePhysRegRecord ARM_ARG(getRegisterType(currentInterval, refPosition))))
{
if (regSelector->isSpilling())
{
// We're spilling.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
if (currentInterval->registerType == TYP_DOUBLE)
{
assert(genIsValidDoubleReg(availablePhysRegRecord->regNum));
unassignDoublePhysReg(availablePhysRegRecord);
}
else if (assignedInterval->registerType == TYP_DOUBLE)
{
// Make sure we spill both halves of the double register.
assert(genIsValidDoubleReg(assignedInterval->assignedReg->regNum));
unassignPhysReg(assignedInterval->assignedReg, assignedInterval->recentRefPosition);
}
else
#endif
{
unassignPhysReg(availablePhysRegRecord, assignedInterval->recentRefPosition);
}
}
else
{
// If we considered this "unassigned" because this interval's lifetime ends before
// the next ref, remember it.
// For historical reasons (due to former short-circuiting of this case), if we're reassigning
// the current interval to a previous assignment, we don't remember the previous interval.
// Note that we need to compute this condition before calling unassignPhysReg, which will reset
// assignedInterval->physReg.
bool wasAssigned = regSelector->foundUnassignedReg() && (assignedInterval != nullptr) &&
(assignedInterval->physReg == foundReg);
unassignPhysReg(availablePhysRegRecord ARM_ARG(currentInterval->registerType));
if (regSelector->isMatchingConstant() && compiler->opts.OptimizationEnabled())
{
assert(assignedInterval->isConstant);
refPosition->treeNode->SetReuseRegVal();
}
else if (wasAssigned)
{
updatePreviousInterval(availablePhysRegRecord, assignedInterval, assignedInterval->registerType);
}
else
{
assert(!regSelector->isConstAvailable());
}
}
}
assignPhysReg(availablePhysRegRecord, currentInterval);
refPosition->registerAssignment = foundRegBit;
return foundReg;
}
//------------------------------------------------------------------------
// canSpillReg: Determine whether we can spill physRegRecord
//
// Arguments:
// physRegRecord - reg to spill
// refLocation - Location of RefPosition where this register will be spilled
//
// Return Value:
// True - if we can spill physRegRecord
// False - otherwise
//
bool LinearScan::canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation)
{
assert(physRegRecord->assignedInterval != nullptr);
RefPosition* recentAssignedRef = physRegRecord->assignedInterval->recentRefPosition;
if (recentAssignedRef != nullptr)
{
// We can't spill a register that's active at the current location.
// We should already have determined this with isRegBusy before calling this method.
assert(!isRefPositionActive(recentAssignedRef, refLocation));
return true;
}
// recentAssignedRef can only be null if this is a parameter that has not yet been
// moved to a register (or stack), in which case we can't spill it yet.
assert(physRegRecord->assignedInterval->getLocalVar(compiler)->lvIsParam);
return false;
}
//------------------------------------------------------------------------
// getSpillWeight: Get the weight associated with spilling the given register
//
// Arguments:
// physRegRecord - reg to spill
//
// Return Value:
// The weight associated with the location at which we will spill.
//
// Note: This helper is designed to be used only from allocateReg() and getDoubleSpillWeight()
//
weight_t LinearScan::getSpillWeight(RegRecord* physRegRecord)
{
assert(physRegRecord->assignedInterval != nullptr);
RefPosition* recentAssignedRef = physRegRecord->assignedInterval->recentRefPosition;
weight_t weight = BB_ZERO_WEIGHT;
// We shouldn't call this method if there is no recentAssignedRef.
assert(recentAssignedRef != nullptr);
// We shouldn't call this method if the register is active at this location.
assert(!isRefPositionActive(recentAssignedRef, currentLoc));
weight = getWeight(recentAssignedRef);
return weight;
}
#ifdef TARGET_ARM
//------------------------------------------------------------------------
// canSpillDoubleReg: Determine whether we can spill physRegRecord
//
// Arguments:
// physRegRecord - reg to spill (must be a valid double register)
// refLocation - Location of RefPosition where this register will be spilled
//
// Return Value:
// True - if we can spill physRegRecord
// False - otherwise
//
bool LinearScan::canSpillDoubleReg(RegRecord* physRegRecord, LsraLocation refLocation)
{
assert(genIsValidDoubleReg(physRegRecord->regNum));
RegRecord* physRegRecord2 = getSecondHalfRegRec(physRegRecord);
if ((physRegRecord->assignedInterval != nullptr) && !canSpillReg(physRegRecord, refLocation))
{
return false;
}
if ((physRegRecord2->assignedInterval != nullptr) && !canSpillReg(physRegRecord2, refLocation))
{
return false;
}
return true;
}
//------------------------------------------------------------------------
// unassignDoublePhysReg: unassign a double register (pair)
//
// Arguments:
// doubleRegRecord - reg to unassign
//
// Note:
// The given RegRecord must be a valid (even numbered) double register.
//
void LinearScan::unassignDoublePhysReg(RegRecord* doubleRegRecord)
{
assert(genIsValidDoubleReg(doubleRegRecord->regNum));
RegRecord* doubleRegRecordLo = doubleRegRecord;
RegRecord* doubleRegRecordHi = getSecondHalfRegRec(doubleRegRecordLo);
// For a double register, we have the following four cases.
// Case 1: doubleRegRecLo is assigned to a TYP_DOUBLE interval
// Case 2: doubleRegRecLo and doubleRegRecHi are assigned to different TYP_FLOAT intervals
// Case 3: doubleRegRecLo is assigned to a TYP_FLOAT interval and doubleRegRecHi is nullptr
// Case 4: doubleRegRecLo is nullptr, and doubleRegRecHi is assigned to a TYP_FLOAT interval
if (doubleRegRecordLo->assignedInterval != nullptr)
{
if (doubleRegRecordLo->assignedInterval->registerType == TYP_DOUBLE)
{
// Case 1: doubleRegRecLo is assigned to TYP_DOUBLE interval
unassignPhysReg(doubleRegRecordLo, doubleRegRecordLo->assignedInterval->recentRefPosition);
}
else
{
// Case 2: doubleRegRecLo and doubleRegRecHi are assigned to different TYP_FLOAT intervals
// Case 3: doubleRegRecLo is assigned to a TYP_FLOAT interval and doubleRegRecHi is nullptr
assert(doubleRegRecordLo->assignedInterval->registerType == TYP_FLOAT);
unassignPhysReg(doubleRegRecordLo, doubleRegRecordLo->assignedInterval->recentRefPosition);
if (doubleRegRecordHi != nullptr)
{
if (doubleRegRecordHi->assignedInterval != nullptr)
{
assert(doubleRegRecordHi->assignedInterval->registerType == TYP_FLOAT);
unassignPhysReg(doubleRegRecordHi, doubleRegRecordHi->assignedInterval->recentRefPosition);
}
}
}
}
else
{
// Case 4: doubleRegRecordLo is nullptr, and doubleRegRecordHi is assigned to a TYP_FLOAT interval
assert(doubleRegRecordHi->assignedInterval != nullptr);
assert(doubleRegRecordHi->assignedInterval->registerType == TYP_FLOAT);
unassignPhysReg(doubleRegRecordHi, doubleRegRecordHi->assignedInterval->recentRefPosition);
}
}
#endif // TARGET_ARM
//------------------------------------------------------------------------
// isRefPositionActive: Determine whether a given RefPosition is active at the given location
//
// Arguments:
// refPosition - the RefPosition of interest
// refLocation - the LsraLocation at which we want to know if it is active
//
// Return Value:
// True - if this RefPosition occurs at the given location, OR
// if it occurs at the previous location and is marked delayRegFree.
// False - otherwise
//
bool LinearScan::isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation)
{
return (refPosition->nodeLocation == refLocation ||
((refPosition->nodeLocation + 1 == refLocation) && refPosition->delayRegFree));
}
//------------------------------------------------------------------------
// isSpillCandidate: Determine if a register is a spill candidate for a given RefPosition.
//
// Arguments:
// current The interval for the current allocation
// refPosition The RefPosition of the current Interval for which a register is being allocated
// physRegRecord The RegRecord for the register we're considering for spill
//
// Return Value:
// True iff the given register can be spilled to accommodate the given RefPosition.
//
bool LinearScan::isSpillCandidate(Interval* current, RefPosition* refPosition, RegRecord* physRegRecord)
{
regMaskTP candidateBit = genRegMask(physRegRecord->regNum);
LsraLocation refLocation = refPosition->nodeLocation;
// We shouldn't be calling this if we haven't already determined that the register is not
// busy until the next kill.
assert(!isRegBusy(physRegRecord->regNum, current->registerType));
// We should already have determined that the register isn't actively in use.
assert(!isRegInUse(physRegRecord->regNum, current->registerType));
// We shouldn't be calling this if 'refPosition' is a fixed reference to this register.
assert(!refPosition->isFixedRefOfRegMask(candidateBit));
// We shouldn't be calling this if there is a fixed reference at the same location
// (and it's not due to this reference), as checked above.
assert(!conflictingFixedRegReference(physRegRecord->regNum, refPosition));
bool canSpill;
#ifdef TARGET_ARM
if (current->registerType == TYP_DOUBLE)
{
canSpill = canSpillDoubleReg(physRegRecord, refLocation);
}
else
#endif // TARGET_ARM
{
canSpill = canSpillReg(physRegRecord, refLocation);
}
if (!canSpill)
{
return false;
}
return true;
}
//------------------------------------------------------------------------
// assignCopyReg: Grab a register to copy into and then immediately use.
//
// Arguments:
// refPosition - the RefPosition for which a copy register is needed
//
// Return Value:
// The register allocated for the copy.
//
// Notes:
// This is called only for localVar intervals that already have a register
// assignment that is not compatible with the current RefPosition.
// This is not like regular assignment, because we don't want to change
// any preferences or existing register assignments.
// Prefer a free register with the earliest next use; otherwise, spill the
// interval with the farthest next use.
//
regNumber LinearScan::assignCopyReg(RefPosition* refPosition)
{
Interval* currentInterval = refPosition->getInterval();
assert(currentInterval != nullptr);
assert(currentInterval->isActive);
// Save the relatedInterval, if any, so that it doesn't get modified during allocation.
Interval* savedRelatedInterval = currentInterval->relatedInterval;
currentInterval->relatedInterval = nullptr;
// We don't really want to change the default assignment,
// so 1) pretend this isn't active, and 2) remember the old reg
regNumber oldPhysReg = currentInterval->physReg;
RegRecord* oldRegRecord = currentInterval->assignedReg;
assert(oldRegRecord->regNum == oldPhysReg);
currentInterval->isActive = false;
// We *must* allocate a register, and it will be a copyReg. Set that field now, so that
// refPosition->RegOptional() will return false.
refPosition->copyReg = true;
RegisterScore registerScore = NONE;
regNumber allocatedReg = allocateReg(currentInterval, refPosition DEBUG_ARG(®isterScore));
assert(allocatedReg != REG_NA);
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, currentInterval, allocatedReg, nullptr, registerScore));
// Now restore the old info
currentInterval->relatedInterval = savedRelatedInterval;
currentInterval->physReg = oldPhysReg;
currentInterval->assignedReg = oldRegRecord;
currentInterval->isActive = true;
return allocatedReg;
}
//------------------------------------------------------------------------
// isAssigned: This is the function to check if the given RegRecord has an assignedInterval.
//
// Arguments:
// regRec - The RegRecord to check that it is assigned.
// newRegType - The register type of the interval about to be assigned; on ARM this
// determines whether the other half of a double register must also be checked.
//
// Return Value:
// Returns true if the given RegRecord has an assignedInterval.
//
bool LinearScan::isAssigned(RegRecord* regRec ARM_ARG(RegisterType newRegType))
{
if (regRec->assignedInterval != nullptr)
{
return true;
}
#ifdef TARGET_ARM
if (newRegType == TYP_DOUBLE)
{
RegRecord* otherRegRecord = getSecondHalfRegRec(regRec);
if (otherRegRecord->assignedInterval != nullptr)
{
return true;
}
}
#endif
return false;
}
//------------------------------------------------------------------------
// checkAndAssignInterval: Check if the interval is already assigned and
// if it is then unassign the physical record
// and set the assignedInterval to 'interval'
//
// Arguments:
// regRec - The RegRecord of interest
// interval - The Interval that we're going to assign to 'regRec'
//
void LinearScan::checkAndAssignInterval(RegRecord* regRec, Interval* interval)
{
Interval* assignedInterval = regRec->assignedInterval;
if (assignedInterval != nullptr && assignedInterval != interval)
{
// This is allocated to another interval. Either it is inactive, or it was allocated as a
// copyReg and is therefore not the "assignedReg" of the other interval. In the latter case,
// we simply unassign it - in the former case we need to set the physReg on the interval to
// REG_NA to indicate that it is no longer in that register.
// The lack of checking for this case resulted in an assert in the retail version of System.dll,
// in method SerialStream.GetDcbFlag.
// Note that we can't check for the copyReg case, because we may have seen a more recent
// RefPosition for the Interval that was NOT a copyReg.
if (assignedInterval->assignedReg == regRec)
{
assert(assignedInterval->isActive == false);
assignedInterval->physReg = REG_NA;
}
unassignPhysReg(regRec->regNum);
}
#ifdef TARGET_ARM
// If 'interval' and 'assignedInterval' were both TYP_DOUBLE, then we have unassigned 'assignedInterval'
// from both halves. Otherwise, if 'interval' is TYP_DOUBLE, we now need to unassign the other half.
if ((interval->registerType == TYP_DOUBLE) &&
((assignedInterval == nullptr) || (assignedInterval->registerType == TYP_FLOAT)))
{
RegRecord* otherRegRecord = getSecondHalfRegRec(regRec);
assignedInterval = otherRegRecord->assignedInterval;
if (assignedInterval != nullptr && assignedInterval != interval)
{
if (assignedInterval->assignedReg == otherRegRecord)
{
assert(assignedInterval->isActive == false);
assignedInterval->physReg = REG_NA;
}
unassignPhysReg(otherRegRecord->regNum);
}
}
#endif
updateAssignedInterval(regRec, interval, interval->registerType);
}
// Assign the given physical register record to the given interval
void LinearScan::assignPhysReg(RegRecord* regRec, Interval* interval)
{
regMaskTP assignedRegMask = genRegMask(regRec->regNum);
compiler->codeGen->regSet.rsSetRegsModified(assignedRegMask DEBUGARG(true));
interval->assignedReg = regRec;
checkAndAssignInterval(regRec, interval);
interval->physReg = regRec->regNum;
interval->isActive = true;
if (interval->isLocalVar)
{
// Prefer this register for future references
interval->updateRegisterPreferences(assignedRegMask);
}
}
//------------------------------------------------------------------------
// setIntervalAsSplit: Set this Interval as being split
//
// Arguments:
// interval - The Interval which is being split
//
// Return Value:
// None.
//
// Notes:
// The given Interval will be marked as split, and it will be added to the
// set of splitOrSpilledVars.
//
// Assumptions:
// "interval" must be a lclVar interval, as tree temps are never split.
// This is asserted in the call to getVarIndex().
//
void LinearScan::setIntervalAsSplit(Interval* interval)
{
if (interval->isLocalVar)
{
unsigned varIndex = interval->getVarIndex(compiler);
if (!interval->isSplit)
{
VarSetOps::AddElemD(compiler, splitOrSpilledVars, varIndex);
}
else
{
assert(VarSetOps::IsMember(compiler, splitOrSpilledVars, varIndex));
}
}
interval->isSplit = true;
}
//------------------------------------------------------------------------
// setIntervalAsSpilled: Set this Interval as being spilled
//
// Arguments:
// interval - The Interval which is being spilled
//
// Return Value:
// None.
//
// Notes:
// The given Interval will be marked as spilled, and it will be added
// to the set of splitOrSpilledVars.
//
void LinearScan::setIntervalAsSpilled(Interval* interval)
{
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (interval->isUpperVector)
{
assert(interval->relatedInterval->isLocalVar);
interval->isSpilled = true;
// Now we need to mark the local as spilled also, even if the lower half is never spilled,
// as this will use the upper part of its home location.
interval = interval->relatedInterval;
// We'll now mark this as spilled, so it changes the spillCost.
RefPosition* recentRefPos = interval->recentRefPosition;
if (!interval->isSpilled && interval->isActive && (recentRefPos != nullptr))
{
VarSetOps::AddElemD(compiler, splitOrSpilledVars, interval->getVarIndex(compiler));
interval->isSpilled = true;
regNumber reg = interval->physReg;
spillCost[reg] = getSpillWeight(getRegisterRecord(reg));
}
}
#endif
if (interval->isLocalVar)
{
unsigned varIndex = interval->getVarIndex(compiler);
if (!interval->isSpilled)
{
VarSetOps::AddElemD(compiler, splitOrSpilledVars, varIndex);
}
else
{
assert(VarSetOps::IsMember(compiler, splitOrSpilledVars, varIndex));
}
}
interval->isSpilled = true;
}
//------------------------------------------------------------------------
// spill: Spill the "interval" starting from "fromRefPosition" (upto "toRefPosition")
//
// Arguments:
// interval - The interval that contains the RefPosition to be spilled
// fromRefPosition - The RefPosition at which the Interval is to be spilled
// toRefPosition - The RefPosition at which it must be reloaded (debug only arg)
//
// Return Value:
// None.
//
// Assumptions:
// fromRefPosition and toRefPosition must not be null
//
void LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition DEBUGARG(RefPosition* toRefPosition))
{
assert(fromRefPosition != nullptr && toRefPosition != nullptr);
assert(fromRefPosition->getInterval() == interval && toRefPosition->getInterval() == interval);
assert(fromRefPosition->nextRefPosition == toRefPosition);
if (!fromRefPosition->lastUse)
{
// Lcl var def/use ref positions, even if reg-optional, should be marked as spillAfter
// when not allocated a register. Note that if it is a WriteThru interval, the value is always
// written to the stack, but the WriteThru indicates that the register is no longer live.
if (fromRefPosition->RegOptional() && !(interval->isLocalVar && fromRefPosition->IsActualRef()))
{
fromRefPosition->registerAssignment = RBM_NONE;
}
else
{
fromRefPosition->spillAfter = true;
}
}
// Only handle single-def intervals whose firstRefPosition is a RefTypeDef and is not yet marked as spillAfter.
// Single-def intervals whose firstRefPosition is already marked as spillAfter don't need to be marked as
// singleDefSpill because they will always get spilled at the firstRefPosition anyway.
// This helps in spilling the single def at its definition.
//
// Note: Only mark "singleDefSpill" for those intervals who ever get spilled. The intervals that are never spilled
// will not be marked as "singleDefSpill" and hence won't get spilled at the first definition.
if (interval->isSingleDef && RefTypeIsDef(interval->firstRefPosition->refType) &&
!interval->firstRefPosition->spillAfter)
{
// TODO-CQ: Check if it is beneficial to spill at def, i.e. if it is a hot block don't worry about
// doing the spill. Another option is to track the number of refpositions and only perform this
// optimization when an interval has more than X refpositions.
interval->firstRefPosition->singleDefSpill = true;
}
assert(toRefPosition != nullptr);
#ifdef DEBUG
if (VERBOSE)
{
dumpLsraAllocationEvent(LSRA_EVENT_SPILL, interval);
}
#endif // DEBUG
INTRACK_STATS(updateLsraStat(STAT_SPILL, fromRefPosition->bbNum));
interval->isActive = false;
setIntervalAsSpilled(interval);
// If fromRefPosition occurs before the beginning of this block, mark this as living in the stack
// on entry to this block.
if (fromRefPosition->nodeLocation <= curBBStartLocation)
{
// This must be a lclVar interval
assert(interval->isLocalVar);
setInVarRegForBB(curBBNum, interval->varNum, REG_STK);
}
}
//------------------------------------------------------------------------
// unassignPhysRegNoSpill: Unassign the given physical register record from
// an active interval, without spilling.
//
// Arguments:
// regRec - the RegRecord to be unassigned
//
// Return Value:
// None.
//
// Assumptions:
// The assignedInterval must not be null, and must be active.
//
// Notes:
// This method is used to unassign a register when an interval needs to be moved to a
// different register, but not (yet) spilled.
void LinearScan::unassignPhysRegNoSpill(RegRecord* regRec)
{
Interval* assignedInterval = regRec->assignedInterval;
assert(assignedInterval != nullptr && assignedInterval->isActive);
assignedInterval->isActive = false;
unassignPhysReg(regRec, nullptr);
assignedInterval->isActive = true;
}
//------------------------------------------------------------------------
// checkAndClearInterval: Clear the assignedInterval for the given
// physical register record
//
// Arguments:
// regRec - the physical RegRecord to be unassigned
// spillRefPosition - The RefPosition at which the assignedInterval is to be spilled
// or nullptr if we aren't spilling
//
// Return Value:
// None.
//
// Assumptions:
// see unassignPhysReg
//
void LinearScan::checkAndClearInterval(RegRecord* regRec, RefPosition* spillRefPosition)
{
Interval* assignedInterval = regRec->assignedInterval;
assert(assignedInterval != nullptr);
regNumber thisRegNum = regRec->regNum;
if (spillRefPosition == nullptr)
{
// Note that we can't assert for the copyReg case
//
if (assignedInterval->physReg == thisRegNum)
{
assert(assignedInterval->isActive == false);
}
}
else
{
assert(spillRefPosition->getInterval() == assignedInterval);
}
updateAssignedInterval(regRec, nullptr, assignedInterval->registerType);
}
//------------------------------------------------------------------------
// unassignPhysReg: Unassign the given physical register record, and spill the
// assignedInterval at the given spillRefPosition, if any.
//
// Arguments:
// regRec - The RegRecord to be unassigned
// newRegType - The RegisterType of interval that would be assigned
//
// Return Value:
// None.
//
// Notes:
// On ARM, Intervals have to be unassigned taking into account the register type
// of the interval that is about to be assigned.
//
void LinearScan::unassignPhysReg(RegRecord* regRec ARM_ARG(RegisterType newRegType))
{
RegRecord* regRecToUnassign = regRec;
#ifdef TARGET_ARM
RegRecord* anotherRegRec = nullptr;
if ((regRecToUnassign->assignedInterval != nullptr) &&
(regRecToUnassign->assignedInterval->registerType == TYP_DOUBLE))
{
// If the register type of the interval (being unassigned or newly assigned) is TYP_DOUBLE,
// it must be a valid (even-numbered) double register.
if (!genIsValidDoubleReg(regRecToUnassign->regNum))
{
regRecToUnassign = findAnotherHalfRegRec(regRec);
}
}
else
{
if (newRegType == TYP_DOUBLE)
{
anotherRegRec = getSecondHalfRegRec(regRecToUnassign);
}
}
#endif
if (regRecToUnassign->assignedInterval != nullptr)
{
unassignPhysReg(regRecToUnassign, regRecToUnassign->assignedInterval->recentRefPosition);
}
#ifdef TARGET_ARM
if ((anotherRegRec != nullptr) && (anotherRegRec->assignedInterval != nullptr))
{
unassignPhysReg(anotherRegRec, anotherRegRec->assignedInterval->recentRefPosition);
}
#endif
}
//------------------------------------------------------------------------
// unassignPhysReg: Unassign the given physical register record, and spill the
// assignedInterval at the given spillRefPosition, if any.
//
// Arguments:
// regRec - the RegRecord to be unassigned
// spillRefPosition - The RefPosition at which the assignedInterval is to be spilled
//
// Return Value:
// None.
//
// Assumptions:
// The assignedInterval must not be null.
// If spillRefPosition is null, the assignedInterval must be inactive, or not currently
// assigned to this register (e.g. this is a copyReg for that Interval).
// Otherwise, spillRefPosition must be associated with the assignedInterval.
//
void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPosition)
{
Interval* assignedInterval = regRec->assignedInterval;
assert(assignedInterval != nullptr);
assert(spillRefPosition == nullptr || spillRefPosition->getInterval() == assignedInterval);
regNumber thisRegNum = regRec->regNum;
// Is assignedInterval actually still assigned to this register?
bool intervalIsAssigned = (assignedInterval->physReg == thisRegNum);
regNumber regToUnassign = thisRegNum;
#ifdef TARGET_ARM
RegRecord* anotherRegRec = nullptr;
// Prepare second half RegRecord of a double register for TYP_DOUBLE
if (assignedInterval->registerType == TYP_DOUBLE)
{
assert(isFloatRegType(regRec->registerType));
RegRecord* doubleRegRec;
if (genIsValidDoubleReg(thisRegNum))
{
anotherRegRec = getSecondHalfRegRec(regRec);
doubleRegRec = regRec;
}
else
{
regToUnassign = REG_PREV(thisRegNum);
anotherRegRec = getRegisterRecord(regToUnassign);
doubleRegRec = anotherRegRec;
}
// Both RegRecords should have been assigned to the same interval.
assert(assignedInterval == anotherRegRec->assignedInterval);
if (!intervalIsAssigned && (assignedInterval->physReg == anotherRegRec->regNum))
{
intervalIsAssigned = true;
}
clearNextIntervalRef(regToUnassign, TYP_DOUBLE);
clearSpillCost(regToUnassign, TYP_DOUBLE);
checkAndClearInterval(doubleRegRec, spillRefPosition);
// Both RegRecords should have been unassigned together.
assert(regRec->assignedInterval == nullptr);
assert(anotherRegRec->assignedInterval == nullptr);
}
else
#endif // TARGET_ARM
{
clearNextIntervalRef(thisRegNum, assignedInterval->registerType);
clearSpillCost(thisRegNum, assignedInterval->registerType);
checkAndClearInterval(regRec, spillRefPosition);
}
makeRegAvailable(regToUnassign, assignedInterval->registerType);
RefPosition* nextRefPosition = nullptr;
if (spillRefPosition != nullptr)
{
nextRefPosition = spillRefPosition->nextRefPosition;
}
if (!intervalIsAssigned && assignedInterval->physReg != REG_NA)
{
// This must have been a temporary copy reg, but we can't assert that because there
// may have been intervening RefPositions that were not copyRegs.
// reg->assignedInterval has already been set to nullptr by checkAndClearInterval()
assert(regRec->assignedInterval == nullptr);
return;
}
// regNumber victimAssignedReg = assignedInterval->physReg;
assignedInterval->physReg = REG_NA;
bool spill = assignedInterval->isActive && nextRefPosition != nullptr;
if (spill)
{
// If this is an active interval, it must have a recentRefPosition,
// otherwise it would not be active
assert(spillRefPosition != nullptr);
#if 0
// TODO-CQ: Enable this and insert an explicit GT_COPY (otherwise there's no way to communicate
// to codegen that we want the copyReg to be the new home location).
// If the last reference was a copyReg, and we're spilling the register
// it was copied from, then make the copyReg the new primary location
// if possible
if (spillRefPosition->copyReg)
{
regNumber copyFromRegNum = victimAssignedReg;
regNumber copyRegNum = genRegNumFromMask(spillRefPosition->registerAssignment);
if (copyFromRegNum == thisRegNum &&
getRegisterRecord(copyRegNum)->assignedInterval == assignedInterval)
{
assert(copyRegNum != thisRegNum);
assignedInterval->physReg = copyRegNum;
assignedInterval->assignedReg = this->getRegisterRecord(copyRegNum);
return;
}
}
#endif // 0
#ifdef DEBUG
// With JitStressRegs == 0x80 (LSRA_EXTEND_LIFETIMES), we may have a RefPosition
// that is not marked lastUse even though the treeNode is a lastUse. In that case
// we must not mark it for spill because the register will have been immediately freed
// after use. While we could conceivably add special handling for this case in codegen,
// it would be messy and undesirably cause the "bleeding" of LSRA stress modes outside
// of LSRA.
if (extendLifetimes() && assignedInterval->isLocalVar && RefTypeIsUse(spillRefPosition->refType) &&
spillRefPosition->treeNode != nullptr &&
spillRefPosition->treeNode->AsLclVar()->IsLastUse(spillRefPosition->multiRegIdx))
{
dumpLsraAllocationEvent(LSRA_EVENT_SPILL_EXTENDED_LIFETIME, assignedInterval);
assignedInterval->isActive = false;
spill = false;
// If the spillRefPosition occurs before the beginning of this block, it will have
// been marked as living in this register on entry to this block, but we now need
// to mark this as living on the stack.
if (spillRefPosition->nodeLocation <= curBBStartLocation)
{
setInVarRegForBB(curBBNum, assignedInterval->varNum, REG_STK);
if (spillRefPosition->nextRefPosition != nullptr)
{
setIntervalAsSpilled(assignedInterval);
}
}
else
{
// Otherwise, we need to mark spillRefPosition as lastUse, or the interval
// will remain active beyond its allocated range during the resolution phase.
spillRefPosition->lastUse = true;
}
}
else
#endif // DEBUG
{
spillInterval(assignedInterval, spillRefPosition DEBUGARG(nextRefPosition));
}
}
// Maintain the association with the interval, if it has more references.
// Or, if we "remembered" an interval assigned to this register, restore it.
if (nextRefPosition != nullptr)
{
assignedInterval->assignedReg = regRec;
}
else if (canRestorePreviousInterval(regRec, assignedInterval))
{
regRec->assignedInterval = regRec->previousInterval;
regRec->previousInterval = nullptr;
if (regRec->assignedInterval->physReg != thisRegNum)
{
clearNextIntervalRef(thisRegNum, regRec->assignedInterval->registerType);
}
else
{
updateNextIntervalRef(thisRegNum, regRec->assignedInterval);
}
#ifdef TARGET_ARM
// Note:
// We can not use updateAssignedInterval() and updatePreviousInterval() here,
// because regRec may not be an even-numbered float register.
// Update second half RegRecord of a double register for TYP_DOUBLE
if (regRec->assignedInterval->registerType == TYP_DOUBLE)
{
RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(regRec);
anotherHalfRegRec->assignedInterval = regRec->assignedInterval;
anotherHalfRegRec->previousInterval = nullptr;
}
#endif // TARGET_ARM
#ifdef DEBUG
if (spill)
{
dumpLsraAllocationEvent(LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, regRec->assignedInterval,
thisRegNum);
}
else
{
dumpLsraAllocationEvent(LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL, regRec->assignedInterval, thisRegNum);
}
#endif // DEBUG
}
else
{
updateAssignedInterval(regRec, nullptr, assignedInterval->registerType);
updatePreviousInterval(regRec, nullptr, assignedInterval->registerType);
}
}
//------------------------------------------------------------------------
// spillGCRefs: Spill any GC-type intervals that are currently in registers.
//
// Arguments:
// killRefPosition - The RefPosition for the kill
//
// Return Value:
// None.
//
// Notes:
// This is used to ensure that we have no live GC refs in registers at an
// unmanaged call.
//
void LinearScan::spillGCRefs(RefPosition* killRefPosition)
{
// For each physical register that can hold a GC type,
// if it is occupied by an interval of a GC type, spill that interval.
regMaskTP candidateRegs = killRefPosition->registerAssignment;
INDEBUG(bool killedRegs = false);
while (candidateRegs != RBM_NONE)
{
regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
candidateRegs &= ~nextRegBit;
regNumber nextReg = genRegNumFromMask(nextRegBit);
RegRecord* regRecord = getRegisterRecord(nextReg);
Interval* assignedInterval = regRecord->assignedInterval;
if (assignedInterval == nullptr || (assignedInterval->isActive == false))
{
continue;
}
bool needsKill = varTypeIsGC(assignedInterval->registerType);
if (!needsKill)
{
// The importer will assign a GC type to the rhs of an assignment if the lhs type is a GC type,
// even if the rhs is not. See the CEE_STLOC* case in impImportBlockCode(). As a result,
// we can have a 'GT_LCL_VAR' node with a GC type, when the lclVar itself is an integer type.
// The emitter will mark this register as holding a GC type. Therefore, we must spill this value.
// This was exposed on Arm32 with EH write-thru.
if ((assignedInterval->recentRefPosition != nullptr) &&
(assignedInterval->recentRefPosition->treeNode != nullptr))
{
needsKill = varTypeIsGC(assignedInterval->recentRefPosition->treeNode);
}
}
if (needsKill)
{
INDEBUG(killedRegs = true);
unassignPhysReg(regRecord, assignedInterval->recentRefPosition);
makeRegAvailable(nextReg, assignedInterval->registerType);
}
}
INDEBUG(dumpLsraAllocationEvent(killedRegs ? LSRA_EVENT_DONE_KILL_GC_REFS : LSRA_EVENT_NO_GC_KILLS, nullptr, REG_NA,
nullptr));
}
//------------------------------------------------------------------------
// processBlockEndAllocation: Update var locations after 'currentBlock' has been allocated
//
// Arguments:
// currentBlock - the BasicBlock we have just finished allocating registers for
//
// Return Value:
// None
//
// Notes:
// Calls processBlockEndLocations() to set the outVarToRegMap, then gets the next block,
// and sets the inVarToRegMap appropriately.
void LinearScan::processBlockEndAllocation(BasicBlock* currentBlock)
{
assert(currentBlock != nullptr);
if (enregisterLocalVars)
{
processBlockEndLocations(currentBlock);
}
markBlockVisited(currentBlock);
// Get the next block to allocate.
// When the last block in the method has successors, there will be a final "RefTypeBB" to
// ensure that we get the varToRegMap set appropriately, but in that case we don't need
// to worry about "nextBlock".
BasicBlock* nextBlock = getNextBlock();
if (nextBlock != nullptr)
{
processBlockStartLocations(nextBlock);
}
}
//------------------------------------------------------------------------
// rotateBlockStartLocation: When in the LSRA_BLOCK_BOUNDARY_ROTATE stress mode, attempt to
// "rotate" the register assignment for a localVar to the next higher
// register that is available.
//
// Arguments:
// interval - the Interval for the variable whose register is getting rotated
// targetReg - its register assignment from the predecessor block being used for live-in
// availableRegs - registers available for use
//
// Return Value:
// The new register to use.
#ifdef DEBUG
regNumber LinearScan::rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs)
{
if (targetReg != REG_STK && getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_ROTATE)
{
// If we're rotating the register locations at block boundaries, try to use
// the next higher register number of the appropriate register type.
regMaskTP candidateRegs = allRegs(interval->registerType) & availableRegs;
regNumber firstReg = REG_NA;
regNumber newReg = REG_NA;
while (candidateRegs != RBM_NONE)
{
regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
candidateRegs &= ~nextRegBit;
regNumber nextReg = genRegNumFromMask(nextRegBit);
if (nextReg > targetReg)
{
newReg = nextReg;
break;
}
else if (firstReg == REG_NA)
{
firstReg = nextReg;
}
}
if (newReg == REG_NA)
{
assert(firstReg != REG_NA);
newReg = firstReg;
}
targetReg = newReg;
}
return targetReg;
}
#endif // DEBUG
#ifdef TARGET_ARM
//--------------------------------------------------------------------------------------
// isSecondHalfReg: Test if regRec is the second half of a double register
// which is assigned to an interval.
//
// Arguments:
// regRec - a register to be tested
// interval - an interval which is assigned to some register
//
// Assumptions:
// None
//
// Return Value:
// True only if regRec is second half of assignedReg in interval
//
bool LinearScan::isSecondHalfReg(RegRecord* regRec, Interval* interval)
{
RegRecord* assignedReg = interval->assignedReg;
if (assignedReg != nullptr && interval->registerType == TYP_DOUBLE)
{
// interval should have been allocated to a valid double register
assert(genIsValidDoubleReg(assignedReg->regNum));
// Find a second half RegRecord of double register
regNumber firstRegNum = assignedReg->regNum;
regNumber secondRegNum = REG_NEXT(firstRegNum);
assert(genIsValidFloatReg(secondRegNum) && !genIsValidDoubleReg(secondRegNum));
RegRecord* secondRegRec = getRegisterRecord(secondRegNum);
return secondRegRec == regRec;
}
return false;
}
//------------------------------------------------------------------------------------------
// getSecondHalfRegRec: Get the second (odd) half of an ARM32 double register
//
// Arguments:
// regRec - A float RegRecord
//
// Assumptions:
// regRec must be a valid double register (i.e. even)
//
// Return Value:
// The RegRecord for the second half of the double register
//
RegRecord* LinearScan::getSecondHalfRegRec(RegRecord* regRec)
{
regNumber secondHalfRegNum;
RegRecord* secondHalfRegRec;
assert(genIsValidDoubleReg(regRec->regNum));
secondHalfRegNum = REG_NEXT(regRec->regNum);
secondHalfRegRec = getRegisterRecord(secondHalfRegNum);
return secondHalfRegRec;
}
//------------------------------------------------------------------------------------------
// findAnotherHalfRegRec: Find the other half RegRecord that forms the same ARM32 double register
//
// Arguments:
// regRec - A float RegRecord
//
// Assumptions:
// None
//
// Return Value:
// The RegRecord that forms the same double register as regRec
//
RegRecord* LinearScan::findAnotherHalfRegRec(RegRecord* regRec)
{
regNumber anotherHalfRegNum = findAnotherHalfRegNum(regRec->regNum);
return getRegisterRecord(anotherHalfRegNum);
}
//------------------------------------------------------------------------------------------
// findAnotherHalfRegNum: Find the other half register's number that forms the same ARM32 double register
//
// Arguments:
// regNumber - A float regNumber
//
// Assumptions:
// None
//
// Return Value:
// The register number that forms the same double register as regNum.
//
regNumber LinearScan::findAnotherHalfRegNum(regNumber regNum)
{
regNumber anotherHalfRegNum;
assert(genIsValidFloatReg(regNum));
// Find another half register for TYP_DOUBLE interval,
// following same logic in canRestorePreviousInterval().
if (genIsValidDoubleReg(regNum))
{
anotherHalfRegNum = REG_NEXT(regNum);
assert(!genIsValidDoubleReg(anotherHalfRegNum));
}
else
{
anotherHalfRegNum = REG_PREV(regNum);
assert(genIsValidDoubleReg(anotherHalfRegNum));
}
return anotherHalfRegNum;
}
#endif
//--------------------------------------------------------------------------------------
// canRestorePreviousInterval: Test if we can restore previous interval
//
// Arguments:
// regRec - a register which contains previous interval to be restored
// assignedInterval - an interval just unassigned
//
// Assumptions:
// None
//
// Return Value:
// True only if previous interval of regRec can be restored
//
bool LinearScan::canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval)
{
bool retVal =
(regRec->previousInterval != nullptr && regRec->previousInterval != assignedInterval &&
regRec->previousInterval->assignedReg == regRec && regRec->previousInterval->getNextRefPosition() != nullptr);
#ifdef TARGET_ARM
if (retVal && regRec->previousInterval->registerType == TYP_DOUBLE)
{
RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(regRec);
retVal = retVal && anotherHalfRegRec->assignedInterval == nullptr;
}
#endif
return retVal;
}
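//------------------------------------------------------------------------
// isAssignedToInterval: Check whether the given RegRecord is assigned to the given interval,
// either directly or (on ARM) as the second half of its double register.
//
// Arguments:
// interval - the Interval of interest
// regRec - the RegRecord of interest
//
// Return Value:
// True iff 'regRec' is (wholly or partly) assigned to 'interval'.
//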
bool LinearScan::isAssignedToInterval(Interval* interval, RegRecord* regRec)
{
bool isAssigned = (interval->assignedReg == regRec);
#ifdef TARGET_ARM
isAssigned |= isSecondHalfReg(regRec, interval);
#endif
return isAssigned;
}
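//------------------------------------------------------------------------
// unassignIntervalBlockStart: At a block boundary, unassign any interval currently occupying
// the given register, updating the incoming var-to-reg map if needed.
//
// Arguments:
// regRecord - the RegRecord for the register of interest
// inVarToRegMap - the incoming var-to-reg map for the block, or nullptr if it should
// not be updated
//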
void LinearScan::unassignIntervalBlockStart(RegRecord* regRecord, VarToRegMap inVarToRegMap)
{
// Is there another interval currently assigned to this register? If so unassign it.
Interval* assignedInterval = regRecord->assignedInterval;
if (assignedInterval != nullptr)
{
if (isAssignedToInterval(assignedInterval, regRecord))
{
// Only localVars, constants or vector upper halves should be assigned to registers at block boundaries.
if (!assignedInterval->isLocalVar)
{
assert(assignedInterval->isConstant || assignedInterval->IsUpperVector());
// Don't need to update the VarToRegMap.
inVarToRegMap = nullptr;
}
regNumber assignedRegNum = assignedInterval->assignedReg->regNum;
// If the interval is active, it will be set to active when we reach its new
// register assignment (which we must not yet have done, or it wouldn't still be
// assigned to this register).
assignedInterval->isActive = false;
unassignPhysReg(assignedInterval->assignedReg, nullptr);
if ((inVarToRegMap != nullptr) && inVarToRegMap[assignedInterval->getVarIndex(compiler)] == assignedRegNum)
{
inVarToRegMap[assignedInterval->getVarIndex(compiler)] = REG_STK;
}
}
else
{
// This interval is no longer assigned to this register.
updateAssignedInterval(regRecord, nullptr, assignedInterval->registerType);
}
}
}
//------------------------------------------------------------------------
// processBlockStartLocations: Update var locations on entry to 'currentBlock' and clear constant
// registers.
//
// Arguments:
// currentBlock - the BasicBlock we are about to allocate registers for
//
// Return Value:
// None
//
// Notes:
// During the allocation pass (allocationPassComplete = false), we use the outVarToRegMap
// of the selected predecessor to determine the lclVar locations for the inVarToRegMap.
// During the resolution (write-back when allocationPassComplete = true) pass, we only
// modify the inVarToRegMap in cases where a lclVar was spilled after the block had been
// completed.
void LinearScan::processBlockStartLocations(BasicBlock* currentBlock)
{
// If we have no register candidates we should only call this method during allocation.
assert(enregisterLocalVars || !allocationPassComplete);
if (!enregisterLocalVars)
{
// Just clear any constant registers and return.
resetAvailableRegs();
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
Interval* assignedInterval = physRegRecord->assignedInterval;
clearNextIntervalRef(reg, physRegRecord->registerType);
clearSpillCost(reg, physRegRecord->registerType);
if (assignedInterval != nullptr)
{
assert(assignedInterval->isConstant);
physRegRecord->assignedInterval = nullptr;
}
}
return;
}
unsigned predBBNum = blockInfo[currentBlock->bbNum].predBBNum;
VarToRegMap predVarToRegMap = getOutVarToRegMap(predBBNum);
VarToRegMap inVarToRegMap = getInVarToRegMap(currentBlock->bbNum);
// If this block enters an exception region, all incoming vars are on the stack.
if (predBBNum == 0)
{
#if DEBUG
if (blockInfo[currentBlock->bbNum].hasEHBoundaryIn || !allocationPassComplete)
{
// This should still be in its initialized empty state.
for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
{
// In the case where we're extending lifetimes for stress, we are intentionally modeling variables
// as live when they really aren't to create extra register pressure & constraints.
// However, this means that non-EH-vars will be live into EH regions. We can and should ignore the
// locations of these. Note that they aren't reported to codegen anyway.
if (!getLsraExtendLifeTimes() || VarSetOps::IsMember(compiler, currentBlock->bbLiveIn, varIndex))
{
assert(inVarToRegMap[varIndex] == REG_STK);
}
}
}
#endif // DEBUG
predVarToRegMap = inVarToRegMap;
}
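// Compute the set of register-candidate lclVars that are live into this block.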
VarSetOps::AssignNoCopy(compiler, currentLiveVars,
VarSetOps::Intersection(compiler, registerCandidateVars, currentBlock->bbLiveIn));
#ifdef DEBUG
if (getLsraExtendLifeTimes())
{
VarSetOps::AssignNoCopy(compiler, currentLiveVars, registerCandidateVars);
}
// If we are rotating register assignments at block boundaries, we want to make the
// inactive registers available for the rotation.
regMaskTP inactiveRegs = RBM_NONE;
#endif // DEBUG
regMaskTP liveRegs = RBM_NONE;
VarSetOps::Iter iter(compiler, currentLiveVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
if (!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate)
{
continue;
}
regNumber targetReg;
Interval* interval = getIntervalForLocalVar(varIndex);
RefPosition* nextRefPosition = interval->getNextRefPosition();
assert((nextRefPosition != nullptr) || (interval->isWriteThru));
bool leaveOnStack = false;
// Special handling for variables live in/out of exception handlers.
if (interval->isWriteThru)
{
// There are 3 cases where we will leave writethru lclVars on the stack:
// 1) There is no predecessor.
// 2) It is conservatively or artificially live - that is, it has no next use,
// so there is no place for codegen to record that the register is no longer occupied.
// 3) This block has a predecessor with an outgoing EH edge. We won't be able to add "join"
// resolution to load the EH var into a register along that edge, so it must be on stack.
if ((predBBNum == 0) || (nextRefPosition == nullptr) || (RefTypeIsDef(nextRefPosition->refType)) ||
blockInfo[currentBlock->bbNum].hasEHPred)
{
leaveOnStack = true;
}
}
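// During the allocation pass, take the incoming location from the selected predecessor's
// outVarToRegMap (possibly rotated under register stress).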
if (!allocationPassComplete)
{
targetReg = getVarReg(predVarToRegMap, varIndex);
if (leaveOnStack)
{
targetReg = REG_STK;
}
#ifdef DEBUG
regNumber newTargetReg = rotateBlockStartLocation(interval, targetReg, (~liveRegs | inactiveRegs));
if (newTargetReg != targetReg)
{
targetReg = newTargetReg;
setIntervalAsSplit(interval);
}
#endif // DEBUG
setVarReg(inVarToRegMap, varIndex, targetReg);
}
else // allocationPassComplete (i.e. resolution/write-back pass)
{
targetReg = getVarReg(inVarToRegMap, varIndex);
// There are four cases that we need to consider during the resolution pass:
// 1. This variable had a register allocated initially, and it was not spilled in the RefPosition
// that feeds this block. In this case, both targetReg and predVarToRegMap[varIndex] will be targetReg.
// 2. This variable had not been spilled prior to the end of predBB, but was later spilled, so
// predVarToRegMap[varIndex] will be REG_STK, but targetReg is its former allocated value.
// In this case, we will normally change it to REG_STK. We will update its "spilled" status when we
// encounter it in resolveLocalRef().
// 2a. If the next RefPosition is marked as a copyReg, we need to retain the allocated register. This is
// because the copyReg RefPosition will not have recorded the "home" register, yet downstream
// RefPositions rely on the correct "home" register.
// 3. This variable was spilled before we reached the end of predBB. In this case, both targetReg and
// predVarToRegMap[varIndex] will be REG_STK, and the next RefPosition will have been marked
// as reload during allocation time if necessary (note that by the time we actually reach the next
// RefPosition, we may be using a different predecessor, in which it is still in a register).
// 4. This variable was spilled during the allocation of this block, so targetReg is REG_STK
// (because we set inVarToRegMap at the time we spilled it), but predVarToRegMap[varIndex]
// is not REG_STK. We retain the REG_STK value in the inVarToRegMap.
if (targetReg != REG_STK)
{
if (getVarReg(predVarToRegMap, varIndex) != REG_STK)
{
// Case #1 above.
assert(getVarReg(predVarToRegMap, varIndex) == targetReg ||
getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_ROTATE);
}
else if (!nextRefPosition->copyReg)
{
// case #2 above.
setVarReg(inVarToRegMap, varIndex, REG_STK);
targetReg = REG_STK;
}
// Else case 2a. - retain targetReg.
}
// Else case #3 or #4, we retain targetReg and nothing further to do or assert.
}
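// Now reconcile the interval's current physical register (if any) with the target location
// for this block.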
if (interval->physReg == targetReg)
{
if (interval->isActive)
{
assert(targetReg != REG_STK);
assert(interval->assignedReg != nullptr && interval->assignedReg->regNum == targetReg &&
interval->assignedReg->assignedInterval == interval);
liveRegs |= getRegMask(targetReg, interval->registerType);
continue;
}
}
else if (interval->physReg != REG_NA)
{
// This can happen if we are using the locations from a basic block other than the
// immediately preceding one - where the variable was in a different location.
if ((targetReg != REG_STK) || leaveOnStack)
{
// Unassign it from the register (it may get a new register below).
if (interval->assignedReg != nullptr && interval->assignedReg->assignedInterval == interval)
{
interval->isActive = false;
unassignPhysReg(getRegisterRecord(interval->physReg), nullptr);
}
else
{
// This interval was live in this register the last time we saw a reference to it,
// but has since been displaced.
interval->physReg = REG_NA;
}
}
else if (!allocationPassComplete)
{
// Keep the register assignment - if another var has it, it will get unassigned.
// Otherwise, resolution will fix it up later, and it will be more
// likely to match other assignments this way.
targetReg = interval->physReg;
interval->isActive = true;
liveRegs |= getRegMask(targetReg, interval->registerType);
INDEBUG(inactiveRegs |= genRegMask(targetReg));
setVarReg(inVarToRegMap, varIndex, targetReg);
}
else
{
interval->physReg = REG_NA;
}
}
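// If the variable is to be live in a register at block entry, make sure that register is
// now assigned to this interval.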
if (targetReg != REG_STK)
{
RegRecord* targetRegRecord = getRegisterRecord(targetReg);
liveRegs |= getRegMask(targetReg, interval->registerType);
if (!allocationPassComplete)
{
updateNextIntervalRef(targetReg, interval);
updateSpillCost(targetReg, interval);
}
if (!interval->isActive)
{
interval->isActive = true;
interval->physReg = targetReg;
interval->assignedReg = targetRegRecord;
}
if (targetRegRecord->assignedInterval != interval)
{
#ifdef TARGET_ARM
// If this is a TYP_DOUBLE interval, and the assigned interval is either null or is TYP_FLOAT,
// we also need to unassign the other half of the register.
// Note that if the assigned interval is TYP_DOUBLE, it will be unassigned below.
if ((interval->registerType == TYP_DOUBLE) &&
((targetRegRecord->assignedInterval == nullptr) ||
(targetRegRecord->assignedInterval->registerType == TYP_FLOAT)))
{
assert(genIsValidDoubleReg(targetReg));
unassignIntervalBlockStart(getSecondHalfRegRec(targetRegRecord),
allocationPassComplete ? nullptr : inVarToRegMap);
}
// If this is a TYP_FLOAT interval, and the assigned interval was TYP_DOUBLE, we also
// need to update the liveRegs to specify that the other half is not live anymore.
// As mentioned above, for TYP_DOUBLE, the other half will be unassigned further below.
if ((interval->registerType == TYP_FLOAT) &&
((targetRegRecord->assignedInterval != nullptr) &&
(targetRegRecord->assignedInterval->registerType == TYP_DOUBLE)))
{
RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(targetRegRecord);
// Use TYP_FLOAT to get the regmask of just the half reg.
liveRegs &= ~getRegMask(anotherHalfRegRec->regNum, TYP_FLOAT);
}
#endif // TARGET_ARM
unassignIntervalBlockStart(targetRegRecord, allocationPassComplete ? nullptr : inVarToRegMap);
assignPhysReg(targetRegRecord, interval);
}
if (interval->recentRefPosition != nullptr && !interval->recentRefPosition->copyReg &&
interval->recentRefPosition->registerAssignment != genRegMask(targetReg))
{
interval->getNextRefPosition()->outOfOrder = true;
}
}
}
// Unassign any registers that are no longer live, and set register state, if allocating.
if (!allocationPassComplete)
{
resetRegState();
setRegsInUse(liveRegs);
}
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
if ((liveRegs & genRegMask(reg)) == 0)
{
makeRegAvailable(reg, physRegRecord->registerType);
Interval* assignedInterval = physRegRecord->assignedInterval;
if (assignedInterval != nullptr)
{
assert(assignedInterval->isLocalVar || assignedInterval->isConstant ||
assignedInterval->IsUpperVector());
if (!assignedInterval->isConstant && assignedInterval->assignedReg == physRegRecord)
{
assignedInterval->isActive = false;
if (assignedInterval->getNextRefPosition() == nullptr)
{
unassignPhysReg(physRegRecord, nullptr);
}
if (!assignedInterval->IsUpperVector())
{
inVarToRegMap[assignedInterval->getVarIndex(compiler)] = REG_STK;
}
}
else
{
// This interval may still be active, but was in another register in an
// intervening block.
updateAssignedInterval(physRegRecord, nullptr, assignedInterval->registerType);
}
#ifdef TARGET_ARM
// unassignPhysReg, above, may have restored a 'previousInterval', in which case we need to
// get the value of 'physRegRecord->assignedInterval' rather than using 'assignedInterval'.
if (physRegRecord->assignedInterval != nullptr)
{
assignedInterval = physRegRecord->assignedInterval;
}
if (assignedInterval->registerType == TYP_DOUBLE)
{
// Skip next float register, because we already addressed a double register
assert(genIsValidDoubleReg(reg));
reg = REG_NEXT(reg);
makeRegAvailable(reg, physRegRecord->registerType);
}
#endif // TARGET_ARM
}
}
#ifdef TARGET_ARM
else
{
Interval* assignedInterval = physRegRecord->assignedInterval;
if (assignedInterval != nullptr && assignedInterval->registerType == TYP_DOUBLE)
{
// Skip next float register, because we already addressed a double register
assert(genIsValidDoubleReg(reg));
reg = REG_NEXT(reg);
}
}
#endif // TARGET_ARM
}
}
//------------------------------------------------------------------------
// processBlockEndLocations: Record the variables occupying registers after completing the current block.
//
// Arguments:
// currentBlock - the block we have just completed.
//
// Return Value:
// None
//
// Notes:
// This must be called both during the allocation and resolution (write-back) phases.
// This is because we need to have the outVarToRegMap locations in order to set the locations
// at successor blocks during allocation time, but if lclVars are spilled after a block has been
// completed, we need to record the REG_STK location for those variables at resolution time.
void LinearScan::processBlockEndLocations(BasicBlock* currentBlock)
{
assert(currentBlock != nullptr && currentBlock->bbNum == curBBNum);
VarToRegMap outVarToRegMap = getOutVarToRegMap(curBBNum);
VarSetOps::AssignNoCopy(compiler, currentLiveVars,
VarSetOps::Intersection(compiler, registerCandidateVars, currentBlock->bbLiveOut));
#ifdef DEBUG
if (getLsraExtendLifeTimes())
{
VarSetOps::Assign(compiler, currentLiveVars, registerCandidateVars);
}
#endif // DEBUG
VarSetOps::Iter iter(compiler, currentLiveVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
Interval* interval = getIntervalForLocalVar(varIndex);
if (interval->isActive)
{
assert(interval->physReg != REG_NA && interval->physReg != REG_STK);
setVarReg(outVarToRegMap, varIndex, interval->physReg);
}
else
{
outVarToRegMap[varIndex] = REG_STK;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Ensure that we have no partially-spilled large vector locals.
assert(!Compiler::varTypeNeedsPartialCalleeSave(interval->registerType) || !interval->isPartiallySpilled);
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
}
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_END_BB));
}
#ifdef DEBUG
void LinearScan::dumpRefPositions(const char* str)
{
printf("------------\n");
printf("REFPOSITIONS %s: \n", str);
printf("------------\n");
for (RefPosition& refPos : refPositions)
{
refPos.dump(this);
}
}
#endif // DEBUG
//------------------------------------------------------------------------
// LinearScan::makeRegisterInactive: Make the interval currently assigned to
// a register inactive.
//
// Arguments:
// physRegRecord - the RegRecord for the register
//
// Return Value:
// None.
//
// Notes:
// It may be that the RegRecord has already been freed, e.g. due to a kill,
// or it may be that the register was a copyReg, so is not the assigned register
// of the Interval currently occupying the register, in which case this method has no effect.
//
void LinearScan::makeRegisterInactive(RegRecord* physRegRecord)
{
Interval* assignedInterval = physRegRecord->assignedInterval;
// It may have already been freed by a "Kill"
if ((assignedInterval != nullptr) && (assignedInterval->physReg == physRegRecord->regNum))
{
assignedInterval->isActive = false;
if (assignedInterval->isConstant)
{
clearNextIntervalRef(physRegRecord->regNum, assignedInterval->registerType);
}
}
}
//------------------------------------------------------------------------
// LinearScan::freeRegister: Make a register available for use
//
// Arguments:
// physRegRecord - the RegRecord for the register to be freed.
//
// Return Value:
// None.
//
// Assumptions:
// None.
// It may be that the RegRecord has already been freed, e.g. due to a kill,
// in which case this method has no effect.
//
// Notes:
// If there is currently an Interval assigned to this register, and it has
// more references (i.e. this is a local last-use, but more uses and/or
// defs remain), it will remain assigned to the physRegRecord. However, since
// it is marked inactive, the register will be available, albeit less desirable
// to allocate.
//
void LinearScan::freeRegister(RegRecord* physRegRecord)
{
Interval* assignedInterval = physRegRecord->assignedInterval;
makeRegAvailable(physRegRecord->regNum, physRegRecord->registerType);
clearSpillCost(physRegRecord->regNum, physRegRecord->registerType);
makeRegisterInactive(physRegRecord);
if (assignedInterval != nullptr)
{
// TODO: Under the following conditions we should be just putting it in regsToMakeInactive
// not regsToFree.
//
// We don't unassign in the following conditions:
// - If this is a constant node that we may encounter again, OR
// - If its recent RefPosition is not a last-use and its next RefPosition is non-null (i.e. a use remains).
// We do unassign when there are no more RefPositions, or when the next
// one is a def. Note that the latter condition doesn't actually ensure that
// there aren't subsequent uses that could be reached by a value in the assigned
// register, but is merely a heuristic to avoid tying up the register (or using
// it when it's non-optimal). A better alternative would be to use SSA, so that
// we wouldn't unnecessarily link separate live ranges to the same register.
//
RefPosition* nextRefPosition = assignedInterval->getNextRefPosition();
if (!assignedInterval->isConstant && (nextRefPosition == nullptr || RefTypeIsDef(nextRefPosition->refType)))
{
#ifdef TARGET_ARM
assert((assignedInterval->registerType != TYP_DOUBLE) || genIsValidDoubleReg(physRegRecord->regNum));
#endif // TARGET_ARM
unassignPhysReg(physRegRecord, nullptr);
}
}
}
//------------------------------------------------------------------------
// LinearScan::freeRegisters: Free the registers in 'regsToFree'
//
// Arguments:
// regsToFree - the mask of registers to free
//
void LinearScan::freeRegisters(regMaskTP regsToFree)
{
if (regsToFree == RBM_NONE)
{
return;
}
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FREE_REGS));
makeRegsAvailable(regsToFree);
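// Walk the mask one register at a time: peel off the lowest set bit, map it back to a
// regNumber, and free the corresponding RegRecord.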
while (regsToFree != RBM_NONE)
{
regMaskTP nextRegBit = genFindLowestBit(regsToFree);
regsToFree &= ~nextRegBit;
regNumber nextReg = genRegNumFromMask(nextRegBit);
RegRecord* regRecord = getRegisterRecord(nextReg);
#ifdef TARGET_ARM
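// On ARM, a TYP_DOUBLE interval occupies a float register pair; remove the bit for the
// odd (upper) half from the mask as well so it isn't processed separately.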
if (regRecord->assignedInterval != nullptr && (regRecord->assignedInterval->registerType == TYP_DOUBLE))
{
assert(genIsValidDoubleReg(nextReg));
regsToFree &= ~(nextRegBit << 1);
}
#endif
freeRegister(regRecord);
}
}
//------------------------------------------------------------------------
// LinearScan::allocateRegisters: Perform the actual register allocation by iterating over
// all of the previously constructed Intervals
//
void LinearScan::allocateRegisters()
{
JITDUMP("*************** In LinearScan::allocateRegisters()\n");
DBEXEC(VERBOSE, lsraDumpIntervals("before allocateRegisters"));
// at start, nothing is active except for register args
for (Interval& interval : intervals)
{
Interval* currentInterval = &interval;
currentInterval->recentRefPosition = nullptr;
currentInterval->isActive = false;
if (currentInterval->isLocalVar)
{
LclVarDsc* varDsc = currentInterval->getLocalVar(compiler);
if (varDsc->lvIsRegArg && currentInterval->firstRefPosition != nullptr)
{
currentInterval->isActive = true;
}
}
}
if (enregisterLocalVars)
{
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars);
unsigned largeVectorVarIndex = 0;
while (largeVectorVarsIter.NextElem(&largeVectorVarIndex))
{
Interval* lclVarInterval = getIntervalForLocalVar(largeVectorVarIndex);
lclVarInterval->isPartiallySpilled = false;
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
}
resetRegState();
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->recentRefPosition = nullptr;
updateNextFixedRef(physRegRecord, physRegRecord->firstRefPosition);
// Is this an incoming arg register? (Note that we don't, currently, consider reassigning
// an incoming arg register as having spill cost.)
Interval* interval = physRegRecord->assignedInterval;
if (interval != nullptr)
{
#ifdef TARGET_ARM
if ((interval->registerType != TYP_DOUBLE) || genIsValidDoubleReg(reg))
#endif // TARGET_ARM
{
updateNextIntervalRef(reg, interval);
updateSpillCost(reg, interval);
setRegInUse(reg, interval->registerType);
INDEBUG(registersToDump |= getRegMask(reg, interval->registerType));
}
}
else
{
clearNextIntervalRef(reg, physRegRecord->registerType);
clearSpillCost(reg, physRegRecord->registerType);
}
}
#ifdef DEBUG
if (VERBOSE)
{
dumpRefPositions("BEFORE ALLOCATION");
dumpVarRefPositions("BEFORE ALLOCATION");
printf("\n\nAllocating Registers\n"
"--------------------\n");
// Start with a small set of commonly used registers, so that we don't keep having to print a new title.
// Include all the arg regs, as they may already have values assigned to them.
registersToDump = LsraLimitSmallIntSet | LsraLimitSmallFPSet | RBM_ARG_REGS;
dumpRegRecordHeader();
// Now print an empty "RefPosition", since we complete the dump of the regs at the beginning of the loop.
printf(indentFormat, "");
}
#endif // DEBUG
BasicBlock* currentBlock = nullptr;
LsraLocation prevLocation = MinLocation;
regMaskTP regsToFree = RBM_NONE;
regMaskTP delayRegsToFree = RBM_NONE;
regMaskTP regsToMakeInactive = RBM_NONE;
regMaskTP delayRegsToMakeInactive = RBM_NONE;
regMaskTP copyRegsToFree = RBM_NONE;
regsInUseThisLocation = RBM_NONE;
regsInUseNextLocation = RBM_NONE;
// This is the most recent RefPosition for which a register was allocated
// - currently only used for DEBUG but maintained in non-debug, for clarity of code
// (and will be optimized away because in non-debug spillAlways() unconditionally returns false)
RefPosition* lastAllocatedRefPosition = nullptr;
bool handledBlockEnd = false;
for (RefPosition& refPositionIterator : refPositions)
{
RefPosition* currentRefPosition = &refPositionIterator;
RefPosition* nextRefPosition = currentRefPosition->nextRefPosition;
// TODO: Can we combine this with the freeing of registers below? It might
// mess with the dump, since this was previously being done before the call below
// to dumpRegRecords.
regMaskTP tempRegsToMakeInactive = (regsToMakeInactive | delayRegsToMakeInactive);
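// Make inactive (and clear the spill cost of) all registers that are pending deactivation,
// including those whose deactivation was delayed.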
while (tempRegsToMakeInactive != RBM_NONE)
{
regMaskTP nextRegBit = genFindLowestBit(tempRegsToMakeInactive);
tempRegsToMakeInactive &= ~nextRegBit;
regNumber nextReg = genRegNumFromMask(nextRegBit);
RegRecord* regRecord = getRegisterRecord(nextReg);
clearSpillCost(regRecord->regNum, regRecord->registerType);
makeRegisterInactive(regRecord);
}
if (currentRefPosition->nodeLocation > prevLocation)
{
makeRegsAvailable(regsToMakeInactive);
// TODO: Clean this up. We need to make the delayRegs inactive as well, but don't want
// to mark them as free yet.
regsToMakeInactive |= delayRegsToMakeInactive;
regsToMakeInactive = delayRegsToMakeInactive;
delayRegsToMakeInactive = RBM_NONE;
}
#ifdef DEBUG
// Set the activeRefPosition to null until we're done with any boundary handling.
activeRefPosition = nullptr;
if (VERBOSE)
{
// We're really dumping the RegRecords "after" the previous RefPosition, but it's more convenient
// to do this here, since there are a number of "continue"s in this loop.
dumpRegRecords();
}
#endif // DEBUG
// This is the previousRefPosition of the current Referent, if any
RefPosition* previousRefPosition = nullptr;
Interval* currentInterval = nullptr;
Referenceable* currentReferent = nullptr;
RefType refType = currentRefPosition->refType;
currentReferent = currentRefPosition->referent;
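// Under the spillAlways() stress mode, unassign (spill) the register allocated at the previous
// RefPosition before handling this one (only for defs or local var intervals, and not for
// internal intervals or physical register references).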
if (spillAlways() && lastAllocatedRefPosition != nullptr && !lastAllocatedRefPosition->IsPhysRegRef() &&
!lastAllocatedRefPosition->getInterval()->isInternal &&
(RefTypeIsDef(lastAllocatedRefPosition->refType) || lastAllocatedRefPosition->getInterval()->isLocalVar))
{
assert(lastAllocatedRefPosition->registerAssignment != RBM_NONE);
RegRecord* regRecord = lastAllocatedRefPosition->getInterval()->assignedReg;
unassignPhysReg(regRecord, lastAllocatedRefPosition);
// Now set lastAllocatedRefPosition to null, so that we don't try to spill it again
lastAllocatedRefPosition = nullptr;
}
// We wait to free any registers until we've completed all the
// uses for the current node.
// This avoids reusing registers too soon.
// We free before the last true def (after all the uses & internal
// registers), and then again at the beginning of the next node.
// This is made easier by assigning two LsraLocations per node - one
// for all the uses, internal registers & all but the last def, and
// another for the final def (if any).
LsraLocation currentLocation = currentRefPosition->nodeLocation;
// Free at a new location.
if (currentLocation > prevLocation)
{
// CopyRegs are simply made available - we don't want to make the associated interval inactive.
makeRegsAvailable(copyRegsToFree);
copyRegsToFree = RBM_NONE;
regsInUseThisLocation = regsInUseNextLocation;
regsInUseNextLocation = RBM_NONE;
if ((regsToFree | delayRegsToFree) != RBM_NONE)
{
freeRegisters(regsToFree);
if ((currentLocation > (prevLocation + 1)) && (delayRegsToFree != RBM_NONE))
{
// We should never see a delayReg that is delayed until a Location that has no RefPosition
// (that would be the RefPosition that it was supposed to interfere with).
assert(!"Found a delayRegFree associated with Location with no reference");
// However, to be cautious for the Release build case, we will free them.
freeRegisters(delayRegsToFree);
delayRegsToFree = RBM_NONE;
regsInUseThisLocation = RBM_NONE;
}
regsToFree = delayRegsToFree;
delayRegsToFree = RBM_NONE;
#ifdef DEBUG
// Validate the current state just after we've freed the registers. This ensures that any pending
// freed registers will have had their state updated to reflect the intervals they were holding.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
regMaskTP regMask = genRegMask(reg);
// If this isn't available or if it's still waiting to be freed (i.e. it was in
// delayRegsToFree and so now it's in regsToFree), then skip it.
if ((regMask & (availableIntRegs | availableFloatRegs) & ~regsToFree) == RBM_NONE)
{
continue;
}
RegRecord* physRegRecord = getRegisterRecord(reg);
Interval* assignedInterval = physRegRecord->assignedInterval;
if (assignedInterval != nullptr)
{
bool isAssignedReg = (assignedInterval->physReg == reg);
RefPosition* recentRefPosition = assignedInterval->recentRefPosition;
// If we have a copyReg or a moveReg, we might have assigned this register to an Interval,
// but that isn't considered its assignedReg.
if (recentRefPosition != nullptr)
{
if (recentRefPosition->refType == RefTypeExpUse)
{
// We don't update anything on these, as they're just placeholders to extend the
// lifetime.
continue;
}
// For copyReg or moveReg, we don't have anything further to assert.
if (recentRefPosition->copyReg || recentRefPosition->moveReg)
{
continue;
}
assert(assignedInterval->isConstant == isRegConstant(reg, assignedInterval->registerType));
if (assignedInterval->isActive)
{
// If this is not the register most recently allocated, it must have come from a copyReg,
// been placed there by the inVarToRegMap, or be one of the upper vector
// save/restore refPositions.
// In any of these cases it must be a lclVar.
if (!isAssignedToInterval(assignedInterval, physRegRecord))
{
// We'd like to assert that this was either set by the inVarToRegMap, or by
// a copyReg, but we can't traverse backward to check for a copyReg, because
// we only have recentRefPosition, and there may be a previous RefPosition
// at the same Location with a copyReg.
bool sanityCheck = assignedInterval->isLocalVar;
// For upper vector interval, make sure it was one of the save/restore only.
if (assignedInterval->IsUpperVector())
{
sanityCheck |= (recentRefPosition->refType == RefTypeUpperVectorSave) ||
(recentRefPosition->refType == RefTypeUpperVectorRestore);
}
assert(sanityCheck);
}
if (isAssignedReg)
{
assert(nextIntervalRef[reg] == assignedInterval->getNextRefLocation());
assert(!isRegAvailable(reg, assignedInterval->registerType));
assert((recentRefPosition == nullptr) ||
(spillCost[reg] == getSpillWeight(physRegRecord)));
}
else
{
assert((nextIntervalRef[reg] == MaxLocation) ||
isRegBusy(reg, assignedInterval->registerType));
}
}
else
{
if ((assignedInterval->physReg == reg) && !assignedInterval->isConstant)
{
assert(nextIntervalRef[reg] == assignedInterval->getNextRefLocation());
}
else
{
assert(nextIntervalRef[reg] == MaxLocation);
assert(isRegAvailable(reg, assignedInterval->registerType));
assert(spillCost[reg] == 0);
}
}
}
}
else
{
assert(isRegAvailable(reg, physRegRecord->registerType));
assert(!isRegConstant(reg, physRegRecord->registerType));
assert(nextIntervalRef[reg] == MaxLocation);
assert(spillCost[reg] == 0);
}
LsraLocation thisNextFixedRef = physRegRecord->getNextRefLocation();
assert(nextFixedRef[reg] == thisNextFixedRef);
#ifdef TARGET_ARM
// If this is occupied by a double interval, skip the corresponding float reg.
if ((assignedInterval != nullptr) && (assignedInterval->registerType == TYP_DOUBLE))
{
reg = REG_NEXT(reg);
}
#endif
}
#endif // DEBUG
}
}
prevLocation = currentLocation;
// get previous refposition, then current refpos is the new previous
if (currentReferent != nullptr)
{
previousRefPosition = currentReferent->recentRefPosition;
currentReferent->recentRefPosition = currentRefPosition;
}
else
{
assert((refType == RefTypeBB) || (refType == RefTypeKillGCRefs));
}
#ifdef DEBUG
activeRefPosition = currentRefPosition;
// For the purposes of register resolution, we handle the DummyDefs before
// the block boundary - so the RefTypeBB is after all the DummyDefs.
// However, for the purposes of allocation, we want to handle the block
// boundary first, so that we can free any registers occupied by lclVars
// that aren't live in the next block and make them available for the
// DummyDefs.
// If we've already handled the BlockEnd, but now we're seeing the RefTypeBB,
// dump it now.
if ((refType == RefTypeBB) && handledBlockEnd)
{
dumpNewBlock(currentBlock, currentRefPosition->nodeLocation);
}
#endif // DEBUG
if (!handledBlockEnd && (refType == RefTypeBB || refType == RefTypeDummyDef))
{
// Free any delayed regs (now in regsToFree) before processing the block boundary
freeRegisters(regsToFree);
regsToFree = RBM_NONE;
regsInUseThisLocation = RBM_NONE;
regsInUseNextLocation = RBM_NONE;
handledBlockEnd = true;
curBBStartLocation = currentRefPosition->nodeLocation;
if (currentBlock == nullptr)
{
currentBlock = startBlockSequence();
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_START_BB, nullptr, REG_NA, compiler->fgFirstBB));
}
else
{
processBlockEndAllocation(currentBlock);
currentBlock = moveToNextBlock();
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_START_BB, nullptr, REG_NA, currentBlock));
}
}
if (refType == RefTypeBB)
{
handledBlockEnd = false;
continue;
}
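// A RefTypeKillGCRefs position indicates a point at which GC references may not remain in
// registers; spillGCRefs() spills any interval holding a GC ref that is currently in a register.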
if (refType == RefTypeKillGCRefs)
{
spillGCRefs(currentRefPosition);
continue;
}
if (currentRefPosition->isPhysRegRef)
{
RegRecord* regRecord = currentRefPosition->getReg();
Interval* assignedInterval = regRecord->assignedInterval;
updateNextFixedRef(regRecord, currentRefPosition->nextRefPosition);
// If this is a FixedReg, disassociate any inactive constant interval from this register.
// Otherwise, do nothing.
if (refType == RefTypeFixedReg)
{
if (assignedInterval != nullptr && !assignedInterval->isActive && assignedInterval->isConstant)
{
clearConstantReg(regRecord->regNum, assignedInterval->registerType);
regRecord->assignedInterval = nullptr;
spillCost[regRecord->regNum] = 0;
#ifdef TARGET_ARM
// Update overlapping floating point register for TYP_DOUBLE
if (assignedInterval->registerType == TYP_DOUBLE)
{
RegRecord* otherRegRecord = findAnotherHalfRegRec(regRecord);
assert(otherRegRecord->assignedInterval == assignedInterval);
otherRegRecord->assignedInterval = nullptr;
spillCost[otherRegRecord->regNum] = 0;
}
#endif // TARGET_ARM
}
regsInUseThisLocation |= currentRefPosition->registerAssignment;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FIXED_REG, nullptr, currentRefPosition->assignedReg()));
continue;
}
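// A RefTypeKill frees the register unconditionally: unassign whatever interval currently
// occupies it, make it available again, and clear any "busy until kill" state.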
if (refType == RefTypeKill)
{
if (assignedInterval != nullptr)
{
unassignPhysReg(regRecord, assignedInterval->recentRefPosition);
clearConstantReg(regRecord->regNum, assignedInterval->registerType);
makeRegAvailable(regRecord->regNum, assignedInterval->registerType);
}
clearRegBusyUntilKill(regRecord->regNum);
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum));
continue;
}
}
// If this is an exposed use, do nothing - this is merely a placeholder to attempt to
// ensure that a register is allocated for the full lifetime. The resolution logic
// will take care of moving to the appropriate register if needed.
if (refType == RefTypeExpUse)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_EXP_USE));
currentInterval = currentRefPosition->getInterval();
if (currentInterval->physReg != REG_NA)
{
updateNextIntervalRef(currentInterval->physReg, currentInterval);
}
continue;
}
regNumber assignedRegister = REG_NA;
assert(currentRefPosition->isIntervalRef());
currentInterval = currentRefPosition->getInterval();
assert(currentInterval != nullptr);
assignedRegister = currentInterval->physReg;
// Identify the special cases where we decide up-front not to allocate
bool allocate = true;
bool didDump = false;
if (refType == RefTypeParamDef || refType == RefTypeZeroInit)
{
if (nextRefPosition == nullptr)
{
// If it has no actual references, mark it as "lastUse"; since such references aren't part
// of any flow they won't have been marked during dataflow. Without this, if we allocated a
// register we would never unassign it.
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_ZERO_REF, currentInterval));
currentRefPosition->lastUse = true;
}
LclVarDsc* varDsc = currentInterval->getLocalVar(compiler);
assert(varDsc != nullptr);
assert(!blockInfo[compiler->fgFirstBB->bbNum].hasEHBoundaryIn || currentInterval->isWriteThru);
if (blockInfo[compiler->fgFirstBB->bbNum].hasEHBoundaryIn ||
blockInfo[compiler->fgFirstBB->bbNum].hasEHPred)
{
allocate = false;
}
else if (refType == RefTypeParamDef && (varDsc->lvRefCntWtd() <= BB_UNITY_WEIGHT) &&
(!currentRefPosition->lastUse || (currentInterval->physReg == REG_STK)))
{
// If this is a low ref-count parameter, and either it is used (def is not the last use) or it's
// passed on the stack, don't allocate a register.
// Note that if this is an unused register parameter we don't want to set allocate to false because that
// will cause us to allocate stack space to spill it.
allocate = false;
}
else if ((currentInterval->physReg == REG_STK) && nextRefPosition->treeNode->OperIs(GT_BITCAST))
{
// In the case of ABI mismatches, avoid allocating a register only to have to immediately move
// it to a different register file.
allocate = false;
}
else if ((currentInterval->isWriteThru) && (refType == RefTypeZeroInit))
{
// For a RefTypeZeroInit that is a write-thru, there is no need to allocate a register
// right away; one can be assigned when the actual definition occurs.
// In the future, see if avoiding allocation for RefTypeZeroInit gives any benefit in general.
allocate = false;
}
if (!allocate)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, currentInterval));
didDump = true;
setIntervalAsSpilled(currentInterval);
if (assignedRegister != REG_NA)
{
clearNextIntervalRef(assignedRegister, currentInterval->registerType);
clearSpillCost(assignedRegister, currentInterval->registerType);
makeRegAvailable(assignedRegister, currentInterval->registerType);
}
}
}
#ifdef FEATURE_SIMD
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
else if (currentInterval->isUpperVector)
{
// This is a save or restore of the upper half of a large vector lclVar.
Interval* lclVarInterval = currentInterval->relatedInterval;
assert(lclVarInterval->isLocalVar);
if (refType == RefTypeUpperVectorSave)
{
if ((lclVarInterval->physReg == REG_NA) ||
(lclVarInterval->isPartiallySpilled && (currentInterval->physReg == REG_STK)))
{
allocate = false;
}
else
{
lclVarInterval->isPartiallySpilled = true;
}
}
else if (refType == RefTypeUpperVectorRestore)
{
assert(currentInterval->isUpperVector);
if (lclVarInterval->isPartiallySpilled)
{
lclVarInterval->isPartiallySpilled = false;
}
else
{
allocate = false;
}
}
}
else if (refType == RefTypeUpperVectorSave)
{
assert(!currentInterval->isLocalVar);
// Note that this case looks a lot like the case below, but in this case we need to spill
// at the previous RefPosition.
// We may want to consider allocating two callee-save registers for this case, but it happens rarely
// enough that it may not warrant the additional complexity.
if (assignedRegister != REG_NA)
{
unassignPhysReg(getRegisterRecord(assignedRegister), currentInterval->firstRefPosition);
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
}
currentRefPosition->registerAssignment = RBM_NONE;
continue;
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#endif // FEATURE_SIMD
if (allocate == false)
{
if (assignedRegister != REG_NA)
{
unassignPhysReg(getRegisterRecord(assignedRegister), currentRefPosition);
}
else if (!didDump)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
didDump = true;
}
currentRefPosition->registerAssignment = RBM_NONE;
continue;
}
if (currentInterval->isSpecialPutArg)
{
assert(!currentInterval->isLocalVar);
Interval* srcInterval = currentInterval->relatedInterval;
assert(srcInterval != nullptr && srcInterval->isLocalVar);
if (refType == RefTypeDef)
{
assert(srcInterval->recentRefPosition->nodeLocation == currentLocation - 1);
RegRecord* physRegRecord = srcInterval->assignedReg;
// For a putarg_reg to be special, its next use location has to be the same
// as the fixed reg's next kill location. Otherwise, if the source lcl var's next use
// is after the kill of the fixed reg but before the putarg_reg's next use, the fixed reg's
// kill would lead to a spill of the source but not of the putarg_reg if it were treated
// as special.
if (srcInterval->isActive &&
genRegMask(srcInterval->physReg) == currentRefPosition->registerAssignment &&
currentInterval->getNextRefLocation() == nextFixedRef[srcInterval->physReg])
{
assert(physRegRecord->regNum == srcInterval->physReg);
// A special putarg_reg acts as a pass-thru, since both the source lcl var
// and the putarg_reg have the same register allocated. The physical reg
// record of that register continues to point to the source lcl var's interval
// rather than to the putarg_reg's interval. So if the register
// allocated to the source lcl var were spilled and reallocated to another
// tree node before its use at the call node, it would be the lcl var
// (not the putarg_reg) that got spilled, since the physical reg record points
// to the lcl var's interval. As a result, the arg reg would get trashed, leading
// to bad codegen. The assumption here is that the source lcl var of a
// special putarg_reg doesn't get spilled and re-allocated prior to
// its use at the call node. This is ensured by marking the physical reg
// record as busy until the next kill.
setRegBusyUntilKill(srcInterval->physReg, srcInterval->registerType);
}
else
{
currentInterval->isSpecialPutArg = false;
}
}
// If this is still a SpecialPutArg, continue;
if (currentInterval->isSpecialPutArg)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, currentInterval,
currentRefPosition->assignedReg()));
continue;
}
}
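// If the interval is not currently in a register and this is a use, the value will have to be
// reloaded from its spill location.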
if (assignedRegister == REG_NA && RefTypeIsUse(refType))
{
currentRefPosition->reload = true;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_RELOAD, currentInterval, assignedRegister));
}
regMaskTP assignedRegBit = RBM_NONE;
bool isInRegister = false;
if (assignedRegister != REG_NA)
{
isInRegister = true;
assignedRegBit = genRegMask(assignedRegister);
if (!currentInterval->isActive)
{
// If this is a use, it must have started the block on the stack, but the register
// was available for use so we kept the association.
if (RefTypeIsUse(refType))
{
assert(enregisterLocalVars);
assert(inVarToRegMaps[curBBNum][currentInterval->getVarIndex(compiler)] == REG_STK &&
previousRefPosition->nodeLocation <= curBBStartLocation);
isInRegister = false;
}
else
{
currentInterval->isActive = true;
setRegInUse(assignedRegister, currentInterval->registerType);
updateSpillCost(assignedRegister, currentInterval);
}
updateNextIntervalRef(assignedRegister, currentInterval);
}
assert(currentInterval->assignedReg != nullptr &&
currentInterval->assignedReg->regNum == assignedRegister &&
currentInterval->assignedReg->assignedInterval == currentInterval);
}
if (previousRefPosition != nullptr)
{
assert(previousRefPosition->nextRefPosition == currentRefPosition);
assert(assignedRegister == REG_NA || assignedRegBit == previousRefPosition->registerAssignment ||
currentRefPosition->outOfOrder || previousRefPosition->copyReg ||
previousRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef);
}
else if (assignedRegister != REG_NA)
{
// Handle the case where this is a preassigned register (i.e. parameter).
// We don't want to actually use the preassigned register if it's not
// going to cover the lifetime - but we had to preallocate it to ensure
// that it remained live.
// TODO-CQ: At some point we may want to refine the analysis here, in case
// it might be beneficial to keep it in this reg for PART of the lifetime
if (currentInterval->isLocalVar)
{
regMaskTP preferences = currentInterval->registerPreferences;
bool keepAssignment = true;
bool matchesPreferences = (preferences & genRegMask(assignedRegister)) != RBM_NONE;
// Will the assigned register cover the lifetime? If not, does it at least
// meet the preferences for the next RefPosition?
LsraLocation nextPhysRegLocation = nextFixedRef[assignedRegister];
if (nextPhysRegLocation <= currentInterval->lastRefPosition->nodeLocation)
{
// Check to see if the existing assignment matches the preferences (e.g. callee save registers)
// and ensure that the next use of this localVar does not occur after the nextPhysRegRefPos
// There must be a next RefPosition, because we know that the Interval extends beyond the
// nextPhysRegRefPos.
assert(nextRefPosition != nullptr);
if (!matchesPreferences || nextPhysRegLocation < nextRefPosition->nodeLocation)
{
keepAssignment = false;
}
else if ((nextRefPosition->registerAssignment != assignedRegBit) &&
(nextPhysRegLocation <= nextRefPosition->getRefEndLocation()))
{
keepAssignment = false;
}
}
else if (refType == RefTypeParamDef && !matchesPreferences)
{
// Don't use the register, even if available, if it doesn't match the preferences.
// Note that this case is only for ParamDefs, for which we haven't yet taken preferences
// into account (we've just automatically got the initial location). In other cases,
// we would already have put it in a preferenced register, if it was available.
// TODO-CQ: Consider expanding this to check availability - that would duplicate
// code here, but otherwise we may wind up in this register anyway.
keepAssignment = false;
}
if (keepAssignment == false)
{
RegRecord* physRegRecord = getRegisterRecord(currentInterval->physReg);
currentRefPosition->registerAssignment = allRegs(currentInterval->registerType);
currentRefPosition->isFixedRegRef = false;
unassignPhysRegNoSpill(physRegRecord);
// If the preferences are currently set to just this register, reset them to allRegs
// of the appropriate type (just as we reset the registerAssignment for this
// RefPosition above).
// Otherwise, simply remove this register from the preferences, if it's there.
if (currentInterval->registerPreferences == assignedRegBit)
{
currentInterval->registerPreferences = currentRefPosition->registerAssignment;
}
else
{
currentInterval->registerPreferences &= ~assignedRegBit;
}
assignedRegister = REG_NA;
assignedRegBit = RBM_NONE;
}
}
}
if (assignedRegister != REG_NA)
{
RegRecord* physRegRecord = getRegisterRecord(assignedRegister);
assert((assignedRegBit == currentRefPosition->registerAssignment) ||
(physRegRecord->assignedInterval == currentInterval) ||
!isRegInUse(assignedRegister, currentInterval->registerType));
if (conflictingFixedRegReference(assignedRegister, currentRefPosition))
{
// We may have already reassigned the register to the conflicting reference.
// If not, we need to unassign this interval.
if (physRegRecord->assignedInterval == currentInterval)
{
unassignPhysRegNoSpill(physRegRecord);
physRegRecord->assignedInterval = nullptr;
clearConstantReg(assignedRegister, currentInterval->registerType);
}
currentRefPosition->moveReg = true;
assignedRegister = REG_NA;
currentRefPosition->registerAssignment &= ~assignedRegBit;
setIntervalAsSplit(currentInterval);
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_MOVE_REG, currentInterval, assignedRegister));
}
else if ((genRegMask(assignedRegister) & currentRefPosition->registerAssignment) != 0)
{
currentRefPosition->registerAssignment = assignedRegBit;
if (!currentInterval->isActive)
{
// If we've got an exposed use at the top of a block, the
// interval might not have been active. Otherwise if it's a use,
// the interval must be active.
if (refType == RefTypeDummyDef)
{
currentInterval->isActive = true;
assert(getRegisterRecord(assignedRegister)->assignedInterval == currentInterval);
}
else
{
currentRefPosition->reload = true;
}
}
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, currentInterval, assignedRegister));
}
else
{
// It's already in a register, but not one we need.
if (!RefTypeIsDef(currentRefPosition->refType))
{
regNumber copyReg = assignCopyReg(currentRefPosition);
lastAllocatedRefPosition = currentRefPosition;
bool unassign = false;
if (currentInterval->isWriteThru)
{
if (currentRefPosition->refType == RefTypeDef)
{
currentRefPosition->writeThru = true;
}
if (!currentRefPosition->lastUse)
{
if (currentRefPosition->spillAfter)
{
unassign = true;
}
}
}
regMaskTP copyRegMask = getRegMask(copyReg, currentInterval->registerType);
regMaskTP assignedRegMask = getRegMask(assignedRegister, currentInterval->registerType);
regsInUseThisLocation |= copyRegMask | assignedRegMask;
if (currentRefPosition->lastUse)
{
if (currentRefPosition->delayRegFree)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE_DELAYED, currentInterval,
assignedRegister));
delayRegsToFree |= copyRegMask | assignedRegMask;
regsInUseNextLocation |= copyRegMask | assignedRegMask;
}
else
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE, currentInterval, assignedRegister));
regsToFree |= copyRegMask | assignedRegMask;
}
}
else
{
copyRegsToFree |= copyRegMask;
if (currentRefPosition->delayRegFree)
{
regsInUseNextLocation |= copyRegMask | assignedRegMask;
}
}
// If this is a tree temp (non-localVar) interval, we will need an explicit move.
// Note: In theory a moveReg should cause the Interval to now have the new reg as its
// assigned register. However, that's not currently how this works.
// If we ever actually move lclVar intervals instead of copying, this will need to change.
if (!currentInterval->isLocalVar)
{
currentRefPosition->moveReg = true;
currentRefPosition->copyReg = false;
}
clearNextIntervalRef(copyReg, currentInterval->registerType);
clearSpillCost(copyReg, currentInterval->registerType);
updateNextIntervalRef(assignedRegister, currentInterval);
updateSpillCost(assignedRegister, currentInterval);
continue;
}
else
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NEEDS_NEW_REG, nullptr, assignedRegister));
regsToFree |= getRegMask(assignedRegister, currentInterval->registerType);
// We want a new register, but we don't want this to be considered a spill.
assignedRegister = REG_NA;
if (physRegRecord->assignedInterval == currentInterval)
{
unassignPhysRegNoSpill(physRegRecord);
}
}
}
}
if (assignedRegister == REG_NA)
{
if (currentRefPosition->RegOptional())
{
// We can avoid allocating a register if it is a last use requiring a reload.
if (currentRefPosition->lastUse && currentRefPosition->reload)
{
allocate = false;
}
else if (currentInterval->isWriteThru)
{
// Don't allocate if the next reference is in a cold block.
if (nextRefPosition == nullptr || (nextRefPosition->nodeLocation >= firstColdLoc))
{
allocate = false;
}
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE && defined(TARGET_XARCH)
// We can also avoid allocating a register (in fact we don't want to) if we have
// an UpperVectorRestore on xarch where the value is on the stack.
if ((currentRefPosition->refType == RefTypeUpperVectorRestore) && (currentInterval->physReg == REG_NA))
{
assert(currentRefPosition->regOptional);
allocate = false;
}
#endif
#ifdef DEBUG
// Under stress mode, don't allocate registers to RegOptional RefPositions.
if (allocate && regOptionalNoAlloc())
{
allocate = false;
}
#endif
}
RegisterScore registerScore = NONE;
if (allocate)
{
// Allocate a register, if we must, or if it is profitable to do so.
// If we have a fixed reg requirement, and the interval is inactive in another register,
// unassign that register.
if (currentRefPosition->isFixedRegRef && !currentInterval->isActive &&
(currentInterval->assignedReg != nullptr) &&
(currentInterval->assignedReg->assignedInterval == currentInterval) &&
(genRegMask(currentInterval->assignedReg->regNum) != currentRefPosition->registerAssignment))
{
unassignPhysReg(currentInterval->assignedReg, nullptr);
}
assignedRegister = allocateReg(currentInterval, currentRefPosition DEBUG_ARG(®isterScore));
}
// If no register was found, this RefPosition must not require a register.
if (assignedRegister == REG_NA)
{
assert(currentRefPosition->RegOptional());
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
currentRefPosition->registerAssignment = RBM_NONE;
currentRefPosition->reload = false;
currentInterval->isActive = false;
setIntervalAsSpilled(currentInterval);
}
#ifdef DEBUG
else
{
if (VERBOSE)
{
if (currentInterval->isConstant && (currentRefPosition->treeNode != nullptr) &&
currentRefPosition->treeNode->IsReuseRegVal())
{
dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, currentInterval, assignedRegister, currentBlock,
registerScore);
}
else
{
dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_REG, currentInterval, assignedRegister, currentBlock,
registerScore);
}
}
}
#endif // DEBUG
if (refType == RefTypeDummyDef && assignedRegister != REG_NA)
{
setInVarRegForBB(curBBNum, currentInterval->varNum, assignedRegister);
}
// If we allocated a register, and this is a use of a spilled value,
// it should have been marked for reload above.
if (assignedRegister != REG_NA && RefTypeIsUse(refType) && !isInRegister)
{
assert(currentRefPosition->reload);
}
}
// If we allocated a register, record it
if (assignedRegister != REG_NA)
{
assignedRegBit = genRegMask(assignedRegister);
regMaskTP regMask = getRegMask(assignedRegister, currentInterval->registerType);
regsInUseThisLocation |= regMask;
if (currentRefPosition->delayRegFree)
{
regsInUseNextLocation |= regMask;
}
currentRefPosition->registerAssignment = assignedRegBit;
currentInterval->physReg = assignedRegister;
regsToFree &= ~regMask; // we'll set it again later if it's dead
// If this interval is dead, free the register.
// The interval could be dead if this is a user variable, or if the
// node is being evaluated for side effects, or a call whose result
// is not used, etc.
// If this is an UpperVector we'll neither free it nor preference it
// (it will be freed when it is used).
bool unassign = false;
if (!currentInterval->IsUpperVector())
{
if (currentInterval->isWriteThru)
{
if (currentRefPosition->refType == RefTypeDef)
{
currentRefPosition->writeThru = true;
}
if (!currentRefPosition->lastUse)
{
if (currentRefPosition->spillAfter)
{
unassign = true;
}
}
}
if (currentRefPosition->lastUse || currentRefPosition->nextRefPosition == nullptr)
{
assert(currentRefPosition->isIntervalRef());
// If this isn't a final use, we'll mark the register as available, but keep the association.
if ((refType != RefTypeExpUse) && (currentRefPosition->nextRefPosition == nullptr))
{
unassign = true;
}
else
{
if (currentRefPosition->delayRegFree)
{
delayRegsToMakeInactive |= regMask;
}
else
{
regsToMakeInactive |= regMask;
}
// TODO-Cleanup: this makes things consistent with the previous behavior, and will enable
// preferences to be propagated, but it seems less than ideal.
currentInterval->isActive = false;
}
// Update the register preferences for the relatedInterval, if this is 'preferencedToDef'.
// Don't propagate to subsequent relatedIntervals; that will happen as they are allocated, and we
// don't know yet whether the register will be retained.
if (currentInterval->relatedInterval != nullptr)
{
currentInterval->relatedInterval->updateRegisterPreferences(assignedRegBit);
}
}
if (unassign)
{
if (currentRefPosition->delayRegFree)
{
delayRegsToFree |= regMask;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE_DELAYED));
}
else
{
regsToFree |= regMask;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE));
}
}
}
if (!unassign)
{
updateNextIntervalRef(assignedRegister, currentInterval);
updateSpillCost(assignedRegister, currentInterval);
}
}
lastAllocatedRefPosition = currentRefPosition;
}
#ifdef JIT32_GCENCODER
// For the JIT32_GCENCODER, when lvaKeepAliveAndReportThis is true, we must either keep the "this" pointer
// in the same register for the entire method, or keep it on the stack. Rather than imposing this constraint
// as we allocate, we will force all refs to the stack if it is split or spilled.
if (enregisterLocalVars && compiler->lvaKeepAliveAndReportThis())
{
LclVarDsc* thisVarDsc = compiler->lvaGetDesc(compiler->info.compThisArg);
if (thisVarDsc->lvLRACandidate)
{
Interval* interval = getIntervalForLocalVar(thisVarDsc->lvVarIndex);
if (interval->isSplit)
{
// We'll have to spill this.
setIntervalAsSpilled(interval);
}
if (interval->isSpilled)
{
unsigned prevBBNum = 0;
for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
{
// For the resolution phase, we need to ensure that any block with exposed uses has the
// incoming reg for 'this' as REG_STK.
if (RefTypeIsUse(ref->refType) && (ref->bbNum != prevBBNum))
{
VarToRegMap inVarToRegMap = getInVarToRegMap(ref->bbNum);
setVarReg(inVarToRegMap, thisVarDsc->lvVarIndex, REG_STK);
}
if (ref->RegOptional())
{
ref->registerAssignment = RBM_NONE;
ref->reload = false;
ref->spillAfter = false;
}
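// Force any register def to be spilled after the def, and any register use to be reloaded
// and then spilled again, so that the "this" pointer always has an up-to-date stack home.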
switch (ref->refType)
{
case RefTypeDef:
if (ref->registerAssignment != RBM_NONE)
{
ref->spillAfter = true;
}
break;
case RefTypeUse:
if (ref->registerAssignment != RBM_NONE)
{
ref->reload = true;
ref->spillAfter = true;
ref->copyReg = false;
ref->moveReg = false;
}
break;
default:
break;
}
prevBBNum = ref->bbNum;
}
}
}
}
#endif // JIT32_GCENCODER
// Free registers to clear associated intervals for resolution phase
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (getLsraExtendLifeTimes())
{
// If we have extended lifetimes, we need to make sure all the registers are freed.
for (size_t regNumIndex = 0; regNumIndex <= REG_FP_LAST; regNumIndex++)
{
RegRecord& regRecord = physRegs[regNumIndex];
Interval* interval = regRecord.assignedInterval;
if (interval != nullptr)
{
interval->isActive = false;
unassignPhysReg(®Record, nullptr);
}
}
}
else
#endif // DEBUG
{
freeRegisters(regsToFree | delayRegsToFree);
}
#ifdef DEBUG
if (VERBOSE)
{
// Dump the RegRecords after the last RefPosition is handled.
dumpRegRecords();
printf("\n");
dumpRefPositions("AFTER ALLOCATION");
dumpVarRefPositions("AFTER ALLOCATION");
// Dump the intervals that remain active
printf("Active intervals at end of allocation:\n");
// We COULD just reuse the intervalIter from above, but ArrayListIterator doesn't
// provide a Reset function (!) - we'll probably replace this, so don't bother
// adding it.
for (Interval& interval : intervals)
{
if (interval.isActive)
{
printf("Active ");
interval.dump();
}
}
printf("\n");
}
#endif // DEBUG
}
//-----------------------------------------------------------------------------
// updateAssignedInterval: Update assigned interval of register.
//
// Arguments:
// reg - register to be updated
// interval - interval to be assigned
// regType - register type
//
// Return Value:
// None
//
// Note:
// For ARM32, the two float registers that make up a double register are updated
// together when "regType" is TYP_DOUBLE.
//
void LinearScan::updateAssignedInterval(RegRecord* reg, Interval* interval, RegisterType regType)
{
#ifdef TARGET_ARM
// Update overlapping floating point register for TYP_DOUBLE.
Interval* oldAssignedInterval = reg->assignedInterval;
regNumber doubleReg = REG_NA;
if (regType == TYP_DOUBLE)
{
RegRecord* anotherHalfReg = findAnotherHalfRegRec(reg);
doubleReg = genIsValidDoubleReg(reg->regNum) ? reg->regNum : anotherHalfReg->regNum;
anotherHalfReg->assignedInterval = interval;
}
else if ((oldAssignedInterval != nullptr) && (oldAssignedInterval->registerType == TYP_DOUBLE))
{
RegRecord* anotherHalfReg = findAnotherHalfRegRec(reg);
doubleReg = genIsValidDoubleReg(reg->regNum) ? reg->regNum : anotherHalfReg->regNum;
anotherHalfReg->assignedInterval = nullptr;
}
if (doubleReg != REG_NA)
{
clearNextIntervalRef(doubleReg, TYP_DOUBLE);
clearSpillCost(doubleReg, TYP_DOUBLE);
clearConstantReg(doubleReg, TYP_DOUBLE);
}
#endif
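// Record the new assignment and update the register's tracking state: in-use and constant status,
// next interval reference, and spill cost when assigning; clear that state when vacating.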
reg->assignedInterval = interval;
if (interval != nullptr)
{
setRegInUse(reg->regNum, interval->registerType);
if (interval->isConstant)
{
setConstantReg(reg->regNum, interval->registerType);
}
else
{
clearConstantReg(reg->regNum, interval->registerType);
}
updateNextIntervalRef(reg->regNum, interval);
updateSpillCost(reg->regNum, interval);
}
else
{
clearNextIntervalRef(reg->regNum, reg->registerType);
clearSpillCost(reg->regNum, reg->registerType);
}
}
//-----------------------------------------------------------------------------
// updatePreviousInterval: Update previous interval of register.
//
// Arguments:
// reg - register to be updated
// interval - interval to be assigned
// regType - register type
//
// Return Value:
// None
//
// Assumptions:
// For ARM32, when "regType" is TYP_DOUBLE, "reg" should be an even-numbered
// float register, i.e. the lower half of the double register.
//
// Note:
// For ARM32, the two float registers that make up a double register are updated
// together when "regType" is TYP_DOUBLE.
//
void LinearScan::updatePreviousInterval(RegRecord* reg, Interval* interval, RegisterType regType)
{
reg->previousInterval = interval;
#ifdef TARGET_ARM
// Update overlapping floating point register for TYP_DOUBLE
if (regType == TYP_DOUBLE)
{
RegRecord* anotherHalfReg = findAnotherHalfRegRec(reg);
anotherHalfReg->previousInterval = interval;
}
#endif
}
//-----------------------------------------------------------------------------
// writeLocalReg: Write the register assignment for a GT_LCL_VAR node.
//
// Arguments:
// lclNode - The GT_LCL_VAR node
// varNum - The variable number for the register
// reg - The assigned register
//
// Return Value:
// None
//
// Note:
// For a multireg node, 'varNum' will be the field local for the given register.
//
void LinearScan::writeLocalReg(GenTreeLclVar* lclNode, unsigned varNum, regNumber reg)
{
assert((lclNode->GetLclNum() == varNum) == !lclNode->IsMultiReg());
if (lclNode->GetLclNum() == varNum)
{
lclNode->SetRegNum(reg);
}
else
{
assert(compiler->lvaEnregMultiRegVars);
LclVarDsc* parentVarDsc = compiler->lvaGetDesc(lclNode);
assert(parentVarDsc->lvPromoted);
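// The field locals of a promoted struct are numbered consecutively starting at lvFieldLclStart,
// so the register index for this field is its offset from that first field local.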
unsigned regIndex = varNum - parentVarDsc->lvFieldLclStart;
assert(regIndex < MAX_MULTIREG_COUNT);
lclNode->SetRegNumByIdx(reg, regIndex);
}
}
//-----------------------------------------------------------------------------
// LinearScan::resolveLocalRef
// Description:
// Update the graph for a local reference.
// Also, track the register (if any) that is currently occupied.
// Arguments:
// treeNode: The lclVar that's being resolved
// currentRefPosition: the RefPosition associated with the treeNode
//
// Details:
// This method is called for each local reference, during the resolveRegisters
// phase of LSRA. It is responsible for keeping the following in sync:
// - varDsc->GetRegNum() (and GetOtherReg()) contain the unique register location.
// If it is not in the same register through its lifetime, it is set to REG_STK.
// - interval->physReg is set to the assigned register
// (i.e. at the code location which is currently being handled by resolveRegisters())
// - interval->isActive is true iff the interval is live and occupying a register
// - interval->isSpilled should have already been set to true if the interval is EVER spilled
// - interval->isSplit is set to true if the interval does not occupy the same
// register throughout the method
// - RegRecord->assignedInterval points to the interval which currently occupies
// the register
// - For each lclVar node:
// - GetRegNum()/gtRegPair is set to the currently allocated register(s).
// - GTF_SPILLED is set on a use if it must be reloaded prior to use.
// - GTF_SPILL is set if it must be spilled after use.
//
// A copyReg is an ugly case where the variable must be in a specific (fixed) register,
// but it currently resides elsewhere. The register allocator must track the use of the
// fixed register, but it marks the lclVar node with the register it currently lives in
// and the code generator does the necessary move.
//
// Before beginning, the varDsc for each parameter must be set to its initial location.
//
// NICE: Consider tracking whether an Interval is always in the same location (register/stack)
// in which case it will require no resolution.
//
void LinearScan::resolveLocalRef(BasicBlock* block, GenTreeLclVar* treeNode, RefPosition* currentRefPosition)
{
assert((block == nullptr) == (treeNode == nullptr));
assert(enregisterLocalVars);
// Is this a tracked local? Or just a register allocated for loading
// a non-tracked one?
Interval* interval = currentRefPosition->getInterval();
assert(interval->isLocalVar);
interval->recentRefPosition = currentRefPosition;
LclVarDsc* varDsc = interval->getLocalVar(compiler);
// NOTE: we set the LastUse flag here unless we are extending lifetimes, in which case we write
// this bit in checkLastUses. This is a bit of a hack, but is necessary because codegen requires
// accurate last use info that is not reflected in the lastUse bit on ref positions when we are extending
// lifetimes. See also the comments in checkLastUses.
if ((treeNode != nullptr) && !extendLifetimes())
{
if (currentRefPosition->lastUse)
{
treeNode->SetLastUse(currentRefPosition->getMultiRegIdx());
}
else
{
treeNode->ClearLastUse(currentRefPosition->getMultiRegIdx());
}
if ((currentRefPosition->registerAssignment != RBM_NONE) && (interval->physReg == REG_NA) &&
currentRefPosition->RegOptional() && currentRefPosition->lastUse &&
(currentRefPosition->refType == RefTypeUse))
{
// This can happen if the incoming location for the block was changed from a register to the stack
// during resolution. In this case we're better off making it contained.
assert(inVarToRegMaps[curBBNum][varDsc->lvVarIndex] == REG_STK);
currentRefPosition->registerAssignment = RBM_NONE;
writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA);
}
}
if (currentRefPosition->registerAssignment == RBM_NONE)
{
assert(currentRefPosition->RegOptional());
assert(interval->isSpilled);
varDsc->SetRegNum(REG_STK);
if (interval->assignedReg != nullptr && interval->assignedReg->assignedInterval == interval)
{
updateAssignedInterval(interval->assignedReg, nullptr, interval->registerType);
}
interval->assignedReg = nullptr;
interval->physReg = REG_NA;
interval->isActive = false;
// Set this as contained if it is not a multi-reg (we could potentially mark it as contained
// if all uses are from spill, but that adds complexity).
if ((currentRefPosition->refType == RefTypeUse) && !treeNode->IsMultiReg())
{
assert(treeNode != nullptr);
treeNode->SetContained();
}
return;
}
// In most cases, assigned and home registers will be the same
// The exception is the copyReg case, where we've assigned a register
// for a specific purpose, but will be keeping the register assignment
regNumber assignedReg = currentRefPosition->assignedReg();
regNumber homeReg = assignedReg;
// Undo any previous association with a physical register, UNLESS this
// is a copyReg
if (!currentRefPosition->copyReg)
{
regNumber oldAssignedReg = interval->physReg;
if (oldAssignedReg != REG_NA && assignedReg != oldAssignedReg)
{
RegRecord* oldRegRecord = getRegisterRecord(oldAssignedReg);
if (oldRegRecord->assignedInterval == interval)
{
updateAssignedInterval(oldRegRecord, nullptr, interval->registerType);
}
}
}
if (currentRefPosition->refType == RefTypeUse && !currentRefPosition->reload)
{
// Was this spilled after our predecessor was scheduled?
if (interval->physReg == REG_NA)
{
assert(inVarToRegMaps[curBBNum][varDsc->lvVarIndex] == REG_STK);
currentRefPosition->reload = true;
}
}
bool reload = currentRefPosition->reload;
bool spillAfter = currentRefPosition->spillAfter;
bool writeThru = currentRefPosition->writeThru;
// In the reload case we either:
// - Set the register to REG_STK if it will be referenced only from the home location, or
// - Set the register to the assigned register and set GTF_SPILLED if it must be loaded into a register.
if (reload)
{
assert(currentRefPosition->refType != RefTypeDef);
assert(interval->isSpilled);
varDsc->SetRegNum(REG_STK);
if (!spillAfter)
{
interval->physReg = assignedReg;
}
// If there is no treeNode, this must be a RefTypeExpUse, in
// which case we did the reload already
if (treeNode != nullptr)
{
treeNode->gtFlags |= GTF_SPILLED;
if (treeNode->IsMultiReg())
{
treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx());
}
if (spillAfter)
{
if (currentRefPosition->RegOptional())
{
// This is a use of lclVar that is flagged as reg-optional
// by lower/codegen and marked for both reload and spillAfter.
// In this case we can avoid unnecessary reload and spill
// by setting reg on lclVar to REG_STK and reg on tree node
// to REG_NA. Codegen will generate the code by considering
// it as a contained memory operand.
//
// Note that varDsc->GetRegNum() has already been set to REG_STK above.
interval->physReg = REG_NA;
writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA);
treeNode->gtFlags &= ~GTF_SPILLED;
treeNode->SetContained();
// We don't support RegOptional for multi-reg localvars.
assert(!treeNode->IsMultiReg());
}
else
{
treeNode->gtFlags |= GTF_SPILL;
if (treeNode->IsMultiReg())
{
treeNode->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx());
}
}
}
}
else
{
assert(currentRefPosition->refType == RefTypeExpUse);
}
}
else if (spillAfter && !RefTypeIsUse(currentRefPosition->refType) && (treeNode != nullptr) &&
(!treeNode->IsMultiReg() || treeNode->gtGetOp1()->IsMultiRegNode()))
{
// In the case of a pure def, don't bother spilling - just assign it to the
// stack. However, we need to remember that it was spilled.
// We can't do this in the case of a multi-reg node with a non-multireg source as
// we need the register to extract into.
assert(interval->isSpilled);
varDsc->SetRegNum(REG_STK);
interval->physReg = REG_NA;
writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA);
}
else // Not reload and Not pure-def that's spillAfter
{
if (currentRefPosition->copyReg || currentRefPosition->moveReg)
{
// For a copyReg or moveReg, we have two cases:
// - In the first case, we have a fixedReg - i.e. a register which the code
// generator is constrained to use.
// The code generator will generate the appropriate move to meet the requirement.
// - In the second case, we were forced to use a different register because of
// interference (or JitStressRegs).
// In this case, we generate a GT_COPY.
// In either case, we annotate the treeNode with the register in which the value
// currently lives. For moveReg, the homeReg is the new register (as assigned above).
// But for copyReg, the homeReg remains unchanged.
assert(treeNode != nullptr);
writeLocalReg(treeNode->AsLclVar(), interval->varNum, interval->physReg);
if (currentRefPosition->copyReg)
{
homeReg = interval->physReg;
}
else
{
assert(interval->isSplit);
interval->physReg = assignedReg;
}
if (!currentRefPosition->isFixedRegRef || currentRefPosition->moveReg)
{
// This is the second case, where we need to generate a copy
insertCopyOrReload(block, treeNode, currentRefPosition->getMultiRegIdx(), currentRefPosition);
}
}
else
{
interval->physReg = assignedReg;
if (!interval->isSpilled && !interval->isSplit)
{
if (varDsc->GetRegNum() != REG_STK)
{
// If the register assignments don't match, then this interval is split.
if (varDsc->GetRegNum() != assignedReg)
{
setIntervalAsSplit(interval);
varDsc->SetRegNum(REG_STK);
}
}
else
{
varDsc->SetRegNum(assignedReg);
}
}
}
if (spillAfter)
{
if (treeNode != nullptr)
{
treeNode->gtFlags |= GTF_SPILL;
if (treeNode->IsMultiReg())
{
treeNode->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx());
}
}
assert(interval->isSpilled);
interval->physReg = REG_NA;
varDsc->SetRegNum(REG_STK);
}
if (writeThru && (treeNode != nullptr))
{
// This is a def of a write-thru EH var (only defs are marked 'writeThru').
treeNode->gtFlags |= GTF_SPILL;
// We also mark writeThru defs that are not last-use with GTF_SPILLED to indicate that they are conceptually
// spilled and immediately "reloaded", i.e. the register remains live.
// Note that we can have a "last use" write that has no exposed uses in the standard
// (non-EH) control flow, but that may be used on an exception path. Hence the need
// to retain these defs, and to ensure that they write their value to the stack location.
if (!currentRefPosition->lastUse)
{
treeNode->gtFlags |= GTF_SPILLED;
if (treeNode->IsMultiReg())
{
treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx());
}
}
}
if (currentRefPosition->singleDefSpill && (treeNode != nullptr))
{
// This is the first (and only) def of a single-def var (only defs are marked 'singleDefSpill').
// Mark it as GTF_SPILL, so it is spilled immediately to the stack at definition and
// GTF_SPILLED, so the variable stays live in the register.
//
// TODO: This approach would still create the resolution moves but during codegen, will check for
// `lvSpillAtSingleDef` to decide whether to generate spill or not. In future, see if there is some
// better way to avoid resolution moves, perhaps by updating the varDsc->SetRegNum(REG_STK) in this
// method?
treeNode->gtFlags |= GTF_SPILL;
treeNode->gtFlags |= GTF_SPILLED;
if (treeNode->IsMultiReg())
{
treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx());
}
varDsc->lvSpillAtSingleDef = true;
}
}
// Update the physRegRecord for the register, so that we know what vars are in
// regs at the block boundaries
RegRecord* physRegRecord = getRegisterRecord(homeReg);
if (spillAfter || currentRefPosition->lastUse)
{
interval->isActive = false;
interval->assignedReg = nullptr;
interval->physReg = REG_NA;
updateAssignedInterval(physRegRecord, nullptr, interval->registerType);
}
else
{
interval->isActive = true;
interval->assignedReg = physRegRecord;
updateAssignedInterval(physRegRecord, interval, interval->registerType);
}
}
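//------------------------------------------------------------------------
// writeRegisters: Set the assigned register from the RefPosition on the tree node.
//
// Arguments:
//    currentRefPosition - the RefPosition for the current tree node
//    tree               - the tree node to annotate
//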
void LinearScan::writeRegisters(RefPosition* currentRefPosition, GenTree* tree)
{
lsraAssignRegToTree(tree, currentRefPosition->assignedReg(), currentRefPosition->getMultiRegIdx());
}
//------------------------------------------------------------------------
// insertCopyOrReload: Insert a copy in the case where a tree node value must be moved
// to a different register at the point of use (GT_COPY), or it is reloaded to a different register
// than the one it was spilled from (GT_RELOAD).
//
// Arguments:
// block - basic block in which GT_COPY/GT_RELOAD is inserted.
// tree - This is the node to copy or reload.
// Insert copy or reload node between this node and its parent.
// multiRegIdx - register position of tree node for which copy or reload is needed.
// refPosition - The RefPosition at which copy or reload will take place.
//
// Notes:
// The GT_COPY or GT_RELOAD will be inserted in the proper spot in execution order where the reload is to occur.
//
// For example, for this tree (numbers are execution order, lower is earlier and higher is later):
//
// +---------+----------+
// | GT_ADD (3) |
// +---------+----------+
// |
// / '\'
// / '\'
// / '\'
// +-------------------+ +----------------------+
// | x (1) | "tree" | y (2) |
// +-------------------+ +----------------------+
//
// generate this tree:
//
// +---------+----------+
// | GT_ADD (4) |
// +---------+----------+
// |
// / '\'
// / '\'
// / '\'
// +-------------------+ +----------------------+
// | GT_RELOAD (3) | | y (2) |
// +-------------------+ +----------------------+
// |
// +-------------------+
// | x (1) | "tree"
// +-------------------+
//
// Note in particular that the GT_RELOAD node gets inserted in execution order immediately before the parent of "tree",
// which seems a bit weird since normally a node's parent (in this case, the parent of "x", GT_RELOAD in the "after"
// picture) immediately follows all of its children (that is, normally the execution ordering is postorder).
// The ordering must be this weird "out of normal order" way because the "x" node is being spilled, probably
// because the expression in the tree represented above by "y" has high register requirements. We don't want
// to reload immediately, of course. So we put GT_RELOAD where the reload should actually happen.
//
// Note that GT_RELOAD is required when we reload to a different register than the one we spilled to. It can also be
// used if we reload to the same register. Normally, though, in that case we just mark the node with GTF_SPILLED,
// and the unspilling code automatically reuses the same register, and does the reload when it notices that flag
// when considering a node's operands.
//
void LinearScan::insertCopyOrReload(BasicBlock* block, GenTree* tree, unsigned multiRegIdx, RefPosition* refPosition)
{
LIR::Range& blockRange = LIR::AsRange(block);
LIR::Use treeUse;
bool foundUse = blockRange.TryGetUse(tree, &treeUse);
assert(foundUse);
GenTree* parent = treeUse.User();
genTreeOps oper;
if (refPosition->reload)
{
oper = GT_RELOAD;
}
else
{
oper = GT_COPY;
INTRACK_STATS(updateLsraStat(STAT_COPY_REG, block->bbNum));
}
// If the parent is a reload/copy node, then tree must be a multi-reg node
// that has already had one of its registers spilled.
// It is possible that one of its RefTypeDef positions got spilled and the next
// use of it requires it to be in a different register.
//
// In this case set the i'th position reg of reload/copy node to the reg allocated
// for copy/reload refPosition. Essentially a copy/reload node will have a reg
// for each multi-reg position of its child. If there is a valid reg in i'th
// position of GT_COPY or GT_RELOAD node then the corresponding result of its
// child needs to be copied or reloaded to that reg.
if (parent->IsCopyOrReload())
{
noway_assert(parent->OperGet() == oper);
noway_assert(tree->IsMultiRegNode());
GenTreeCopyOrReload* copyOrReload = parent->AsCopyOrReload();
noway_assert(copyOrReload->GetRegNumByIdx(multiRegIdx) == REG_NA);
copyOrReload->SetRegNumByIdx(refPosition->assignedReg(), multiRegIdx);
}
else
{
var_types regType = tree->TypeGet();
if ((regType == TYP_STRUCT) && !tree->IsMultiRegNode())
{
assert(compiler->compEnregStructLocals());
assert(tree->IsLocal());
const GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
// We create struct copies with a primitive type, so we don't burden the copy node with parsing the struct handle.
// Note that for a multiReg node we keep each regType in the tree and don't need this.
regType = varDsc->GetRegisterType(lcl);
assert(regType != TYP_UNDEF);
}
// Create the new node, with "tree" as its only child.
GenTreeCopyOrReload* newNode = new (compiler, oper) GenTreeCopyOrReload(oper, regType, tree);
assert(refPosition->registerAssignment != RBM_NONE);
SetLsraAdded(newNode);
newNode->SetRegNumByIdx(refPosition->assignedReg(), multiRegIdx);
if (refPosition->copyReg)
{
// This is a TEMPORARY copy
assert(isCandidateLocalRef(tree) || tree->IsMultiRegLclVar());
newNode->SetLastUse(multiRegIdx);
}
// Insert the copy/reload after the spilled node and replace the use of the original node with a use
// of the copy/reload.
blockRange.InsertAfter(tree, newNode);
treeUse.ReplaceWith(newNode);
}
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
//------------------------------------------------------------------------
// insertUpperVectorSave: Insert code to save the upper half of a vector that lives
// in a callee-save register at the point of a kill (the upper half is
// not preserved).
//
// Arguments:
// tree - This is the node before which we will insert the Save.
// It will be a call or some node that turns into a call.
// refPosition - The RefTypeUpperVectorSave RefPosition.
// upperInterval - The Interval for the upper half of the large vector lclVar.
// block - the BasicBlock containing the call.
//
void LinearScan::insertUpperVectorSave(GenTree* tree,
RefPosition* refPosition,
Interval* upperVectorInterval,
BasicBlock* block)
{
JITDUMP("Inserting UpperVectorSave for RP #%d before %d.%s:\n", refPosition->rpNum, tree->gtTreeID,
GenTree::OpName(tree->gtOper));
Interval* lclVarInterval = upperVectorInterval->relatedInterval;
assert(lclVarInterval->isLocalVar == true);
assert(refPosition->getInterval() == upperVectorInterval);
regNumber lclVarReg = lclVarInterval->physReg;
if (lclVarReg == REG_NA)
{
return;
}
#ifdef DEBUG
if (tree->IsCall())
{
// Make sure that we do not insert a vector save before a call that does not return.
assert(!tree->AsCall()->IsNoReturn());
}
#endif
LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarInterval->varNum);
assert(Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()));
// On Arm64, we must always have a register to save the upper half,
// while on x86 we can spill directly to memory.
regNumber spillReg = refPosition->assignedReg();
#ifdef TARGET_ARM64
bool spillToMem = refPosition->spillAfter;
assert(spillReg != REG_NA);
#else
bool spillToMem = (spillReg == REG_NA);
assert(!refPosition->spillAfter);
#endif
LIR::Range& blockRange = LIR::AsRange(block);
// Insert the save before the call.
GenTree* saveLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, varDsc->lvType);
saveLcl->SetRegNum(lclVarReg);
SetLsraAdded(saveLcl);
GenTreeSIMD* simdNode = compiler->gtNewSIMDNode(LargeVectorSaveType, saveLcl, SIMDIntrinsicUpperSave,
varDsc->GetSimdBaseJitType(), genTypeSize(varDsc));
if (simdNode->GetSimdBaseJitType() == CORINFO_TYPE_UNDEF)
{
// There are a few scenarios where we can get a LCL_VAR which
// doesn't know the underlying baseType. In that scenario, we
// will just lie and say it is a float. Codegen doesn't actually
// care what the type is but this avoids an assert that would
// otherwise be fired from the more general checks that happen.
simdNode->SetSimdBaseJitType(CORINFO_TYPE_FLOAT);
}
SetLsraAdded(simdNode);
simdNode->SetRegNum(spillReg);
if (spillToMem)
{
simdNode->gtFlags |= GTF_SPILL;
upperVectorInterval->physReg = REG_NA;
}
else
{
assert((genRegMask(spillReg) & RBM_FLT_CALLEE_SAVED) != RBM_NONE);
upperVectorInterval->physReg = spillReg;
}
blockRange.InsertBefore(tree, LIR::SeqTree(compiler, simdNode));
DISPTREE(simdNode);
JITDUMP("\n");
}
//------------------------------------------------------------------------
// insertUpperVectorRestore: Insert code to restore the upper half of a vector that has been partially spilled.
//
// Arguments:
// tree - This is the node for which we will insert the Restore.
// If non-null, it will be a use of the large vector lclVar.
// If null, the Restore will be added to the end of the block.
// upperVectorInterval - The Interval for the upper vector for the lclVar.
// block - the BasicBlock into which we will be inserting the code.
//
// Notes:
// In the case where 'tree' is non-null, we will insert the restore just prior to
// its use, in order to ensure the proper ordering.
//
void LinearScan::insertUpperVectorRestore(GenTree* tree,
RefPosition* refPosition,
Interval* upperVectorInterval,
BasicBlock* block)
{
JITDUMP("Adding UpperVectorRestore for RP #%d ", refPosition->rpNum);
Interval* lclVarInterval = upperVectorInterval->relatedInterval;
assert(lclVarInterval->isLocalVar == true);
regNumber lclVarReg = lclVarInterval->physReg;
// We should not call this method if the lclVar is not in a register (we should have simply marked the entire
// lclVar as spilled).
assert(lclVarReg != REG_NA);
LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarInterval->varNum);
assert(Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()));
GenTree* restoreLcl = nullptr;
restoreLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, varDsc->lvType);
restoreLcl->SetRegNum(lclVarReg);
SetLsraAdded(restoreLcl);
GenTreeSIMD* simdNode = compiler->gtNewSIMDNode(varDsc->TypeGet(), restoreLcl, SIMDIntrinsicUpperRestore,
varDsc->GetSimdBaseJitType(), genTypeSize(varDsc->lvType));
if (simdNode->GetSimdBaseJitType() == CORINFO_TYPE_UNDEF)
{
// There are a few scenarios where we can get a LCL_VAR which
// doesn't know the underlying baseType. In that scenario, we
// will just lie and say it is a float. Codegen doesn't actually
// care what the type is but this avoids an assert that would
// otherwise be fired from the more general checks that happen.
simdNode->SetSimdBaseJitType(CORINFO_TYPE_FLOAT);
}
regNumber restoreReg = upperVectorInterval->physReg;
SetLsraAdded(simdNode);
if (restoreReg == REG_NA)
{
// We need a stack location for this.
assert(lclVarInterval->isSpilled);
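// On x64 the upper half can be restored directly from the stack location (no register at use);
// other targets (e.g. Arm64) require a temporary register to restore through.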
#ifdef TARGET_AMD64
assert(refPosition->assignedReg() == REG_NA);
simdNode->gtFlags |= GTF_NOREG_AT_USE;
#else
simdNode->gtFlags |= GTF_SPILLED;
assert(refPosition->assignedReg() != REG_NA);
restoreReg = refPosition->assignedReg();
#endif
}
simdNode->SetRegNum(restoreReg);
LIR::Range& blockRange = LIR::AsRange(block);
JITDUMP("Adding UpperVectorRestore ");
if (tree != nullptr)
{
JITDUMP("before %d.%s:\n", tree->gtTreeID, GenTree::OpName(tree->gtOper));
LIR::Use treeUse;
bool foundUse = blockRange.TryGetUse(tree, &treeUse);
assert(foundUse);
// We need to insert the restore prior to the use, not (necessarily) immediately after the lclVar.
blockRange.InsertBefore(treeUse.User(), LIR::SeqTree(compiler, simdNode));
}
else
{
JITDUMP("at end of " FMT_BB ":\n", block->bbNum);
if (block->KindIs(BBJ_COND, BBJ_SWITCH))
{
noway_assert(!blockRange.IsEmpty());
GenTree* branch = blockRange.LastNode();
assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE ||
branch->OperGet() == GT_SWITCH);
blockRange.InsertBefore(branch, LIR::SeqTree(compiler, simdNode));
}
else
{
assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS));
blockRange.InsertAtEnd(LIR::SeqTree(compiler, simdNode));
}
}
DISPTREE(simdNode);
JITDUMP("\n");
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
//------------------------------------------------------------------------
// initMaxSpill: Initializes the LinearScan members used to track the max number
// of concurrent spills. This is needed so that we can set the
// fields in Compiler, so that the code generator, in turn, can
// allocate the right number of spill locations.
//
// Arguments:
// None.
//
// Return Value:
// None.
//
// Assumptions:
// This is called before any calls to updateMaxSpill().
void LinearScan::initMaxSpill()
{
needDoubleTmpForFPCall = false;
needFloatTmpForFPCall = false;
for (int i = 0; i < TYP_COUNT; i++)
{
maxSpill[i] = 0;
currentSpill[i] = 0;
}
}
//------------------------------------------------------------------------
// recordMaxSpill: Sets the fields in Compiler for the max number of concurrent spills.
// (See the comment on initMaxSpill.)
//
// Arguments:
// None.
//
// Return Value:
// None.
//
// Assumptions:
// This is called after updateMaxSpill() has been called for all "real"
// RefPositions.
void LinearScan::recordMaxSpill()
{
// Note: due to the temp normalization process (see tmpNormalizeType)
// only a few types should actually be seen here.
JITDUMP("Recording the maximum number of concurrent spills:\n");
#ifdef TARGET_X86
var_types returnType = RegSet::tmpNormalizeType(compiler->info.compRetType);
if (needDoubleTmpForFPCall || (returnType == TYP_DOUBLE))
{
JITDUMP("Adding a spill temp for moving a double call/return value between xmm reg and x87 stack.\n");
maxSpill[TYP_DOUBLE] += 1;
}
if (needFloatTmpForFPCall || (returnType == TYP_FLOAT))
{
JITDUMP("Adding a spill temp for moving a float call/return value between xmm reg and x87 stack.\n");
maxSpill[TYP_FLOAT] += 1;
}
#endif // TARGET_X86
compiler->codeGen->regSet.tmpBeginPreAllocateTemps();
for (int i = 0; i < TYP_COUNT; i++)
{
if (var_types(i) != RegSet::tmpNormalizeType(var_types(i)))
{
// Only normalized types should have anything in the maxSpill array.
// We assume here that if type 'i' does not normalize to itself, then
// nothing else normalizes to 'i', either.
assert(maxSpill[i] == 0);
}
if (maxSpill[i] != 0)
{
JITDUMP(" %s: %d\n", varTypeName(var_types(i)), maxSpill[i]);
compiler->codeGen->regSet.tmpPreAllocateTemps(var_types(i), maxSpill[i]);
}
}
JITDUMP("\n");
}
//------------------------------------------------------------------------
// updateMaxSpill: Update the maximum number of concurrent spills
//
// Arguments:
// refPosition - the current RefPosition being handled
//
// Return Value:
// None.
//
// Assumptions:
// The RefPosition has an associated interval (getInterval() will
// otherwise assert).
//
// Notes:
// This is called for each "real" RefPosition during the writeback
// phase of LSRA. It keeps track of how many concurrently-live
// spills there are, and the largest number seen so far.
void LinearScan::updateMaxSpill(RefPosition* refPosition)
{
RefType refType = refPosition->refType;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if ((refType == RefTypeUpperVectorSave) || (refType == RefTypeUpperVectorRestore))
{
Interval* interval = refPosition->getInterval();
// If this is not an 'upperVector', it must be a tree temp that has already been
// (fully) spilled.
if (!interval->isUpperVector)
{
assert(interval->firstRefPosition->spillAfter);
}
else
{
// The UpperVector RefPositions spill to the localVar's home location.
Interval* lclVarInterval = interval->relatedInterval;
assert(lclVarInterval->isSpilled || (!refPosition->spillAfter && !refPosition->reload));
}
return;
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (refPosition->spillAfter || refPosition->reload ||
(refPosition->RegOptional() && refPosition->assignedReg() == REG_NA))
{
Interval* interval = refPosition->getInterval();
if (!interval->isLocalVar)
{
GenTree* treeNode = refPosition->treeNode;
if (treeNode == nullptr)
{
assert(RefTypeIsUse(refType));
treeNode = interval->firstRefPosition->treeNode;
}
assert(treeNode != nullptr);
// The tmp allocation logic 'normalizes' types to a small number of
// types that need distinct stack locations from each other.
// Those types are currently gc refs, byrefs, <= 4 byte non-GC items,
// 8-byte non-GC items, and 16-byte or 32-byte SIMD vectors.
// LSRA is agnostic to those choices but needs
// to know what they are here.
var_types type;
if (!treeNode->IsMultiRegNode())
{
type = getDefType(treeNode);
}
else
{
type = treeNode->GetRegTypeByIndex(refPosition->getMultiRegIdx());
}
type = RegSet::tmpNormalizeType(type);
if (refPosition->spillAfter && !refPosition->reload)
{
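// A new spill of this type: bump the count of concurrently-live spills and update the max.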
currentSpill[type]++;
if (currentSpill[type] > maxSpill[type])
{
maxSpill[type] = currentSpill[type];
}
}
else if (refPosition->reload)
{
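// A reload releases one of the concurrently-live spill locations of this type.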
assert(currentSpill[type] > 0);
currentSpill[type]--;
}
else if (refPosition->RegOptional() && refPosition->assignedReg() == REG_NA)
{
// This is a spill temp that is not reloaded into a register because it is
// marked as "allocate if profitable" and is used directly from its
// memory location. To properly account for the max spill of this type, we
// decrement the spill count.
assert(RefTypeIsUse(refType));
assert(currentSpill[type] > 0);
currentSpill[type]--;
}
JITDUMP(" Max spill for %s is %d\n", varTypeName(type), maxSpill[type]);
}
}
}
// This is the final phase of register allocation. It writes the register assignments to
// the tree, and performs resolution across joins and backedges.
//
void LinearScan::resolveRegisters()
{
// Iterate over the tree and the RefPositions in lockstep
// - annotate the tree with register assignments by setting GetRegNum() or gtRegPair (for longs)
// on the tree node
// - track globally-live var locations
// - add resolution points at split/merge/critical points as needed
// Need to use the same traversal order as the one that assigns the location numbers.
// Dummy RefPositions have been added at any split, join or critical edge, at the
// point where resolution may be required. These are located:
// - for a split, at the top of the non-adjacent block
// - for a join, at the bottom of the non-adjacent joining block
// - for a critical edge, at the top of the target block of each critical
// edge.
// Note that a target block may have multiple incoming critical or split edges
//
// These RefPositions record the expected location of the Interval at that point.
// At each branch, we identify the location of each liveOut interval, and check
// against the RefPositions at the target.
BasicBlock* block;
LsraLocation currentLocation = MinLocation;
// Clear register assignments - these will be reestablished as lclVar defs (including RefTypeParamDefs)
// are encountered.
if (enregisterLocalVars)
{
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
Interval* assignedInterval = physRegRecord->assignedInterval;
if (assignedInterval != nullptr)
{
assignedInterval->assignedReg = nullptr;
assignedInterval->physReg = REG_NA;
}
physRegRecord->assignedInterval = nullptr;
physRegRecord->recentRefPosition = nullptr;
}
// Clear "recentRefPosition" for lclVar intervals
for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
{
if (localVarIntervals[varIndex] != nullptr)
{
localVarIntervals[varIndex]->recentRefPosition = nullptr;
localVarIntervals[varIndex]->isActive = false;
}
else
{
assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate);
}
}
}
// handle incoming arguments and special temps
RefPositionIterator refPosIterator = refPositions.begin();
RefPosition* currentRefPosition = &refPosIterator;
if (enregisterLocalVars)
{
VarToRegMap entryVarToRegMap = inVarToRegMaps[compiler->fgFirstBB->bbNum];
for (; refPosIterator != refPositions.end() &&
(currentRefPosition->refType == RefTypeParamDef || currentRefPosition->refType == RefTypeZeroInit);
++refPosIterator, currentRefPosition = &refPosIterator)
{
Interval* interval = currentRefPosition->getInterval();
assert(interval != nullptr && interval->isLocalVar);
resolveLocalRef(nullptr, nullptr, currentRefPosition);
regNumber reg = REG_STK;
int varIndex = interval->getVarIndex(compiler);
if (!currentRefPosition->spillAfter && currentRefPosition->registerAssignment != RBM_NONE)
{
reg = currentRefPosition->assignedReg();
}
else
{
reg = REG_STK;
interval->isActive = false;
}
setVarReg(entryVarToRegMap, varIndex, reg);
}
}
else
{
assert(refPosIterator == refPositions.end() ||
(refPosIterator->refType != RefTypeParamDef && refPosIterator->refType != RefTypeZeroInit));
}
// write back assignments
for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
{
assert(curBBNum == block->bbNum);
if (enregisterLocalVars)
{
// Record the var locations at the start of this block.
// (If it's fgFirstBB, we've already done that above, see entryVarToRegMap)
curBBStartLocation = currentRefPosition->nodeLocation;
if (block != compiler->fgFirstBB)
{
processBlockStartLocations(block);
}
// Handle the DummyDefs, updating the incoming var location.
for (; refPosIterator != refPositions.end() && currentRefPosition->refType == RefTypeDummyDef;
++refPosIterator, currentRefPosition = &refPosIterator)
{
assert(currentRefPosition->isIntervalRef());
// Don't mark dummy defs as reload
currentRefPosition->reload = false;
resolveLocalRef(nullptr, nullptr, currentRefPosition);
regNumber reg;
if (currentRefPosition->registerAssignment != RBM_NONE)
{
reg = currentRefPosition->assignedReg();
}
else
{
reg = REG_STK;
currentRefPosition->getInterval()->isActive = false;
}
setInVarRegForBB(curBBNum, currentRefPosition->getInterval()->varNum, reg);
}
}
// The next RefPosition should be for the block. Move past it.
assert(refPosIterator != refPositions.end());
assert(currentRefPosition->refType == RefTypeBB);
++refPosIterator;
currentRefPosition = &refPosIterator;
// Handle the RefPositions for the block
for (; refPosIterator != refPositions.end() && currentRefPosition->refType != RefTypeBB &&
currentRefPosition->refType != RefTypeDummyDef;
++refPosIterator, currentRefPosition = &refPosIterator)
{
currentLocation = currentRefPosition->nodeLocation;
// Ensure that the spill & copy info is valid.
// First, if it's reload, it must not be copyReg or moveReg
assert(!currentRefPosition->reload || (!currentRefPosition->copyReg && !currentRefPosition->moveReg));
// If it's copyReg it must not be moveReg, and vice-versa
assert(!currentRefPosition->copyReg || !currentRefPosition->moveReg);
switch (currentRefPosition->refType)
{
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
case RefTypeUpperVectorSave:
case RefTypeUpperVectorRestore:
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
case RefTypeUse:
case RefTypeDef:
// These are the ones we're interested in
break;
case RefTypeKill:
case RefTypeFixedReg:
// These require no handling at resolution time
assert(currentRefPosition->referent != nullptr);
currentRefPosition->referent->recentRefPosition = currentRefPosition;
continue;
case RefTypeExpUse:
// Ignore the ExpUse cases - a RefTypeExpUse would only exist if the
// variable is dead at the entry to the next block. So we'll mark
// it as in its current location and resolution will take care of any
// mismatch.
assert(getNextBlock() == nullptr ||
!VarSetOps::IsMember(compiler, getNextBlock()->bbLiveIn,
currentRefPosition->getInterval()->getVarIndex(compiler)));
currentRefPosition->referent->recentRefPosition = currentRefPosition;
continue;
case RefTypeKillGCRefs:
// No action to take at resolution time, and no interval to update recentRefPosition for.
continue;
case RefTypeDummyDef:
case RefTypeParamDef:
case RefTypeZeroInit:
// Should have handled all of these already
default:
unreached();
break;
}
updateMaxSpill(currentRefPosition);
GenTree* treeNode = currentRefPosition->treeNode;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (currentRefPosition->refType == RefTypeUpperVectorSave)
{
// The treeNode is a call or something that might become one.
noway_assert(treeNode != nullptr);
// If the associated interval is an UpperVector, this must be a RefPosition for a LargeVectorType
// LocalVar.
// Otherwise, this is a non-lclVar interval that has been spilled, and we don't need to do anything.
Interval* interval = currentRefPosition->getInterval();
if (interval->isUpperVector)
{
Interval* localVarInterval = interval->relatedInterval;
if ((localVarInterval->physReg != REG_NA) && !localVarInterval->isPartiallySpilled)
{
// If the localVar is in a register, it must be in a register that is not trashed by
// the current node (otherwise it would have already been spilled).
assert((genRegMask(localVarInterval->physReg) & getKillSetForNode(treeNode)) == RBM_NONE);
// If we have allocated a register to spill it to, we will use that; otherwise, we will spill it
// to the stack. We can use as a temp register any non-arg caller-save register.
currentRefPosition->referent->recentRefPosition = currentRefPosition;
insertUpperVectorSave(treeNode, currentRefPosition, currentRefPosition->getInterval(), block);
localVarInterval->isPartiallySpilled = true;
}
}
else
{
// This is a non-lclVar interval that must have been spilled.
assert(!currentRefPosition->getInterval()->isLocalVar);
assert(currentRefPosition->getInterval()->firstRefPosition->spillAfter);
}
continue;
}
else if (currentRefPosition->refType == RefTypeUpperVectorRestore)
{
// Since we don't do partial restores of tree temp intervals, this must be an upperVector.
Interval* interval = currentRefPosition->getInterval();
Interval* localVarInterval = interval->relatedInterval;
assert(interval->isUpperVector && (localVarInterval != nullptr));
if (localVarInterval->physReg != REG_NA)
{
assert(localVarInterval->isPartiallySpilled);
assert((localVarInterval->assignedReg != nullptr) &&
(localVarInterval->assignedReg->regNum == localVarInterval->physReg) &&
(localVarInterval->assignedReg->assignedInterval == localVarInterval));
insertUpperVectorRestore(treeNode, currentRefPosition, interval, block);
}
localVarInterval->isPartiallySpilled = false;
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Most uses won't actually need to be recorded (they're on the def).
// In those cases, treeNode will be nullptr.
if (treeNode == nullptr)
{
// This is either a use, a dead def, or a field of a struct
Interval* interval = currentRefPosition->getInterval();
assert(currentRefPosition->refType == RefTypeUse ||
currentRefPosition->registerAssignment == RBM_NONE || interval->isStructField ||
interval->IsUpperVector());
// TODO-Review: Need to handle the case where any of the struct fields
// are reloaded/spilled at this use
assert(!interval->isStructField ||
(currentRefPosition->reload == false && currentRefPosition->spillAfter == false));
if (interval->isLocalVar && !interval->isStructField)
{
LclVarDsc* varDsc = interval->getLocalVar(compiler);
// This must be a dead definition. We need to mark the lclVar
// so that it's not considered a candidate for lvRegister, as
// this dead def will have to go to the stack.
assert(currentRefPosition->refType == RefTypeDef);
varDsc->SetRegNum(REG_STK);
}
continue;
}
assert(currentRefPosition->isIntervalRef());
if (currentRefPosition->getInterval()->isInternal)
{
treeNode->gtRsvdRegs |= currentRefPosition->registerAssignment;
}
else
{
writeRegisters(currentRefPosition, treeNode);
if (treeNode->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR) && currentRefPosition->getInterval()->isLocalVar)
{
resolveLocalRef(block, treeNode->AsLclVar(), currentRefPosition);
}
// Mark spill locations on temps
// (local vars are handled in resolveLocalRef, above)
// Note that the tree node will be changed from GTF_SPILL to GTF_SPILLED
// in codegen, taking care of the "reload" case for temps
else if (currentRefPosition->spillAfter || (currentRefPosition->nextRefPosition != nullptr &&
currentRefPosition->nextRefPosition->moveReg))
{
if (treeNode != nullptr)
{
if (currentRefPosition->spillAfter)
{
treeNode->gtFlags |= GTF_SPILL;
// If this is a constant interval that is reusing a pre-existing value, we actually need
// to generate the value at this point in order to spill it.
if (treeNode->IsReuseRegVal())
{
treeNode->ResetReuseRegVal();
}
// In the case of a multi-reg node, also set the spill flag on the
// register specified by the multi-reg index of the current RefPosition.
// Note that the spill flag on treeNode indicates that one or
// more of its allocated registers are in that state.
if (treeNode->IsMultiRegCall())
{
GenTreeCall* call = treeNode->AsCall();
call->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx());
}
#if FEATURE_ARG_SPLIT
else if (treeNode->OperIsPutArgSplit())
{
GenTreePutArgSplit* splitArg = treeNode->AsPutArgSplit();
splitArg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx());
}
#ifdef TARGET_ARM
else if (compFeatureArgSplit() && treeNode->OperIsMultiRegOp())
{
GenTreeMultiRegOp* multiReg = treeNode->AsMultiRegOp();
multiReg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx());
}
#endif // TARGET_ARM
#endif // FEATURE_ARG_SPLIT
}
// If the value is reloaded or moved to a different register, we need to insert
// a node to hold the register to which it should be reloaded
RefPosition* nextRefPosition = currentRefPosition->nextRefPosition;
noway_assert(nextRefPosition != nullptr);
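// Note: in DEBUG, alwaysInsertReload() is a stress mode that takes this path (inserting a
// copy/reload node) even when the next RefPosition is assigned the same register.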
if (INDEBUG(alwaysInsertReload() ||)
nextRefPosition->assignedReg() != currentRefPosition->assignedReg())
{
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Note that we asserted above that this is an Interval RefPosition.
Interval* currentInterval = currentRefPosition->getInterval();
if (!currentInterval->isUpperVector && nextRefPosition->refType == RefTypeUpperVectorSave)
{
// The currentRefPosition is a spill of a tree temp.
// These have no associated Restore, as we always spill if the vector is
// in a register when this is encountered.
// The nextRefPosition we're interested in (where we may need to insert a
// reload or flag as GTF_NOREG_AT_USE) is the subsequent RefPosition.
assert(!currentInterval->isLocalVar);
nextRefPosition = nextRefPosition->nextRefPosition;
assert(nextRefPosition->refType != RefTypeUpperVectorSave);
}
// UpperVector intervals may have unique assignments at each reference.
if (!currentInterval->isUpperVector)
#endif
{
if (nextRefPosition->assignedReg() != REG_NA)
{
insertCopyOrReload(block, treeNode, currentRefPosition->getMultiRegIdx(),
nextRefPosition);
}
else
{
assert(nextRefPosition->RegOptional());
// In the case of tree temps, if the def is spilled and the use didn't
// get a register, set a flag on the tree node so that it is treated as
// contained at the point of its use.
if (currentRefPosition->spillAfter && currentRefPosition->refType == RefTypeDef &&
nextRefPosition->refType == RefTypeUse)
{
assert(nextRefPosition->treeNode == nullptr);
treeNode->gtFlags |= GTF_NOREG_AT_USE;
}
}
}
}
}
// We should never have to "spill after" a temp use, since
// they're single use
else
{
unreached();
}
}
}
}
if (enregisterLocalVars)
{
processBlockEndLocations(block);
}
}
if (enregisterLocalVars)
{
#ifdef DEBUG
if (VERBOSE)
{
printf("-----------------------\n");
printf("RESOLVING BB BOUNDARIES\n");
printf("-----------------------\n");
printf("Resolution Candidates: ");
dumpConvertedVarSet(compiler, resolutionCandidateVars);
printf("\n");
printf("Has %sCritical Edges\n\n", hasCriticalEdges ? "" : "No ");
printf("Prior to Resolution\n");
for (BasicBlock* const block : compiler->Blocks())
{
printf("\n" FMT_BB, block->bbNum);
if (block->hasEHBoundaryIn())
{
JITDUMP(" EH flow in");
}
if (block->hasEHBoundaryOut())
{
JITDUMP(" EH flow out");
}
printf("\nuse def in out\n");
dumpConvertedVarSet(compiler, block->bbVarUse);
printf("\n");
dumpConvertedVarSet(compiler, block->bbVarDef);
printf("\n");
dumpConvertedVarSet(compiler, block->bbLiveIn);
printf("\n");
dumpConvertedVarSet(compiler, block->bbLiveOut);
printf("\n");
dumpInVarToRegMap(block);
dumpOutVarToRegMap(block);
}
printf("\n\n");
}
#endif // DEBUG
resolveEdges();
// Verify register assignments on variables
unsigned lclNum;
LclVarDsc* varDsc;
for (lclNum = 0, varDsc = compiler->lvaTable; lclNum < compiler->lvaCount; lclNum++, varDsc++)
{
if (!isCandidateVar(varDsc))
{
varDsc->SetRegNum(REG_STK);
}
else
{
Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex);
// Determine initial position for parameters
if (varDsc->lvIsParam)
{
regMaskTP initialRegMask = interval->firstRefPosition->registerAssignment;
regNumber initialReg = (initialRegMask == RBM_NONE || interval->firstRefPosition->spillAfter)
? REG_STK
: genRegNumFromMask(initialRegMask);
#ifdef TARGET_ARM
if (varTypeIsMultiReg(varDsc))
{
// TODO-ARM-NYI: Map the hi/lo intervals back to lvRegNum and GetOtherReg() (cases like this
// should have hit an NYI before reaching this point)
assert(!"Multi-reg types not yet supported");
}
else
#endif // TARGET_ARM
{
varDsc->SetArgInitReg(initialReg);
JITDUMP(" Set V%02u argument initial register to %s\n", lclNum, getRegName(initialReg));
}
// Stack args that are part of dependently-promoted structs should never be register candidates (see
// LinearScan::isRegCandidate).
assert(varDsc->lvIsRegArg || !compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc));
}
// If lvRegNum is REG_STK, that means that either no register
// was assigned, or (more likely) that the same register was not
// used for all references. In that case, codegen gets the register
// from the tree node.
if (varDsc->GetRegNum() == REG_STK || interval->isSpilled || interval->isSplit)
{
// For codegen purposes, we'll set lvRegNum to whatever register
// it's currently in as we go.
// However, we never mark an interval as lvRegister if it has either been spilled
// or split.
varDsc->lvRegister = false;
// Skip any dead defs or exposed uses
// (first use exposed will only occur when there is no explicit initialization)
RefPosition* firstRefPosition = interval->firstRefPosition;
while ((firstRefPosition != nullptr) && (firstRefPosition->refType == RefTypeExpUse))
{
firstRefPosition = firstRefPosition->nextRefPosition;
}
if (firstRefPosition == nullptr)
{
// Dead interval
varDsc->lvLRACandidate = false;
if (varDsc->lvRefCnt() == 0)
{
varDsc->lvOnFrame = false;
}
else
{
// We may encounter cases where a lclVar actually has no references, but
// a non-zero refCnt. For safety (in case this is some "hidden" lclVar that we're
// not correctly recognizing), we'll mark those as needing a stack location.
// TODO-Cleanup: Make this an assert if/when we correct the refCnt
// updating.
varDsc->lvOnFrame = true;
}
}
else
{
// If the interval was not spilled, it doesn't need a stack location.
if (!interval->isSpilled)
{
varDsc->lvOnFrame = false;
}
if (firstRefPosition->registerAssignment == RBM_NONE || firstRefPosition->spillAfter)
{
// Either this RefPosition is spilled, or regOptional or it is not a "real" def or use
assert(
firstRefPosition->spillAfter || firstRefPosition->RegOptional() ||
(firstRefPosition->refType != RefTypeDef && firstRefPosition->refType != RefTypeUse));
varDsc->SetRegNum(REG_STK);
}
else
{
varDsc->SetRegNum(firstRefPosition->assignedReg());
}
}
}
else
{
varDsc->lvRegister = true;
varDsc->lvOnFrame = false;
#ifdef DEBUG
regMaskTP registerAssignment = genRegMask(varDsc->GetRegNum());
assert(!interval->isSpilled && !interval->isSplit);
RefPosition* refPosition = interval->firstRefPosition;
assert(refPosition != nullptr);
while (refPosition != nullptr)
{
// All RefPositions must match, except for dead definitions,
// copyReg/moveReg and RefTypeExpUse positions
if (refPosition->registerAssignment != RBM_NONE && !refPosition->copyReg &&
!refPosition->moveReg && refPosition->refType != RefTypeExpUse)
{
assert(refPosition->registerAssignment == registerAssignment);
}
refPosition = refPosition->nextRefPosition;
}
#endif // DEBUG
}
}
}
}
#ifdef DEBUG
if (VERBOSE)
{
printf("Trees after linear scan register allocator (LSRA)\n");
compiler->fgDispBasicBlocks(true);
}
verifyFinalAllocation();
#endif // DEBUG
compiler->raMarkStkVars();
recordMaxSpill();
// TODO-CQ: Review this comment and address as needed.
// Change all unused promoted non-argument struct locals to a non-GC type (in this case TYP_INT)
// so that the gc tracking logic and lvMustInit logic will ignore them.
// Extract the code that does this from raAssignVars, and call it here.
// PRECONDITIONS: Ensure that lvPromoted is set on promoted structs, if and
// only if it is promoted on all paths.
// Call might be something like:
// compiler->BashUnusedStructLocals();
}
//
//------------------------------------------------------------------------
// insertMove: Insert a move of a lclVar with the given lclNum into the given block.
//
// Arguments:
// block - the BasicBlock into which the move will be inserted.
// insertionPoint - the instruction before which to insert the move
// lclNum - the lclNum of the var to be moved
// fromReg - the register from which the var is moving
// toReg - the register to which the var is moving
//
// Return Value:
// None.
//
// Notes:
// If insertionPoint is non-NULL, insert before that instruction;
// otherwise, insert "near" the end (prior to the branch, if any).
// If fromReg or toReg is REG_STK, then move from/to memory, respectively.
void LinearScan::insertMove(
BasicBlock* block, GenTree* insertionPoint, unsigned lclNum, regNumber fromReg, regNumber toReg)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
// the lclVar must be a register candidate
assert(isRegCandidate(varDsc));
// One or both MUST be a register
assert(fromReg != REG_STK || toReg != REG_STK);
// They must not be the same register.
assert(fromReg != toReg);
// This var can't be marked lvRegister now
varDsc->SetRegNum(REG_STK);
GenTree* src = compiler->gtNewLclvNode(lclNum, varDsc->TypeGet());
SetLsraAdded(src);
// There are three cases we need to handle:
// - We are loading a lclVar from the stack.
// - We are storing a lclVar to the stack.
// - We are copying a lclVar between registers.
//
// In the first and second cases, the lclVar node will be marked with GTF_SPILLED and GTF_SPILL, respectively.
// It is up to the code generator to ensure that any necessary normalization is done when loading or storing the
// lclVar's value.
//
// In the third case, we generate GT_COPY(GT_LCL_VAR) and type each node with the normalized type of the lclVar.
// This is safe because a lclVar is always normalized once it is in a register.
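// For example (illustrative only): a register-to-register move of V03 is represented as
// GT_COPY(GT_LCL_VAR V03), with the source register on the GT_LCL_VAR and the destination
// register on the GT_COPY; a store to the stack is just the GT_LCL_VAR marked GTF_SPILL, and a
// load from the stack is the GT_LCL_VAR marked GTF_SPILLED with the destination register set.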
GenTree* dst = src;
if (fromReg == REG_STK)
{
src->gtFlags |= GTF_SPILLED;
src->SetRegNum(toReg);
}
else if (toReg == REG_STK)
{
src->gtFlags |= GTF_SPILL;
src->SetRegNum(fromReg);
}
else
{
var_types movType = varDsc->GetRegisterType();
src->gtType = movType;
dst = new (compiler, GT_COPY) GenTreeCopyOrReload(GT_COPY, movType, src);
// This is the new home of the lclVar - indicate that by clearing the GTF_VAR_DEATH flag.
// Note that if src is itself a lastUse, this will have no effect.
dst->gtFlags &= ~(GTF_VAR_DEATH);
src->SetRegNum(fromReg);
dst->SetRegNum(toReg);
SetLsraAdded(dst);
}
dst->SetUnusedValue();
LIR::Range treeRange = LIR::SeqTree(compiler, dst);
LIR::Range& blockRange = LIR::AsRange(block);
if (insertionPoint != nullptr)
{
blockRange.InsertBefore(insertionPoint, std::move(treeRange));
}
else
{
// Put the copy at the bottom
GenTree* lastNode = blockRange.LastNode();
if (block->KindIs(BBJ_COND, BBJ_SWITCH))
{
noway_assert(!blockRange.IsEmpty());
GenTree* branch = lastNode;
assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE ||
branch->OperGet() == GT_SWITCH);
blockRange.InsertBefore(branch, std::move(treeRange));
}
else
{
// These block kinds don't have a branch at the end.
assert((lastNode == nullptr) || (!lastNode->OperIsConditionalJump() &&
!lastNode->OperIs(GT_SWITCH_TABLE, GT_SWITCH, GT_RETURN, GT_RETFILT)));
blockRange.InsertAtEnd(std::move(treeRange));
}
}
}
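//------------------------------------------------------------------------
// insertSwap: Insert a GT_SWAP node to exchange the registers of two lclVars.
//
// Arguments:
//    block          - the BasicBlock into which the swap will be inserted.
//    insertionPoint - the instruction before which to insert the swap
//    lclNum1        - the first lclVar
//    reg1           - the register currently containing lclNum1
//    lclNum2        - the second lclVar
//    reg2           - the register currently containing lclNum2
//
// Notes:
//    If insertionPoint is nullptr, the swap is inserted at the bottom of the block,
//    prior to any block-ending branch.
//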
void LinearScan::insertSwap(
BasicBlock* block, GenTree* insertionPoint, unsigned lclNum1, regNumber reg1, unsigned lclNum2, regNumber reg2)
{
#ifdef DEBUG
if (VERBOSE)
{
const char* insertionPointString = "top";
if (insertionPoint == nullptr)
{
insertionPointString = "bottom";
}
printf(" " FMT_BB " %s: swap V%02u in %s with V%02u in %s\n", block->bbNum, insertionPointString, lclNum1,
getRegName(reg1), lclNum2, getRegName(reg2));
}
#endif // DEBUG
LclVarDsc* varDsc1 = compiler->lvaGetDesc(lclNum1);
LclVarDsc* varDsc2 = compiler->lvaGetDesc(lclNum2);
assert(reg1 != REG_STK && reg1 != REG_NA && reg2 != REG_STK && reg2 != REG_NA);
GenTree* lcl1 = compiler->gtNewLclvNode(lclNum1, varDsc1->TypeGet());
lcl1->SetRegNum(reg1);
SetLsraAdded(lcl1);
GenTree* lcl2 = compiler->gtNewLclvNode(lclNum2, varDsc2->TypeGet());
lcl2->SetRegNum(reg2);
SetLsraAdded(lcl2);
GenTree* swap = compiler->gtNewOperNode(GT_SWAP, TYP_VOID, lcl1, lcl2);
swap->SetRegNum(REG_NA);
SetLsraAdded(swap);
lcl1->gtNext = lcl2;
lcl2->gtPrev = lcl1;
lcl2->gtNext = swap;
swap->gtPrev = lcl2;
LIR::Range swapRange = LIR::SeqTree(compiler, swap);
LIR::Range& blockRange = LIR::AsRange(block);
if (insertionPoint != nullptr)
{
blockRange.InsertBefore(insertionPoint, std::move(swapRange));
}
else
{
// Put the copy at the bottom
// If there's a branch, make an embedded statement that executes just prior to the branch
if (block->KindIs(BBJ_COND, BBJ_SWITCH))
{
noway_assert(!blockRange.IsEmpty());
GenTree* branch = blockRange.LastNode();
assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE ||
branch->OperGet() == GT_SWITCH);
blockRange.InsertBefore(branch, std::move(swapRange));
}
else
{
assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS));
blockRange.InsertAtEnd(std::move(swapRange));
}
}
}
//------------------------------------------------------------------------
// getTempRegForResolution: Get a free register to use for resolution code.
//
// Arguments:
// fromBlock - The "from" block on the edge being resolved.
// toBlock - The "to" block on the edge
// type - the type of register required
//
// Return Value:
// Returns a register that is free on the given edge, or REG_NA if none is available.
//
// Notes:
// It is up to the caller to check the return value to determine whether a register is
// available, and to handle that case appropriately.
// It is also up to the caller to cache the return value, as this is not cheap to compute.
regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type)
{
// TODO-Throughput: This would be much more efficient if we add RegToVarMaps instead of VarToRegMaps
// and they would be more space-efficient as well.
VarToRegMap fromVarToRegMap = getOutVarToRegMap(fromBlock->bbNum);
VarToRegMap toVarToRegMap = getInVarToRegMap(toBlock->bbNum);
#ifdef TARGET_ARM
regMaskTP freeRegs;
if (type == TYP_DOUBLE)
{
// We have to consider all float registers for TYP_DOUBLE
freeRegs = allRegs(TYP_FLOAT);
}
else
{
freeRegs = allRegs(type);
}
#else // !TARGET_ARM
regMaskTP freeRegs = allRegs(type);
#endif // !TARGET_ARM
#ifdef DEBUG
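// When stressing with a limited register set, behave as if no temp register is available.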
if (getStressLimitRegs() == LSRA_LIMIT_SMALL_SET)
{
return REG_NA;
}
#endif // DEBUG
INDEBUG(freeRegs = stressLimitRegs(nullptr, freeRegs));
// We are only interested in the variables that are live-in to the "to" block.
VarSetOps::Iter iter(compiler, toBlock->bbLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex) && freeRegs != RBM_NONE)
{
regNumber fromReg = getVarReg(fromVarToRegMap, varIndex);
regNumber toReg = getVarReg(toVarToRegMap, varIndex);
assert(fromReg != REG_NA && toReg != REG_NA);
if (fromReg != REG_STK)
{
freeRegs &= ~genRegMask(fromReg, getIntervalForLocalVar(varIndex)->registerType);
}
if (toReg != REG_STK)
{
freeRegs &= ~genRegMask(toReg, getIntervalForLocalVar(varIndex)->registerType);
}
}
#ifdef TARGET_ARM
if (type == TYP_DOUBLE)
{
// Exclude any doubles for which the odd half isn't in freeRegs.
freeRegs = freeRegs & ((freeRegs << 1) & RBM_ALLDOUBLE);
}
#endif
if (freeRegs == RBM_NONE)
{
return REG_NA;
}
else
{
regNumber tempReg = genRegNumFromMask(genFindLowestBit(freeRegs));
return tempReg;
}
}
#ifdef TARGET_ARM
//------------------------------------------------------------------------
// addResolutionForDouble: Add resolution move(s) for TYP_DOUBLE interval
// and update location.
//
// Arguments:
// block - the BasicBlock into which the move will be inserted.
// insertionPoint - the instruction before which to insert the move
// sourceIntervals - maintains sourceIntervals[reg] which each 'reg' is associated with
// location - maintains location[reg] which is the location of the var that was originally in 'reg'.
// toReg - the register to which the var is moving
// fromReg - the register from which the var is moving
// resolveType - the type of resolution to be performed
//
// Return Value:
// None.
//
// Notes:
// It inserts at least one move and updates incoming parameter 'location'.
//
void LinearScan::addResolutionForDouble(BasicBlock* block,
GenTree* insertionPoint,
Interval** sourceIntervals,
regNumberSmall* location,
regNumber toReg,
regNumber fromReg,
ResolveType resolveType)
{
regNumber secondHalfTargetReg = REG_NEXT(fromReg);
Interval* intervalToBeMoved1 = sourceIntervals[fromReg];
Interval* intervalToBeMoved2 = sourceIntervals[secondHalfTargetReg];
assert(!(intervalToBeMoved1 == nullptr && intervalToBeMoved2 == nullptr));
if (intervalToBeMoved1 != nullptr)
{
if (intervalToBeMoved1->registerType == TYP_DOUBLE)
{
// TYP_DOUBLE interval occupies a double register, i.e. two float registers.
assert(intervalToBeMoved2 == nullptr);
assert(genIsValidDoubleReg(toReg));
}
else
{
// TYP_FLOAT interval occupies 1st half of double register, i.e. 1st float register
assert(genIsValidFloatReg(toReg));
}
addResolution(block, insertionPoint, intervalToBeMoved1, toReg, fromReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
location[fromReg] = (regNumberSmall)toReg;
}
if (intervalToBeMoved2 != nullptr)
{
// TYP_FLOAT interval occupies 2nd half of double register.
assert(intervalToBeMoved2->registerType == TYP_FLOAT);
regNumber secondHalfTempReg = REG_NEXT(toReg);
addResolution(block, insertionPoint, intervalToBeMoved2, secondHalfTempReg, secondHalfTargetReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
location[secondHalfTargetReg] = (regNumberSmall)secondHalfTempReg;
}
return;
}
#endif // TARGET_ARM
//------------------------------------------------------------------------
// addResolution: Add a resolution move of the given interval
//
// Arguments:
// block - the BasicBlock into which the move will be inserted.
// insertionPoint - the instruction before which to insert the move
// interval - the interval of the var to be moved
// toReg - the register to which the var is moving
// fromReg - the register from which the var is moving
//
// Return Value:
// None.
//
// Notes:
// For joins, we insert at the bottom (indicated by an insertionPoint
// of nullptr), while for splits we insert at the top.
// This is because for joins 'block' is a pred of the join, while for splits it is a succ.
// For critical edges, this function may be called twice - once to move from
// the source (fromReg), if any, to the stack, in which case toReg will be
// REG_STK, and we insert at the bottom (leave insertionPoint as nullptr).
// The next time, we want to move from the stack to the destination (toReg),
// in which case fromReg will be REG_STK, and we insert at the top.
void LinearScan::addResolution(
BasicBlock* block, GenTree* insertionPoint, Interval* interval, regNumber toReg, regNumber fromReg)
{
#ifdef DEBUG
const char* insertionPointString;
if (insertionPoint == nullptr)
{
// We can't add resolution to a register at the bottom of a block that has an EHBoundaryOut,
// except in the case of the "EH Dummy" resolution from the stack.
assert((block->bbNum > bbNumMaxBeforeResolution) || (fromReg == REG_STK) ||
!blockInfo[block->bbNum].hasEHBoundaryOut);
insertionPointString = "bottom";
}
else
{
// We can't add resolution at the top of a block that has an EHBoundaryIn,
// except in the case of the "EH Dummy" resolution to the stack.
assert((block->bbNum > bbNumMaxBeforeResolution) || (toReg == REG_STK) ||
!blockInfo[block->bbNum].hasEHBoundaryIn);
insertionPointString = "top";
}
// We should never add a resolution move inside a BBCallAlwaysPairTail.
noway_assert(!block->isBBCallAlwaysPairTail());
#endif // DEBUG
JITDUMP(" " FMT_BB " %s: move V%02u from ", block->bbNum, insertionPointString, interval->varNum);
JITDUMP("%s to %s", getRegName(fromReg), getRegName(toReg));
insertMove(block, insertionPoint, interval->varNum, fromReg, toReg);
if (fromReg == REG_STK || toReg == REG_STK)
{
assert(interval->isSpilled);
}
else
{
// We should have already marked this as spilled or split.
assert((interval->isSpilled) || (interval->isSplit));
}
INTRACK_STATS(updateLsraStat(STAT_RESOLUTION_MOV, block->bbNum));
}
//------------------------------------------------------------------------
// handleOutgoingCriticalEdges: Performs the necessary resolution on all critical edges that feed out of 'block'
//
// Arguments:
// block - the block with outgoing critical edges.
//
// Return Value:
// None.
//
// Notes:
// For all outgoing critical edges (i.e. any successor of this block which is
// a join edge), if there are any conflicts, split the edge by adding a new block,
// and generate the resolution code into that block.
void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
{
VARSET_TP outResolutionSet(VarSetOps::Intersection(compiler, block->bbLiveOut, resolutionCandidateVars));
if (VarSetOps::IsEmpty(compiler, outResolutionSet))
{
return;
}
VARSET_TP sameResolutionSet(VarSetOps::MakeEmpty(compiler));
VARSET_TP diffResolutionSet(VarSetOps::MakeEmpty(compiler));
// Get the outVarToRegMap for this block
VarToRegMap outVarToRegMap = getOutVarToRegMap(block->bbNum);
unsigned succCount = block->NumSucc(compiler);
assert(succCount > 1);
// First, determine the live regs at the end of this block so that we know what regs are
// available to copy into.
// Note that for this purpose we use the full live-out set, because we must ensure that
// even the registers that remain the same across the edge are preserved correctly.
regMaskTP liveOutRegs = RBM_NONE;
VarSetOps::Iter liveOutIter(compiler, block->bbLiveOut);
unsigned liveOutVarIndex = 0;
while (liveOutIter.NextElem(&liveOutVarIndex))
{
regNumber fromReg = getVarReg(outVarToRegMap, liveOutVarIndex);
if (fromReg != REG_STK)
{
regMaskTP fromRegMask = genRegMask(fromReg, getIntervalForLocalVar(liveOutVarIndex)->registerType);
liveOutRegs |= fromRegMask;
}
}
// Next, if this block ends with a switch table, or, for Arm64, ends with a JCMP instruction,
// make sure not to copy into the registers that are consumed at the end of this block.
//
// Note: Only switches and JCMP (for Arm64) have input regs (and so can be fed by copies), so those
// are the only block-ending branches that need special handling.
regMaskTP consumedRegs = RBM_NONE;
if (block->bbJumpKind == BBJ_SWITCH)
{
// At this point, Lowering has transformed any non-switch-table blocks into
// cascading ifs.
GenTree* switchTable = LIR::AsRange(block).LastNode();
assert(switchTable != nullptr && switchTable->OperGet() == GT_SWITCH_TABLE);
consumedRegs = switchTable->gtRsvdRegs;
GenTree* op1 = switchTable->gtGetOp1();
GenTree* op2 = switchTable->gtGetOp2();
noway_assert(op1 != nullptr && op2 != nullptr);
assert(op1->GetRegNum() != REG_NA && op2->GetRegNum() != REG_NA);
// No floating point values, so no need to worry about the register type
// (i.e. for ARM32, where we used the genRegMask overload with a type).
assert(varTypeIsIntegralOrI(op1) && varTypeIsIntegralOrI(op2));
consumedRegs |= genRegMask(op1->GetRegNum());
consumedRegs |= genRegMask(op2->GetRegNum());
// Special handling for GT_COPY to not resolve into the source
// of switch's operand.
if (op1->OperIs(GT_COPY))
{
GenTree* srcOp1 = op1->gtGetOp1();
consumedRegs |= genRegMask(srcOp1->GetRegNum());
}
}
#ifdef TARGET_ARM64
// Next, if this block ends with a JCMP, we have to make sure:
// 1. Not to copy into the register that JCMP uses
// e.g. JCMP w21, BRANCH
// 2. Not to copy into the source of JCMP's operand before it is consumed
// e.g. Should not use w0 since it will contain wrong value after resolution
// call METHOD
// ; mov w0, w19 <-- should not resolve in w0 here.
// mov w21, w0
// JCMP w21, BRANCH
// 3. Not to modify the local variable it must consume
// Note: GT_COPY has special handling in codegen and its generation is merged with the
// node that consumes its result. So both, the input and output regs of GT_COPY must be
// excluded from the set available for resolution.
LclVarDsc* jcmpLocalVarDsc = nullptr;
if (block->bbJumpKind == BBJ_COND)
{
GenTree* lastNode = LIR::AsRange(block).LastNode();
if (lastNode->OperIs(GT_JCMP))
{
GenTree* op1 = lastNode->gtGetOp1();
consumedRegs |= genRegMask(op1->GetRegNum());
if (op1->OperIs(GT_COPY))
{
GenTree* srcOp1 = op1->gtGetOp1();
consumedRegs |= genRegMask(srcOp1->GetRegNum());
}
if (op1->IsLocal())
{
GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
jcmpLocalVarDsc = &compiler->lvaTable[lcl->GetLclNum()];
}
}
}
#endif
VarToRegMap sameVarToRegMap = sharedCriticalVarToRegMap;
regMaskTP sameWriteRegs = RBM_NONE;
regMaskTP diffReadRegs = RBM_NONE;
// For each var that may require resolution, classify them as:
// - in the same register at the end of this block and at each target (no resolution needed)
// - in different registers at different targets (resolve separately):
// diffResolutionSet
// - in the same register at each target at which it's live, but different from the end of
// this block. We may be able to resolve these as if it is "join", but only if they do not
// write to any registers that are read by those in the diffResolutionSet:
// sameResolutionSet
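// For example (illustrative): a var in register R1 both here and at every live-in successor needs
// no resolution; one in R2 at one successor and R3 at another goes into diffResolutionSet; one in
// R2 at every live-in successor (but R1 here) is a candidate for sameResolutionSet, provided the
// write to R2 doesn't conflict with registers read by the diffResolutionSet moves or live out here.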
VarSetOps::Iter outResolutionSetIter(compiler, outResolutionSet);
unsigned outResolutionSetVarIndex = 0;
while (outResolutionSetIter.NextElem(&outResolutionSetVarIndex))
{
regNumber fromReg = getVarReg(outVarToRegMap, outResolutionSetVarIndex);
bool maybeSameLivePaths = false;
bool liveOnlyAtSplitEdge = true;
regNumber sameToReg = REG_NA;
for (unsigned succIndex = 0; succIndex < succCount; succIndex++)
{
BasicBlock* succBlock = block->GetSucc(succIndex, compiler);
if (!VarSetOps::IsMember(compiler, succBlock->bbLiveIn, outResolutionSetVarIndex))
{
maybeSameLivePaths = true;
continue;
}
else if (liveOnlyAtSplitEdge)
{
// Is the var live only at those target blocks which are connected by a split edge to this block
liveOnlyAtSplitEdge = ((succBlock->bbPreds->flNext == nullptr) && (succBlock != compiler->fgFirstBB));
}
regNumber toReg = getVarReg(getInVarToRegMap(succBlock->bbNum), outResolutionSetVarIndex);
if (sameToReg == REG_NA)
{
sameToReg = toReg;
continue;
}
if (toReg == sameToReg)
{
continue;
}
sameToReg = REG_NA;
break;
}
// Check for the cases where we can't write to a register.
// We only need to check for these cases if sameToReg is an actual register (not REG_STK).
if (sameToReg != REG_NA && sameToReg != REG_STK)
{
// If there's a path on which this var isn't live, it may use the original value in sameToReg.
// In this case, sameToReg will be in the liveOutRegs of this block.
// Similarly, if sameToReg is in sameWriteRegs, it has already been used (i.e. for a lclVar that's
// live only at another target), and we can't copy another lclVar into that reg in this block.
regMaskTP sameToRegMask =
genRegMask(sameToReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType);
if (maybeSameLivePaths &&
(((sameToRegMask & liveOutRegs) != RBM_NONE) || ((sameToRegMask & sameWriteRegs) != RBM_NONE)))
{
sameToReg = REG_NA;
}
// If this register is busy because it is used by a switch table at the end of the block
// (or for Arm64, it is consumed by JCMP), we can't do the copy in this block since we can't
// insert it after the switch (or, for Arm64, because we must not overwrite the operand of the
// JCMP, or the source of that operand).
if ((sameToRegMask & consumedRegs) != RBM_NONE)
{
sameToReg = REG_NA;
}
#ifdef TARGET_ARM64
if (jcmpLocalVarDsc && (jcmpLocalVarDsc->lvVarIndex == outResolutionSetVarIndex))
{
sameToReg = REG_NA;
}
#endif
// If the var is live only at those blocks connected by a split edge and not live-in at some of the
// target blocks, we will resolve it the same way as if it were in diffResolutionSet and resolution
// will be deferred to the handling of split edges, which means copy will only be at those target(s).
//
// Another way to achieve similar resolution for vars live only at split edges is by removing them
// from consideration up-front, but that requires traversing those edges anyway to account for
// the registers that must not be overwritten.
if (liveOnlyAtSplitEdge && maybeSameLivePaths)
{
sameToReg = REG_NA;
}
}
if (sameToReg == REG_NA)
{
VarSetOps::AddElemD(compiler, diffResolutionSet, outResolutionSetVarIndex);
if (fromReg != REG_STK)
{
diffReadRegs |= genRegMask(fromReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType);
}
}
else if (sameToReg != fromReg)
{
VarSetOps::AddElemD(compiler, sameResolutionSet, outResolutionSetVarIndex);
setVarReg(sameVarToRegMap, outResolutionSetVarIndex, sameToReg);
if (sameToReg != REG_STK)
{
sameWriteRegs |= genRegMask(sameToReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType);
}
}
}
if (!VarSetOps::IsEmpty(compiler, sameResolutionSet))
{
if ((sameWriteRegs & diffReadRegs) != RBM_NONE)
{
// We cannot split the "same" and "diff" regs if the "same" set writes registers
// that must be read by the "diff" set. (Note that when these are done as a "batch"
// we carefully order them to ensure all the input regs are read before they are
// overwritten.)
VarSetOps::UnionD(compiler, diffResolutionSet, sameResolutionSet);
VarSetOps::ClearD(compiler, sameResolutionSet);
}
else
{
// For any vars in the sameResolutionSet, we can simply add the move at the end of "block".
resolveEdge(block, nullptr, ResolveSharedCritical, sameResolutionSet);
}
}
if (!VarSetOps::IsEmpty(compiler, diffResolutionSet))
{
for (unsigned succIndex = 0; succIndex < succCount; succIndex++)
{
BasicBlock* succBlock = block->GetSucc(succIndex, compiler);
// Any "diffResolutionSet" resolution for a block with no other predecessors will be handled later
// as split resolution.
if ((succBlock->bbPreds->flNext == nullptr) && (succBlock != compiler->fgFirstBB))
{
continue;
}
// Now collect the resolution set for just this edge, if any.
// Check only the vars in diffResolutionSet that are live-in to this successor.
VarToRegMap succInVarToRegMap = getInVarToRegMap(succBlock->bbNum);
VARSET_TP edgeResolutionSet(VarSetOps::Intersection(compiler, diffResolutionSet, succBlock->bbLiveIn));
VarSetOps::Iter iter(compiler, edgeResolutionSet);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
regNumber fromReg = getVarReg(outVarToRegMap, varIndex);
regNumber toReg = getVarReg(succInVarToRegMap, varIndex);
if (fromReg == toReg)
{
VarSetOps::RemoveElemD(compiler, edgeResolutionSet, varIndex);
}
}
if (!VarSetOps::IsEmpty(compiler, edgeResolutionSet))
{
// For EH vars, we can always safely load them from the stack into the target for this block,
// so if we have only EH vars, we'll do that instead of splitting the edge.
if ((compiler->compHndBBtabCount > 0) && VarSetOps::IsSubset(compiler, edgeResolutionSet, exceptVars))
{
GenTree* insertionPoint = LIR::AsRange(succBlock).FirstNode();
VarSetOps::Iter edgeSetIter(compiler, edgeResolutionSet);
unsigned edgeVarIndex = 0;
while (edgeSetIter.NextElem(&edgeVarIndex))
{
regNumber toReg = getVarReg(succInVarToRegMap, edgeVarIndex);
setVarReg(succInVarToRegMap, edgeVarIndex, REG_STK);
if (toReg != REG_STK)
{
Interval* interval = getIntervalForLocalVar(edgeVarIndex);
assert(interval->isWriteThru);
addResolution(succBlock, insertionPoint, interval, toReg, REG_STK);
JITDUMP(" (EHvar)\n");
}
}
}
else
{
resolveEdge(block, succBlock, ResolveCritical, edgeResolutionSet);
}
}
}
}
}
//------------------------------------------------------------------------
// resolveEdges: Perform resolution across basic block edges
//
// Arguments:
// None.
//
// Return Value:
// None.
//
// Notes:
// Traverse the basic blocks.
// - If this block has a single predecessor that is not the immediately
// preceding block, perform any needed 'split' resolution at the beginning of this block
// - Otherwise if this block has critical incoming edges, handle them.
//  - If this block has a single successor that has multiple predecessors, perform any needed
// 'join' resolution at the end of this block.
// Note that a block may have both 'split' or 'critical' incoming edge(s) and 'join' outgoing
// edges.
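// As a rough illustration (block numbers hypothetical): an edge BB03->BB05 where BB05 has only
// that one predecessor is handled as a 'split' (moves are placed at the top of BB05); an edge
// where BB03 has only that one successor is handled as a 'join' (moves are placed at the bottom
// of BB03); an edge where BB03 has multiple successors and BB05 has multiple predecessors is
// 'critical', and is resolved either by sharing moves at the end of BB03 or by inserting a new
// block on the edge.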
void LinearScan::resolveEdges()
{
JITDUMP("RESOLVING EDGES\n");
// The resolutionCandidateVars set was initialized with all the lclVars that are live-in to
// any block. We now intersect that set with any lclVars that ever spilled or split.
// If there are no candidates for resolution, simply return.
VarSetOps::IntersectionD(compiler, resolutionCandidateVars, splitOrSpilledVars);
if (VarSetOps::IsEmpty(compiler, resolutionCandidateVars))
{
return;
}
// Handle all the critical edges first.
// We will try to avoid resolution across critical edges in cases where all the critical-edge
// targets of a block have the same home. We will then split the edges only for the
// remaining mismatches. We visit the out-edges, as that allows us to share the moves that are
// common among all the targets.
if (hasCriticalEdges)
{
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
// This is a new block added during resolution - we don't need to visit these now.
continue;
}
if (blockInfo[block->bbNum].hasCriticalOutEdge)
{
handleOutgoingCriticalEdges(block);
}
}
}
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
// This is a new block added during resolution - we don't need to visit these now.
continue;
}
unsigned succCount = block->NumSucc(compiler);
BasicBlock* uniquePredBlock = block->GetUniquePred(compiler);
// First, if this block has a single predecessor,
// we may need resolution at the beginning of this block.
// This may be true even if it's the block we used for starting locations,
// if a variable was spilled.
VARSET_TP inResolutionSet(VarSetOps::Intersection(compiler, block->bbLiveIn, resolutionCandidateVars));
if (!VarSetOps::IsEmpty(compiler, inResolutionSet))
{
if (uniquePredBlock != nullptr)
{
// We may have split edges during critical edge resolution, and in the process split
// a non-critical edge as well.
// It is unlikely that we would ever have more than one of these in sequence (indeed,
// I don't think it's possible), but there's no need to assume that it can't.
while (uniquePredBlock->bbNum > bbNumMaxBeforeResolution)
{
uniquePredBlock = uniquePredBlock->GetUniquePred(compiler);
noway_assert(uniquePredBlock != nullptr);
}
resolveEdge(uniquePredBlock, block, ResolveSplit, inResolutionSet);
}
}
// Finally, if this block has a single successor:
// - and that has at least one other predecessor (otherwise we will do the resolution at the
// top of the successor),
// - and that is not the target of a critical edge (otherwise we've already handled it)
// we may need resolution at the end of this block.
if (succCount == 1)
{
BasicBlock* succBlock = block->GetSucc(0, compiler);
if (succBlock->GetUniquePred(compiler) == nullptr)
{
VARSET_TP outResolutionSet(
VarSetOps::Intersection(compiler, succBlock->bbLiveIn, resolutionCandidateVars));
if (!VarSetOps::IsEmpty(compiler, outResolutionSet))
{
resolveEdge(block, succBlock, ResolveJoin, outResolutionSet);
}
}
}
}
// Now, fixup the mapping for any blocks that were added for edge splitting.
// See the comment prior to the call to fgSplitEdge() in resolveEdge().
// Note that we could fold this loop in with the checking code below, but that
// would only improve the debug case, and would clutter up the code somewhat.
if (compiler->fgBBNumMax > bbNumMaxBeforeResolution)
{
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
// There may be multiple blocks inserted when we split. But we must always have exactly
// one path (i.e. all blocks must be single-successor and single-predecessor),
// and only one block along the path may be non-empty.
// Note that we may have a newly-inserted block that is empty, but which connects
// two non-resolution blocks. This happens when an edge is split that requires it.
BasicBlock* succBlock = block;
do
{
succBlock = succBlock->GetUniqueSucc();
noway_assert(succBlock != nullptr);
} while ((succBlock->bbNum > bbNumMaxBeforeResolution) && succBlock->isEmpty());
BasicBlock* predBlock = block;
do
{
predBlock = predBlock->GetUniquePred(compiler);
noway_assert(predBlock != nullptr);
} while ((predBlock->bbNum > bbNumMaxBeforeResolution) && predBlock->isEmpty());
unsigned succBBNum = succBlock->bbNum;
unsigned predBBNum = predBlock->bbNum;
if (block->isEmpty())
{
// For the case of the empty block, find the non-resolution block (succ or pred).
if (predBBNum > bbNumMaxBeforeResolution)
{
assert(succBBNum <= bbNumMaxBeforeResolution);
predBBNum = 0;
}
else
{
succBBNum = 0;
}
}
else
{
assert((succBBNum <= bbNumMaxBeforeResolution) && (predBBNum <= bbNumMaxBeforeResolution));
}
SplitEdgeInfo info = {predBBNum, succBBNum};
getSplitBBNumToTargetBBNumMap()->Set(block->bbNum, info);
// Set both the live-in and live-out to the live-in of the successor (by construction liveness
// doesn't change in a split block).
VarSetOps::Assign(compiler, block->bbLiveIn, succBlock->bbLiveIn);
VarSetOps::Assign(compiler, block->bbLiveOut, succBlock->bbLiveIn);
}
}
}
#ifdef DEBUG
// Make sure the varToRegMaps match up on all edges.
bool foundMismatch = false;
for (BasicBlock* const block : compiler->Blocks())
{
if (block->isEmpty() && block->bbNum > bbNumMaxBeforeResolution)
{
continue;
}
VarToRegMap toVarToRegMap = getInVarToRegMap(block->bbNum);
for (BasicBlock* const predBlock : block->PredBlocks())
{
VarToRegMap fromVarToRegMap = getOutVarToRegMap(predBlock->bbNum);
VarSetOps::Iter iter(compiler, block->bbLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
regNumber fromReg = getVarReg(fromVarToRegMap, varIndex);
regNumber toReg = getVarReg(toVarToRegMap, varIndex);
if (fromReg != toReg)
{
Interval* interval = getIntervalForLocalVar(varIndex);
// The fromReg and toReg may not match for a write-thru interval where the toReg is
// REG_STK, since the stack value is always valid for that case (so no move is needed).
if (!interval->isWriteThru || (toReg != REG_STK))
{
if (!foundMismatch)
{
foundMismatch = true;
printf("Found mismatched var locations after resolution!\n");
}
printf(" V%02u: " FMT_BB " to " FMT_BB ": %s to %s\n", interval->varNum, predBlock->bbNum,
block->bbNum, getRegName(fromReg), getRegName(toReg));
}
}
}
}
}
assert(!foundMismatch);
#endif
JITDUMP("\n");
}
//------------------------------------------------------------------------
// resolveEdge: Perform the specified type of resolution between two blocks.
//
// Arguments:
// fromBlock - the block from which the edge originates
// toBlock - the block at which the edge terminates
// resolveType - the type of resolution to be performed
// liveSet - the set of tracked lclVar indices which may require resolution
//
// Return Value:
// None.
//
// Assumptions:
// The caller must have performed the analysis to determine the type of the edge.
//
// Notes:
// This method emits the correctly ordered moves necessary to place variables in the
// correct registers across a Split, Join or Critical edge.
// In order to avoid overwriting register values before they have been moved to their
// new home (register/stack), it first does the register-to-stack moves (to free those
// registers), then the register to register moves, ensuring that the target register
// is free before the move, and then finally the stack to register moves.
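//    As a small worked example (assignments hypothetical): with V01: rax->stack, V02: rbx->rax and
//    V03: stack->rbx, the moves are emitted as: store V01 from rax (freeing rax), move V02 from
//    rbx to rax (its target is now free), and finally load V03 from the stack into rbx.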
void LinearScan::resolveEdge(BasicBlock* fromBlock,
BasicBlock* toBlock,
ResolveType resolveType,
VARSET_VALARG_TP liveSet)
{
VarToRegMap fromVarToRegMap = getOutVarToRegMap(fromBlock->bbNum);
VarToRegMap toVarToRegMap;
if (resolveType == ResolveSharedCritical)
{
toVarToRegMap = sharedCriticalVarToRegMap;
}
else
{
toVarToRegMap = getInVarToRegMap(toBlock->bbNum);
}
// The block to which we add the resolution moves depends on the resolveType
BasicBlock* block;
switch (resolveType)
{
case ResolveJoin:
case ResolveSharedCritical:
block = fromBlock;
break;
case ResolveSplit:
block = toBlock;
break;
case ResolveCritical:
// fgSplitEdge may add one or two BasicBlocks. It returns the block that splits
// the edge from 'fromBlock' and 'toBlock', but if it inserts that block right after
// a block with a fall-through it will have to create another block to handle that edge.
// These new blocks can be mapped to existing blocks in order to correctly handle
// the calls to recordVarLocationsAtStartOfBB() from codegen. That mapping is handled
// in resolveEdges(), after all the edge resolution has been done (by calling this
// method for each edge).
block = compiler->fgSplitEdge(fromBlock, toBlock);
// Split edges are counted against fromBlock.
INTRACK_STATS(updateLsraStat(STAT_SPLIT_EDGE, fromBlock->bbNum));
break;
default:
unreached();
break;
}
#ifndef TARGET_XARCH
// We record tempregs for beginning and end of each block.
// For amd64/x86 we only need a tempReg for float - we'll use xchg for int.
// TODO-Throughput: It would be better to determine the tempRegs on demand, but the code below
// modifies the varToRegMaps so we don't have all the correct registers at the time
// we need to get the tempReg.
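// Note that for ResolveSharedCritical no integer temp is requested; resolveEdge is invoked with a
// null 'toBlock' in that case (see handleOutgoingCriticalEdges), so there is presumably no single
// target var-to-reg map from which getTempRegForResolution could pick one.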
regNumber tempRegInt =
(resolveType == ResolveSharedCritical) ? REG_NA : getTempRegForResolution(fromBlock, toBlock, TYP_INT);
#endif // !TARGET_XARCH
regNumber tempRegFlt = REG_NA;
#ifdef TARGET_ARM
regNumber tempRegDbl = REG_NA;
#endif
if ((compiler->compFloatingPointUsed) && (resolveType != ResolveSharedCritical))
{
#ifdef TARGET_ARM
// Try to reserve a double register for TYP_DOUBLE and use it for TYP_FLOAT too if available.
tempRegDbl = getTempRegForResolution(fromBlock, toBlock, TYP_DOUBLE);
if (tempRegDbl != REG_NA)
{
tempRegFlt = tempRegDbl;
}
else
#endif // TARGET_ARM
{
tempRegFlt = getTempRegForResolution(fromBlock, toBlock, TYP_FLOAT);
}
}
regMaskTP targetRegsToDo = RBM_NONE;
regMaskTP targetRegsReady = RBM_NONE;
regMaskTP targetRegsFromStack = RBM_NONE;
// The following arrays capture the location of the registers as they are moved:
// - location[reg] gives the current location of the var that was originally in 'reg'.
// (Note that a var may be moved more than once.)
// - source[reg] gives the original location of the var that needs to be moved to 'reg'.
// For example, if a var is in rax and needs to be moved to rsi, then we would start with:
// location[rax] == rax
// source[rsi] == rax -- this doesn't change
// Then, if for some reason we need to move it temporary to rbx, we would have:
// location[rax] == rbx
// Once we have completed the move, we will have:
// location[rax] == REG_NA
// This indicates that the var originally in rax is now in its target register.
regNumberSmall location[REG_COUNT];
C_ASSERT(sizeof(char) == sizeof(regNumberSmall)); // for memset to work
memset(location, REG_NA, REG_COUNT);
regNumberSmall source[REG_COUNT];
memset(source, REG_NA, REG_COUNT);
// What interval is this register associated with?
// (associated with incoming reg)
Interval* sourceIntervals[REG_COUNT];
memset(&sourceIntervals, 0, sizeof(sourceIntervals));
// Intervals for vars that need to be loaded from the stack
Interval* stackToRegIntervals[REG_COUNT];
memset(&stackToRegIntervals, 0, sizeof(stackToRegIntervals));
// Get the starting insertion point for the "to" resolution
GenTree* insertionPoint = nullptr;
if (resolveType == ResolveSplit || resolveType == ResolveCritical)
{
insertionPoint = LIR::AsRange(block).FirstNode();
}
// If this is an edge between EH regions, we may have "extra" live-out EH vars.
// If we are adding resolution at the end of the block, we need to create "virtual" moves
// for these so that their registers are freed and can be reused.
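// (These resolutions show up as "EH DUMMY" in the dump; since the intervals are write-thru, their
// stack copies are already valid, so the point is to update the var-to-reg map and release the
// register rather than to preserve the value.)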
if ((resolveType == ResolveJoin) && (compiler->compHndBBtabCount > 0))
{
VARSET_TP extraLiveSet(VarSetOps::Diff(compiler, block->bbLiveOut, toBlock->bbLiveIn));
VarSetOps::IntersectionD(compiler, extraLiveSet, exceptVars);
VarSetOps::Iter iter(compiler, extraLiveSet);
unsigned extraVarIndex = 0;
while (iter.NextElem(&extraVarIndex))
{
Interval* interval = getIntervalForLocalVar(extraVarIndex);
assert(interval->isWriteThru);
regNumber fromReg = getVarReg(fromVarToRegMap, extraVarIndex);
if (fromReg != REG_STK)
{
addResolution(block, insertionPoint, interval, REG_STK, fromReg);
JITDUMP(" (EH DUMMY)\n");
setVarReg(fromVarToRegMap, extraVarIndex, REG_STK);
}
}
}
// First:
// - Perform all moves from reg to stack (no ordering needed on these)
// - For reg to reg moves, record the current location, associating their
// source location with the target register they need to go into
// - For stack to reg moves (done last, no ordering needed between them)
// record the interval associated with the target reg
// TODO-Throughput: We should be looping over the liveIn and liveOut registers, since
// that will scale better than the live variables
VarSetOps::Iter iter(compiler, liveSet);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
Interval* interval = getIntervalForLocalVar(varIndex);
regNumber fromReg = getVarReg(fromVarToRegMap, varIndex);
regNumber toReg = getVarReg(toVarToRegMap, varIndex);
if (fromReg == toReg)
{
continue;
}
if (interval->isWriteThru && (toReg == REG_STK))
{
// We don't actually move a writeThru var back to the stack, as its stack value is always valid.
// However, if this is a Join edge (i.e. the move is happening at the bottom of the block),
// and it is a "normal" flow edge, we will go ahead and generate a mov instruction, which will be
// a NOP but will cause the variable to be removed from being live in the register.
if ((resolveType == ResolveSplit) || block->hasEHBoundaryOut())
{
continue;
}
}
// For Critical edges, the location will not change on either side of the edge,
// since we'll add a new block to do the move.
if (resolveType == ResolveSplit)
{
setVarReg(toVarToRegMap, varIndex, fromReg);
}
else if (resolveType == ResolveJoin || resolveType == ResolveSharedCritical)
{
setVarReg(fromVarToRegMap, varIndex, toReg);
}
assert(fromReg < UCHAR_MAX && toReg < UCHAR_MAX);
if (fromReg == REG_STK)
{
stackToRegIntervals[toReg] = interval;
targetRegsFromStack |= genRegMask(toReg);
}
else if (toReg == REG_STK)
{
// Do the reg to stack moves now
addResolution(block, insertionPoint, interval, REG_STK, fromReg);
JITDUMP(" (%s)\n",
(interval->isWriteThru && (toReg == REG_STK)) ? "EH DUMMY" : resolveTypeName[resolveType]);
}
else
{
location[fromReg] = (regNumberSmall)fromReg;
source[toReg] = (regNumberSmall)fromReg;
sourceIntervals[fromReg] = interval;
targetRegsToDo |= genRegMask(toReg);
}
}
// REGISTER to REGISTER MOVES
// First, find all the ones that are ready to move now
regMaskTP targetCandidates = targetRegsToDo;
while (targetCandidates != RBM_NONE)
{
regMaskTP targetRegMask = genFindLowestBit(targetCandidates);
targetCandidates &= ~targetRegMask;
regNumber targetReg = genRegNumFromMask(targetRegMask);
if (location[targetReg] == REG_NA)
{
#ifdef TARGET_ARM
regNumber sourceReg = (regNumber)source[targetReg];
Interval* interval = sourceIntervals[sourceReg];
if (interval->registerType == TYP_DOUBLE)
{
// For ARM32, make sure that both of the float halves of the double register are available.
assert(genIsValidDoubleReg(targetReg));
regNumber anotherHalfRegNum = REG_NEXT(targetReg);
if (location[anotherHalfRegNum] == REG_NA)
{
targetRegsReady |= targetRegMask;
}
}
else
#endif // TARGET_ARM
{
targetRegsReady |= targetRegMask;
}
}
}
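// A target register is considered "ready" when no value still to be moved currently lives in it
// (location[targetReg] == REG_NA), so moving into it cannot clobber anything that remains to be read.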
// Perform reg to reg moves
while (targetRegsToDo != RBM_NONE)
{
while (targetRegsReady != RBM_NONE)
{
regMaskTP targetRegMask = genFindLowestBit(targetRegsReady);
targetRegsToDo &= ~targetRegMask;
targetRegsReady &= ~targetRegMask;
regNumber targetReg = genRegNumFromMask(targetRegMask);
assert(location[targetReg] != targetReg);
assert(targetReg < REG_COUNT);
regNumber sourceReg = (regNumber)source[targetReg];
assert(sourceReg < REG_COUNT);
regNumber fromReg = (regNumber)location[sourceReg];
// stack to reg movs should be done last as part of "targetRegsFromStack"
assert(fromReg < REG_STK);
Interval* interval = sourceIntervals[sourceReg];
assert(interval != nullptr);
addResolution(block, insertionPoint, interval, targetReg, fromReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
sourceIntervals[sourceReg] = nullptr;
location[sourceReg] = REG_NA;
regMaskTP fromRegMask = genRegMask(fromReg);
// Do we have a free targetReg?
if (fromReg == sourceReg)
{
if (source[fromReg] != REG_NA && ((targetRegsFromStack & fromRegMask) != fromRegMask))
{
targetRegsReady |= fromRegMask;
#ifdef TARGET_ARM
if (genIsValidDoubleReg(fromReg))
{
// Ensure that either:
// - the Interval targeting fromReg is not double, or
// - the other half of the double is free.
Interval* otherInterval = sourceIntervals[source[fromReg]];
regNumber upperHalfReg = REG_NEXT(fromReg);
if ((otherInterval->registerType == TYP_DOUBLE) && (location[upperHalfReg] != REG_NA))
{
targetRegsReady &= ~fromRegMask;
}
}
}
else if (genIsValidFloatReg(fromReg) && !genIsValidDoubleReg(fromReg))
{
// We may have freed up the other half of a double where the lower half
// was already free.
regNumber lowerHalfReg = REG_PREV(fromReg);
regNumber lowerHalfSrcReg = (regNumber)source[lowerHalfReg];
regNumber lowerHalfSrcLoc = (regNumber)location[lowerHalfReg];
regMaskTP lowerHalfRegMask = genRegMask(lowerHalfReg);
// Necessary conditions:
// - There is a source register for this reg (lowerHalfSrcReg != REG_NA)
// - It is currently free (lowerHalfSrcLoc == REG_NA)
// - The source interval isn't yet completed (sourceIntervals[lowerHalfSrcReg] != nullptr)
// - It's not in the ready set ((targetRegsReady & lowerHalfRegMask) ==
// RBM_NONE)
// - It's not resolved from stack ((targetRegsFromStack & lowerHalfRegMask) !=
// lowerHalfRegMask)
if ((lowerHalfSrcReg != REG_NA) && (lowerHalfSrcLoc == REG_NA) &&
(sourceIntervals[lowerHalfSrcReg] != nullptr) &&
((targetRegsReady & lowerHalfRegMask) == RBM_NONE) &&
((targetRegsFromStack & lowerHalfRegMask) != lowerHalfRegMask))
{
// This must be a double interval, otherwise it would be in targetRegsReady, or already
// completed.
assert(sourceIntervals[lowerHalfSrcReg]->registerType == TYP_DOUBLE);
targetRegsReady |= lowerHalfRegMask;
}
#endif // TARGET_ARM
}
}
}
if (targetRegsToDo != RBM_NONE)
{
regMaskTP targetRegMask = genFindLowestBit(targetRegsToDo);
regNumber targetReg = genRegNumFromMask(targetRegMask);
// Is it already there due to other moves?
// If not, move it to the temp reg, OR swap it with another register
regNumber sourceReg = (regNumber)source[targetReg];
regNumber fromReg = (regNumber)location[sourceReg];
if (targetReg == fromReg)
{
targetRegsToDo &= ~targetRegMask;
}
else
{
regNumber tempReg = REG_NA;
bool useSwap = false;
if (emitter::isFloatReg(targetReg))
{
#ifdef TARGET_ARM
if (sourceIntervals[fromReg]->registerType == TYP_DOUBLE)
{
// ARM32 requires a double temp register for TYP_DOUBLE.
tempReg = tempRegDbl;
}
else
#endif // TARGET_ARM
tempReg = tempRegFlt;
}
#ifdef TARGET_XARCH
else
{
useSwap = true;
}
#else // !TARGET_XARCH
else
{
tempReg = tempRegInt;
}
#endif // !TARGET_XARCH
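// The move into targetReg cannot be done directly: either swap it with fromReg (xarch integer
// case), spill the value currently occupying targetReg to the stack when no suitable temp
// register is available, or park that value in the temp register so that targetReg becomes free.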
if (useSwap || tempReg == REG_NA)
{
// First, we have to figure out the destination register for what's currently in fromReg,
// so that we can find its sourceInterval.
regNumber otherTargetReg = REG_NA;
// By chance, is fromReg going where it belongs?
if (location[source[fromReg]] == targetReg)
{
otherTargetReg = fromReg;
// If we can swap, we will be done with otherTargetReg as well.
// Otherwise, we'll spill it to the stack and reload it later.
if (useSwap)
{
regMaskTP fromRegMask = genRegMask(fromReg);
targetRegsToDo &= ~fromRegMask;
}
}
else
{
// Look at the remaining registers from targetRegsToDo (which we expect to be relatively
// small at this point) to find out what's currently in targetReg.
regMaskTP mask = targetRegsToDo;
while (mask != RBM_NONE && otherTargetReg == REG_NA)
{
regMaskTP nextRegMask = genFindLowestBit(mask);
regNumber nextReg = genRegNumFromMask(nextRegMask);
mask &= ~nextRegMask;
if (location[source[nextReg]] == targetReg)
{
otherTargetReg = nextReg;
}
}
}
assert(otherTargetReg != REG_NA);
if (useSwap)
{
// Generate a "swap" of fromReg and targetReg
insertSwap(block, insertionPoint, sourceIntervals[source[otherTargetReg]]->varNum, targetReg,
sourceIntervals[sourceReg]->varNum, fromReg);
location[sourceReg] = REG_NA;
location[source[otherTargetReg]] = (regNumberSmall)fromReg;
INTRACK_STATS(updateLsraStat(STAT_RESOLUTION_MOV, block->bbNum));
}
else
{
// Spill "targetReg" to the stack and add its eventual target (otherTargetReg)
// to "targetRegsFromStack", which will be handled below.
// NOTE: This condition is very rare. Setting COMPlus_JitStressRegs=0x203
// has been known to trigger it in JIT SH.
// First, spill "otherInterval" from targetReg to the stack.
Interval* otherInterval = sourceIntervals[source[otherTargetReg]];
setIntervalAsSpilled(otherInterval);
addResolution(block, insertionPoint, otherInterval, REG_STK, targetReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
location[source[otherTargetReg]] = REG_STK;
regMaskTP otherTargetRegMask = genRegMask(otherTargetReg);
targetRegsFromStack |= otherTargetRegMask;
stackToRegIntervals[otherTargetReg] = otherInterval;
targetRegsToDo &= ~otherTargetRegMask;
// Now, move the interval that is going to targetReg.
addResolution(block, insertionPoint, sourceIntervals[sourceReg], targetReg, fromReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
location[sourceReg] = REG_NA;
// Add its "fromReg" to "targetRegsReady", only if:
// - It was one of the target register we originally determined.
// - It is not the eventual target (otherTargetReg) because its
// value will be retrieved from STK.
if (source[fromReg] != REG_NA && fromReg != otherTargetReg)
{
regMaskTP fromRegMask = genRegMask(fromReg);
targetRegsReady |= fromRegMask;
#ifdef TARGET_ARM
if (genIsValidDoubleReg(fromReg))
{
// Ensure that either:
// - the Interval targeting fromReg is not double, or
// - the other half of the double is free.
Interval* otherInterval = sourceIntervals[source[fromReg]];
regNumber upperHalfReg = REG_NEXT(fromReg);
if ((otherInterval->registerType == TYP_DOUBLE) && (location[upperHalfReg] != REG_NA))
{
targetRegsReady &= ~fromRegMask;
}
}
#endif // TARGET_ARM
}
}
targetRegsToDo &= ~targetRegMask;
}
else
{
compiler->codeGen->regSet.rsSetRegsModified(genRegMask(tempReg) DEBUGARG(true));
#ifdef TARGET_ARM
if (sourceIntervals[fromReg]->registerType == TYP_DOUBLE)
{
assert(genIsValidDoubleReg(targetReg));
assert(genIsValidDoubleReg(tempReg));
addResolutionForDouble(block, insertionPoint, sourceIntervals, location, tempReg, targetReg,
resolveType);
}
else
#endif // TARGET_ARM
{
assert(sourceIntervals[targetReg] != nullptr);
addResolution(block, insertionPoint, sourceIntervals[targetReg], tempReg, targetReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
location[targetReg] = (regNumberSmall)tempReg;
}
targetRegsReady |= targetRegMask;
}
}
}
}
// Finally, perform stack to reg moves
// All the target regs will be empty at this point
while (targetRegsFromStack != RBM_NONE)
{
regMaskTP targetRegMask = genFindLowestBit(targetRegsFromStack);
targetRegsFromStack &= ~targetRegMask;
regNumber targetReg = genRegNumFromMask(targetRegMask);
Interval* interval = stackToRegIntervals[targetReg];
assert(interval != nullptr);
addResolution(block, insertionPoint, interval, targetReg, REG_STK);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
}
}
#if TRACK_LSRA_STATS
const char* LinearScan::getStatName(unsigned stat)
{
LsraStat lsraStat = (LsraStat)stat;
assert(lsraStat != LsraStat::COUNT);
static const char* const lsraStatNames[] = {
#define LSRA_STAT_DEF(stat, name) name,
#include "lsra_stats.h"
#undef LSRA_STAT_DEF
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) #stat,
#include "lsra_score.h"
#undef REG_SEL_DEF
};
assert(stat < ArrLen(lsraStatNames));
return lsraStatNames[lsraStat];
}
LsraStat LinearScan::getLsraStatFromScore(RegisterScore registerScore)
{
switch (registerScore)
{
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) \
case RegisterScore::stat: \
return LsraStat::STAT_##stat;
#include "lsra_score.h"
#undef REG_SEL_DEF
default:
return LsraStat::STAT_FREE;
}
}
// ----------------------------------------------------------
// updateLsraStat: Increment LSRA stat counter.
//
// Arguments:
// stat - LSRA stat enum
//      bbNum   -   Basic block with which the LSRA stat is to be
//                  associated.
//
void LinearScan::updateLsraStat(LsraStat stat, unsigned bbNum)
{
if (bbNum > bbNumMaxBeforeResolution)
{
// This is a newly created basic block as part of resolution.
// These blocks contain resolution moves that are already accounted for.
return;
}
++(blockInfo[bbNum].stats[(unsigned)stat]);
}
// -----------------------------------------------------------
// dumpLsraStats - dumps Lsra stats to given file.
//
// Arguments:
// file - file to which stats are to be written.
//
void LinearScan::dumpLsraStats(FILE* file)
{
unsigned sumStats[LsraStat::COUNT] = {0};
weight_t wtdStats[LsraStat::COUNT] = {0};
fprintf(file, "----------\n");
fprintf(file, "LSRA Stats");
#ifdef DEBUG
if (!VERBOSE)
{
fprintf(file, " : %s\n", compiler->info.compFullName);
}
else
{
// In verbose mode there is no need to print the full method name
// while printing LSRA stats.
fprintf(file, "\n");
}
#else
fprintf(file, " : %s\n", compiler->eeGetMethodFullName(compiler->info.compCompHnd));
#endif
fprintf(file, "----------\n");
#ifdef DEBUG
fprintf(file, "Register selection order: %S\n",
JitConfig.JitLsraOrdering() == nullptr ? W("ABCDEFGHIJKLMNOPQ") : JitConfig.JitLsraOrdering());
#endif
fprintf(file, "Total Tracked Vars: %d\n", compiler->lvaTrackedCount);
fprintf(file, "Total Reg Cand Vars: %d\n", regCandidateVarCount);
fprintf(file, "Total number of Intervals: %d\n",
static_cast<unsigned>((intervals.size() == 0 ? 0 : (intervals.size() - 1))));
fprintf(file, "Total number of RefPositions: %d\n", static_cast<unsigned>(refPositions.size() - 1));
// compute total number of spill temps created
unsigned numSpillTemps = 0;
for (int i = 0; i < TYP_COUNT; i++)
{
numSpillTemps += maxSpill[i];
}
fprintf(file, "Total Number of spill temps created: %d\n", numSpillTemps);
fprintf(file, "..........\n");
bool addedBlockHeader = false;
bool anyNonZeroStat = false;
// Iterate for block 0
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
unsigned lsraStat = blockInfo[0].stats[statIndex];
if (lsraStat != 0)
{
if (!addedBlockHeader)
{
addedBlockHeader = true;
fprintf(file, FMT_BB " [%8.2f]: ", 0, blockInfo[0].weight);
fprintf(file, "%s = %d", getStatName(statIndex), lsraStat);
}
else
{
fprintf(file, ", %s = %d", getStatName(statIndex), lsraStat);
}
sumStats[statIndex] += lsraStat;
wtdStats[statIndex] += (lsraStat * blockInfo[0].weight);
anyNonZeroStat = true;
}
}
if (anyNonZeroStat)
{
fprintf(file, "\n");
}
// Iterate for remaining blocks
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
continue;
}
addedBlockHeader = false;
anyNonZeroStat = false;
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
unsigned lsraStat = blockInfo[block->bbNum].stats[statIndex];
if (lsraStat != 0)
{
if (!addedBlockHeader)
{
addedBlockHeader = true;
fprintf(file, FMT_BB " [%8.2f]: ", block->bbNum, block->bbWeight);
fprintf(file, "%s = %d", getStatName(statIndex), lsraStat);
}
else
{
fprintf(file, ", %s = %d", getStatName(statIndex), lsraStat);
}
sumStats[statIndex] += lsraStat;
wtdStats[statIndex] += (lsraStat * block->bbWeight);
anyNonZeroStat = true;
}
}
if (anyNonZeroStat)
{
fprintf(file, "\n");
}
}
fprintf(file, "..........\n");
for (int regSelectI = 0; regSelectI < LsraStat::COUNT; regSelectI++)
{
if (regSelectI == firstRegSelStat)
{
fprintf(file, "..........\n");
}
if ((regSelectI < firstRegSelStat) || (sumStats[regSelectI] != 0))
{
// Print register selection stats
if (regSelectI >= firstRegSelStat)
{
fprintf(file, "Total %s [#%2d] : %d Weighted: %f\n", getStatName(regSelectI),
(regSelectI - firstRegSelStat + 1), sumStats[regSelectI], wtdStats[regSelectI]);
}
else
{
fprintf(file, "Total %s : %d Weighted: %f\n", getStatName(regSelectI), sumStats[regSelectI],
wtdStats[regSelectI]);
}
}
}
printf("\n");
}
// -----------------------------------------------------------
// dumpLsraStatsCsv - dumps LSRA stats to the given file in CSV format.
//
// Arguments:
// file - file to which stats are to be written.
//
void LinearScan::dumpLsraStatsCsv(FILE* file)
{
unsigned sumStats[LsraStat::COUNT] = {0};
// Write the header if the file is empty
if (ftell(file) == 0)
{
// header
fprintf(file, "\"Method Name\"");
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
fprintf(file, ",\"%s\"", LinearScan::getStatName(statIndex));
}
fprintf(file, ",\"PerfScore\"\n");
}
// bbNum == 0
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
sumStats[statIndex] += blockInfo[0].stats[statIndex];
}
// blocks
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
continue;
}
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
sumStats[statIndex] += blockInfo[block->bbNum].stats[statIndex];
}
}
fprintf(file, "\"%s\"", compiler->info.compFullName);
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
fprintf(file, ",%u", sumStats[statIndex]);
}
fprintf(file, ",%.2f\n", compiler->info.compPerfScore);
}
// -----------------------------------------------------------
// dumpLsraStatsSummary - dumps Lsra stats summary to given file
//
// Arguments:
// file - file to which stats are to be written.
//
void LinearScan::dumpLsraStatsSummary(FILE* file)
{
unsigned sumStats[LsraStat::STAT_FREE] = {0};
weight_t wtdStats[LsraStat::STAT_FREE] = {0.0};
// Iterate for block 0
for (int statIndex = 0; statIndex < LsraStat::STAT_FREE; statIndex++)
{
unsigned lsraStat = blockInfo[0].stats[statIndex];
sumStats[statIndex] += lsraStat;
wtdStats[statIndex] += (lsraStat * blockInfo[0].weight);
}
// Iterate for remaining blocks
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
continue;
}
for (int statIndex = 0; statIndex < LsraStat::STAT_FREE; statIndex++)
{
unsigned lsraStat = blockInfo[block->bbNum].stats[statIndex];
sumStats[statIndex] += lsraStat;
wtdStats[statIndex] += (lsraStat * block->bbWeight);
}
}
for (int regSelectI = 0; regSelectI < LsraStat::STAT_FREE; regSelectI++)
{
fprintf(file, ", %s %u %sWt %f", getStatName(regSelectI), sumStats[regSelectI], getStatName(regSelectI),
wtdStats[regSelectI]);
}
}
#endif // TRACK_LSRA_STATS
#ifdef DEBUG
void dumpRegMask(regMaskTP regs)
{
if (regs == RBM_ALLINT)
{
printf("[allInt]");
}
else if (regs == (RBM_ALLINT & ~RBM_FPBASE))
{
printf("[allIntButFP]");
}
else if (regs == RBM_ALLFLOAT)
{
printf("[allFloat]");
}
else if (regs == RBM_ALLDOUBLE)
{
printf("[allDouble]");
}
else
{
dspRegMask(regs);
}
}
static const char* getRefTypeName(RefType refType)
{
switch (refType)
{
#define DEF_REFTYPE(memberName, memberValue, shortName) \
case memberName: \
return #memberName;
#include "lsra_reftypes.h"
#undef DEF_REFTYPE
default:
return nullptr;
}
}
static const char* getRefTypeShortName(RefType refType)
{
switch (refType)
{
#define DEF_REFTYPE(memberName, memberValue, shortName) \
case memberName: \
return shortName;
#include "lsra_reftypes.h"
#undef DEF_REFTYPE
default:
return nullptr;
}
}
//------------------------------------------------------------------------
// getScoreName: Returns the textual name of a register score
const char* LinearScan::getScoreName(RegisterScore score)
{
switch (score)
{
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) \
case stat: \
return shortname;
#include "lsra_score.h"
#undef REG_SEL_DEF
default:
return " - ";
}
}
void RefPosition::dump(LinearScan* linearScan)
{
printf("<RefPosition #%-3u @%-3u", rpNum, nodeLocation);
printf(" %s ", getRefTypeName(refType));
if (this->IsPhysRegRef())
{
this->getReg()->tinyDump();
}
else if (getInterval())
{
this->getInterval()->tinyDump();
}
if (this->treeNode)
{
printf("%s", treeNode->OpName(treeNode->OperGet()));
if (this->treeNode->IsMultiRegNode())
{
printf("[%d]", this->multiRegIdx);
}
}
printf(" " FMT_BB " ", this->bbNum);
printf("regmask=");
dumpRegMask(registerAssignment);
printf(" minReg=%d", minRegCandidateCount);
if (this->lastUse)
{
printf(" last");
}
if (this->reload)
{
printf(" reload");
}
if (this->spillAfter)
{
printf(" spillAfter");
}
if (this->singleDefSpill)
{
printf(" singleDefSpill");
}
if (this->writeThru)
{
printf(" writeThru");
}
if (this->moveReg)
{
printf(" move");
}
if (this->copyReg)
{
printf(" copy");
}
if (this->isFixedRegRef)
{
printf(" fixed");
}
if (this->isLocalDefUse)
{
printf(" local");
}
if (this->delayRegFree)
{
printf(" delay");
}
if (this->outOfOrder)
{
printf(" outOfOrder");
}
if (this->RegOptional())
{
printf(" regOptional");
}
printf(" wt=%.2f", linearScan->getWeight(this));
printf(">\n");
}
void RegRecord::dump()
{
tinyDump();
}
void Interval::dump()
{
printf("Interval %2u:", intervalIndex);
if (isLocalVar)
{
printf(" (V%02u)", varNum);
}
else if (IsUpperVector())
{
assert(relatedInterval != nullptr);
printf(" (U%02u)", relatedInterval->varNum);
}
printf(" %s", varTypeName(registerType));
if (isInternal)
{
printf(" (INTERNAL)");
}
if (isSpilled)
{
printf(" (SPILLED)");
}
if (isSplit)
{
printf(" (SPLIT)");
}
if (isStructField)
{
printf(" (field)");
}
if (isPromotedStruct)
{
printf(" (promoted struct)");
}
if (hasConflictingDefUse)
{
printf(" (def-use conflict)");
}
if (hasInterferingUses)
{
printf(" (interfering uses)");
}
if (isSpecialPutArg)
{
printf(" (specialPutArg)");
}
if (isConstant)
{
printf(" (constant)");
}
if (isWriteThru)
{
printf(" (writeThru)");
}
printf(" RefPositions {");
for (RefPosition* refPosition = this->firstRefPosition; refPosition != nullptr;
refPosition = refPosition->nextRefPosition)
{
printf("#%u@%u", refPosition->rpNum, refPosition->nodeLocation);
if (refPosition->nextRefPosition)
{
printf(" ");
}
}
printf("}");
// this is not used (yet?)
// printf(" SpillOffset %d", this->spillOffset);
printf(" physReg:%s", getRegName(physReg));
printf(" Preferences=");
dumpRegMask(this->registerPreferences);
if (relatedInterval)
{
printf(" RelatedInterval ");
relatedInterval->microDump();
}
printf("\n");
}
// print out very concise representation
void Interval::tinyDump()
{
printf("<Ivl:%u", intervalIndex);
if (isLocalVar)
{
printf(" V%02u", varNum);
}
else if (IsUpperVector())
{
assert(relatedInterval != nullptr);
printf(" (U%02u)", relatedInterval->varNum);
}
else if (isInternal)
{
printf(" internal");
}
printf("> ");
}
// print out extremely concise representation
void Interval::microDump()
{
if (isLocalVar)
{
printf("<V%02u/L%u>", varNum, intervalIndex);
return;
}
else if (IsUpperVector())
{
assert(relatedInterval != nullptr);
printf(" (U%02u)", relatedInterval->varNum);
}
char intervalTypeChar = 'I';
if (isInternal)
{
intervalTypeChar = 'T';
}
printf("<%c%u>", intervalTypeChar, intervalIndex);
}
void RegRecord::tinyDump()
{
printf("<Reg:%-3s> ", getRegName(regNum));
}
void LinearScan::dumpDefList()
{
if (!VERBOSE)
{
return;
}
JITDUMP("DefList: { ");
bool first = true;
for (RefInfoListNode *listNode = defList.Begin(), *end = defList.End(); listNode != end;
listNode = listNode->Next())
{
GenTree* node = listNode->treeNode;
JITDUMP("%sN%03u.t%d. %s", first ? "" : "; ", node->gtSeqNum, node->gtTreeID, GenTree::OpName(node->OperGet()));
first = false;
}
JITDUMP(" }\n");
}
void LinearScan::lsraDumpIntervals(const char* msg)
{
printf("\nLinear scan intervals %s:\n", msg);
for (Interval& interval : intervals)
{
// only dump something if it has references
// if (interval->firstRefPosition)
interval.dump();
}
printf("\n");
}
// Dumps a tree node as a destination or source operand, with the style
// of dump dependent on the mode
void LinearScan::lsraGetOperandString(GenTree* tree,
LsraTupleDumpMode mode,
char* operandString,
unsigned operandStringLength)
{
const char* lastUseChar = "";
if (tree->OperIsScalarLocal() && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
lastUseChar = "*";
}
switch (mode)
{
case LinearScan::LSRA_DUMP_PRE:
case LinearScan::LSRA_DUMP_REFPOS:
_snprintf_s(operandString, operandStringLength, operandStringLength, "t%d%s", tree->gtTreeID, lastUseChar);
break;
case LinearScan::LSRA_DUMP_POST:
{
Compiler* compiler = JitTls::GetCompiler();
if (!tree->gtHasReg())
{
_snprintf_s(operandString, operandStringLength, operandStringLength, "STK%s", lastUseChar);
}
else
{
int charCount = _snprintf_s(operandString, operandStringLength, operandStringLength, "%s%s",
getRegName(tree->GetRegNum()), lastUseChar);
operandString += charCount;
operandStringLength -= charCount;
if (tree->IsMultiRegNode())
{
unsigned regCount = tree->IsMultiRegLclVar() ? compiler->lvaGetDesc(tree->AsLclVar())->lvFieldCnt
: tree->GetMultiRegCount();
for (unsigned regIndex = 1; regIndex < regCount; regIndex++)
{
charCount = _snprintf_s(operandString, operandStringLength, operandStringLength, ",%s%s",
getRegName(tree->GetRegByIndex(regIndex)), lastUseChar);
operandString += charCount;
operandStringLength -= charCount;
}
}
}
}
break;
default:
printf("ERROR: INVALID TUPLE DUMP MODE\n");
break;
}
}
void LinearScan::lsraDispNode(GenTree* tree, LsraTupleDumpMode mode, bool hasDest)
{
Compiler* compiler = JitTls::GetCompiler();
const unsigned operandStringLength = 6 * MAX_MULTIREG_COUNT + 1;
char operandString[operandStringLength];
const char* emptyDestOperand = " ";
char spillChar = ' ';
if (mode == LinearScan::LSRA_DUMP_POST)
{
if ((tree->gtFlags & GTF_SPILL) != 0)
{
spillChar = 'S';
}
if (!hasDest && tree->gtHasReg())
{
// A node can define a register, but not produce a value for a parent to consume,
// i.e. in the "localDefUse" case.
// There used to be an assert here that we wouldn't spill such a node.
// However, we can have unused lclVars that wind up being the node at which
// it is spilled. This probably indicates a bug, but we don't really want to
// assert during a dump.
if (spillChar == 'S')
{
spillChar = '$';
}
else
{
spillChar = '*';
}
hasDest = true;
}
}
printf("%c N%03u. ", spillChar, tree->gtSeqNum);
LclVarDsc* varDsc = nullptr;
unsigned varNum = UINT_MAX;
if (tree->IsLocal())
{
varNum = tree->AsLclVarCommon()->GetLclNum();
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvLRACandidate)
{
hasDest = false;
}
}
if (hasDest)
{
if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED)
{
assert(tree->gtHasReg());
}
lsraGetOperandString(tree, mode, operandString, operandStringLength);
printf("%-15s =", operandString);
}
else
{
printf("%-15s ", emptyDestOperand);
}
if (varDsc != nullptr)
{
if (varDsc->lvLRACandidate)
{
if (mode == LSRA_DUMP_REFPOS)
{
printf(" V%02u(L%d)", varNum, getIntervalForLocalVar(varDsc->lvVarIndex)->intervalIndex);
}
else
{
lsraGetOperandString(tree, mode, operandString, operandStringLength);
printf(" V%02u(%s)", varNum, operandString);
if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED)
{
printf("R");
}
}
}
else
{
printf(" V%02u MEM", varNum);
}
}
else if (tree->OperIs(GT_ASG))
{
assert(!tree->gtHasReg());
printf(" asg%s ", GenTree::OpName(tree->OperGet()));
}
else
{
compiler->gtDispNodeName(tree);
if (tree->OperKind() & GTK_LEAF)
{
compiler->gtDispLeaf(tree, nullptr);
}
}
}
//------------------------------------------------------------------------
// DumpOperandDefs: dumps the registers defined by a node.
//
// Arguments:
//    operand              - The operand whose register defs are dumped (recursing into the
//                           operands of contained nodes).
//    first                - [in, out] true until an operand has been printed for the current node.
//    mode                 - The tuple dump mode.
//    operandString        - The buffer used to format each operand.
//    operandStringLength  - The size of 'operandString'.
//
void LinearScan::DumpOperandDefs(
GenTree* operand, bool& first, LsraTupleDumpMode mode, char* operandString, const unsigned operandStringLength)
{
assert(operand != nullptr);
assert(operandString != nullptr);
if (operand->OperIs(GT_ARGPLACE))
{
return;
}
int dstCount = ComputeOperandDstCount(operand);
if (dstCount != 0)
{
// This operand directly produces registers; print it.
if (!first)
{
printf(",");
}
lsraGetOperandString(operand, mode, operandString, operandStringLength);
printf("%s", operandString);
first = false;
}
else if (operand->isContained())
{
// This is a contained node. Dump the defs produced by its operands.
for (GenTree* op : operand->Operands())
{
DumpOperandDefs(op, first, mode, operandString, operandStringLength);
}
}
}
void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
{
BasicBlock* block;
LsraLocation currentLoc = 1; // 0 is the entry
const unsigned operandStringLength = 6 * MAX_MULTIREG_COUNT + 1;
char operandString[operandStringLength];
// currentRefPosition is not used for LSRA_DUMP_PRE
// We keep separate iterators for defs, so that we can print them
// on the lhs of the dump
RefPositionIterator refPosIterator = refPositions.begin();
RefPosition* currentRefPosition = &refPosIterator;
switch (mode)
{
case LSRA_DUMP_PRE:
printf("TUPLE STYLE DUMP BEFORE LSRA\n");
break;
case LSRA_DUMP_REFPOS:
printf("TUPLE STYLE DUMP WITH REF POSITIONS\n");
break;
case LSRA_DUMP_POST:
printf("TUPLE STYLE DUMP WITH REGISTER ASSIGNMENTS\n");
break;
default:
printf("ERROR: INVALID TUPLE DUMP MODE\n");
return;
}
if (mode != LSRA_DUMP_PRE)
{
printf("Incoming Parameters: ");
for (; refPosIterator != refPositions.end() && currentRefPosition->refType != RefTypeBB;
++refPosIterator, currentRefPosition = &refPosIterator)
{
Interval* interval = currentRefPosition->getInterval();
assert(interval != nullptr && interval->isLocalVar);
printf(" V%02d", interval->varNum);
if (mode == LSRA_DUMP_POST)
{
regNumber reg;
if (currentRefPosition->registerAssignment == RBM_NONE)
{
reg = REG_STK;
}
else
{
reg = currentRefPosition->assignedReg();
}
const LclVarDsc* varDsc = compiler->lvaGetDesc(interval->varNum);
printf("(");
regNumber assignedReg = varDsc->GetRegNum();
regNumber argReg = (varDsc->lvIsRegArg) ? varDsc->GetArgReg() : REG_STK;
assert(reg == assignedReg || varDsc->lvRegister == false);
if (reg != argReg)
{
printf(getRegName(argReg));
printf("=>");
}
printf("%s)", getRegName(reg));
}
}
printf("\n");
}
for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
{
currentLoc += 2;
if (mode == LSRA_DUMP_REFPOS)
{
bool printedBlockHeader = false;
// We should find the boundary RefPositions in the order of exposed uses, dummy defs, and the blocks
for (; refPosIterator != refPositions.end() &&
(currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef ||
(currentRefPosition->refType == RefTypeBB && !printedBlockHeader));
++refPosIterator, currentRefPosition = &refPosIterator)
{
Interval* interval = nullptr;
if (currentRefPosition->isIntervalRef())
{
interval = currentRefPosition->getInterval();
}
switch (currentRefPosition->refType)
{
case RefTypeExpUse:
assert(interval != nullptr);
assert(interval->isLocalVar);
printf(" Exposed use of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum);
break;
case RefTypeDummyDef:
assert(interval != nullptr);
assert(interval->isLocalVar);
printf(" Dummy def of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum);
break;
case RefTypeBB:
block->dspBlockHeader(compiler);
printedBlockHeader = true;
printf("=====\n");
break;
default:
printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum);
break;
}
}
}
else
{
block->dspBlockHeader(compiler);
printf("=====\n");
}
if (enregisterLocalVars && mode == LSRA_DUMP_POST && block != compiler->fgFirstBB &&
block->bbNum <= bbNumMaxBeforeResolution)
{
printf("Predecessor for variable locations: " FMT_BB "\n", blockInfo[block->bbNum].predBBNum);
dumpInVarToRegMap(block);
}
if (block->bbNum > bbNumMaxBeforeResolution)
{
SplitEdgeInfo splitEdgeInfo;
splitBBNumToTargetBBNumMap->Lookup(block->bbNum, &splitEdgeInfo);
assert(splitEdgeInfo.toBBNum <= bbNumMaxBeforeResolution);
assert(splitEdgeInfo.fromBBNum <= bbNumMaxBeforeResolution);
printf("New block introduced for resolution from " FMT_BB " to " FMT_BB "\n", splitEdgeInfo.fromBBNum,
splitEdgeInfo.toBBNum);
}
for (GenTree* node : LIR::AsRange(block))
{
GenTree* tree = node;
int produce = tree->IsValue() ? ComputeOperandDstCount(tree) : 0;
int consume = ComputeAvailableSrcCount(tree);
lsraDispNode(tree, mode, produce != 0 && mode != LSRA_DUMP_REFPOS);
if (mode != LSRA_DUMP_REFPOS)
{
if (consume > 0)
{
printf("; ");
bool first = true;
for (GenTree* operand : tree->Operands())
{
DumpOperandDefs(operand, first, mode, operandString, operandStringLength);
}
}
}
else
{
// Print each RefPosition on a new line, but
// printing all the kills for each node on a single line
// and combining the fixed regs with their associated def or use
bool killPrinted = false;
RefPosition* lastFixedRegRefPos = nullptr;
for (; refPosIterator != refPositions.end() &&
(currentRefPosition->refType == RefTypeUse || currentRefPosition->refType == RefTypeFixedReg ||
currentRefPosition->refType == RefTypeKill || currentRefPosition->refType == RefTypeDef) &&
(currentRefPosition->nodeLocation == tree->gtSeqNum ||
currentRefPosition->nodeLocation == tree->gtSeqNum + 1);
++refPosIterator, currentRefPosition = &refPosIterator)
{
Interval* interval = nullptr;
if (currentRefPosition->isIntervalRef())
{
interval = currentRefPosition->getInterval();
}
switch (currentRefPosition->refType)
{
case RefTypeUse:
if (currentRefPosition->IsPhysRegRef())
{
printf("\n Use:R%d(#%d)",
currentRefPosition->getReg()->regNum, currentRefPosition->rpNum);
}
else
{
assert(interval != nullptr);
printf("\n Use:");
interval->microDump();
printf("(#%d)", currentRefPosition->rpNum);
if (currentRefPosition->isFixedRegRef && !interval->isInternal)
{
assert(genMaxOneBit(currentRefPosition->registerAssignment));
assert(lastFixedRegRefPos != nullptr);
printf(" Fixed:%s(#%d)", getRegName(currentRefPosition->assignedReg()),
lastFixedRegRefPos->rpNum);
lastFixedRegRefPos = nullptr;
}
if (currentRefPosition->isLocalDefUse)
{
printf(" LocalDefUse");
}
if (currentRefPosition->lastUse)
{
printf(" *");
}
}
break;
case RefTypeDef:
{
// Print each def on a new line
assert(interval != nullptr);
printf("\n Def:");
interval->microDump();
printf("(#%d)", currentRefPosition->rpNum);
if (currentRefPosition->isFixedRegRef)
{
assert(genMaxOneBit(currentRefPosition->registerAssignment));
printf(" %s", getRegName(currentRefPosition->assignedReg()));
}
if (currentRefPosition->isLocalDefUse)
{
printf(" LocalDefUse");
}
if (currentRefPosition->lastUse)
{
printf(" *");
}
if (interval->relatedInterval != nullptr)
{
printf(" Pref:");
interval->relatedInterval->microDump();
}
}
break;
case RefTypeKill:
if (!killPrinted)
{
printf("\n Kill: ");
killPrinted = true;
}
printf(getRegName(currentRefPosition->assignedReg()));
printf(" ");
break;
case RefTypeFixedReg:
lastFixedRegRefPos = currentRefPosition;
break;
default:
printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum);
break;
}
}
}
printf("\n");
}
if (enregisterLocalVars && mode == LSRA_DUMP_POST)
{
dumpOutVarToRegMap(block);
}
printf("\n");
}
printf("\n\n");
}
void LinearScan::dumpLsraAllocationEvent(
LsraDumpEvent event, Interval* interval, regNumber reg, BasicBlock* currentBlock, RegisterScore registerScore)
{
if (!(VERBOSE))
{
return;
}
if ((interval != nullptr) && (reg != REG_NA) && (reg != REG_STK))
{
registersToDump |= getRegMask(reg, interval->registerType);
dumpRegRecordTitleIfNeeded();
}
switch (event)
{
// Conflicting def/use
case LSRA_EVENT_DEFUSE_CONFLICT:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("DUconflict ");
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE1:
printf(indentFormat, " Case #1 use defRegAssignment");
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE2:
printf(indentFormat, " Case #2 use useRegAssignment");
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE3:
printf(indentFormat, " Case #3 use useRegAssignment");
dumpRegRecords();
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE4:
printf(indentFormat, " Case #4 use defRegAssignment");
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE5:
printf(indentFormat, " Case #5 set def to all regs");
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE6:
printf(indentFormat, " Case #6 need a copy");
dumpRegRecords();
if (interval == nullptr)
{
printf(indentFormat, " NULL interval");
dumpRegRecords();
}
else if (interval->firstRefPosition->multiRegIdx != 0)
{
printf(indentFormat, " (multiReg)");
dumpRegRecords();
}
break;
case LSRA_EVENT_SPILL:
dumpRefPositionShort(activeRefPosition, currentBlock);
assert(interval != nullptr && interval->assignedReg != nullptr);
printf("Spill %-4s ", getRegName(interval->assignedReg->regNum));
dumpRegRecords();
break;
// Restoring the previous register
case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL:
case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL:
assert(interval != nullptr);
if ((activeRefPosition == nullptr) || (activeRefPosition->refType == RefTypeBB))
{
printf(emptyRefPositionFormat, "");
}
else
{
dumpRefPositionShort(activeRefPosition, currentBlock);
}
printf((event == LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL) ? "Restr %-4s " : "SRstr %-4s ",
getRegName(reg));
dumpRegRecords();
break;
case LSRA_EVENT_DONE_KILL_GC_REFS:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("Done ");
break;
case LSRA_EVENT_NO_GC_KILLS:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("None ");
break;
// Block boundaries
case LSRA_EVENT_START_BB:
// The RefTypeBB comes after the RefTypeDummyDefs associated with that block,
// so we may have a RefTypeDummyDef at the time we dump this event.
// In that case we'll have another "EVENT" associated with it, so we need to
// print the full line now.
if (activeRefPosition->refType != RefTypeBB)
{
dumpNewBlock(currentBlock, activeRefPosition->nodeLocation);
dumpRegRecords();
}
else
{
dumpRefPositionShort(activeRefPosition, currentBlock);
}
break;
// Allocation decisions
case LSRA_EVENT_NEEDS_NEW_REG:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("Free %-4s ", getRegName(reg));
dumpRegRecords();
break;
case LSRA_EVENT_ZERO_REF:
assert(interval != nullptr && interval->isLocalVar);
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("NoRef ");
dumpRegRecords();
break;
case LSRA_EVENT_FIXED_REG:
case LSRA_EVENT_EXP_USE:
case LSRA_EVENT_KEPT_ALLOCATION:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("Keep %-4s ", getRegName(reg));
break;
case LSRA_EVENT_COPY_REG:
assert(interval != nullptr && interval->recentRefPosition != nullptr);
dumpRefPositionShort(activeRefPosition, currentBlock);
if (allocationPassComplete || (registerScore == 0))
{
printf("Copy %-4s ", getRegName(reg));
}
else
{
printf("%-5s(C) %-4s ", getScoreName(registerScore), getRegName(reg));
}
break;
case LSRA_EVENT_MOVE_REG:
assert(interval != nullptr && interval->recentRefPosition != nullptr);
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("Move %-4s ", getRegName(reg));
dumpRegRecords();
break;
case LSRA_EVENT_ALLOC_REG:
dumpRefPositionShort(activeRefPosition, currentBlock);
if (allocationPassComplete || (registerScore == 0))
{
printf("Alloc %-4s ", getRegName(reg));
}
else
{
printf("%-5s(A) %-4s ", getScoreName(registerScore), getRegName(reg));
}
break;
case LSRA_EVENT_REUSE_REG:
dumpRefPositionShort(activeRefPosition, currentBlock);
if (allocationPassComplete || (registerScore == 0))
{
printf("Reuse %-4s ", getRegName(reg));
}
else
{
printf("%-5s(A) %-4s ", getScoreName(registerScore), getRegName(reg));
}
break;
case LSRA_EVENT_NO_ENTRY_REG_ALLOCATED:
assert(interval != nullptr && interval->isLocalVar);
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("LoRef ");
break;
case LSRA_EVENT_NO_REG_ALLOCATED:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("NoReg ");
break;
case LSRA_EVENT_RELOAD:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("ReLod %-4s ", getRegName(reg));
dumpRegRecords();
break;
case LSRA_EVENT_SPECIAL_PUTARG:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("PtArg %-4s ", getRegName(reg));
break;
case LSRA_EVENT_UPPER_VECTOR_SAVE:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("UVSav %-4s ", getRegName(reg));
break;
case LSRA_EVENT_UPPER_VECTOR_RESTORE:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("UVRes %-4s ", getRegName(reg));
break;
// We currently don't dump anything for these events.
case LSRA_EVENT_DEFUSE_FIXED_DELAY_USE:
case LSRA_EVENT_SPILL_EXTENDED_LIFETIME:
case LSRA_EVENT_END_BB:
case LSRA_EVENT_FREE_REGS:
case LSRA_EVENT_INCREMENT_RANGE_END:
case LSRA_EVENT_LAST_USE:
case LSRA_EVENT_LAST_USE_DELAYED:
break;
default:
printf("????? %-4s ", getRegName(reg));
dumpRegRecords();
break;
}
}
//------------------------------------------------------------------------
// dumpRegRecordHeader: Dump the header for a column-based dump of the register state.
//
// Arguments:
// None.
//
// Return Value:
// None.
//
// Assumptions:
// Reg names fit in 4 characters (minimum width of the columns)
//
// Notes:
// In order to make the table as dense as possible (for ease of reading the dumps),
// we determine the minimum regColumnWidth required to represent:
// regs, by name (e.g. eax or xmm0) - this is fixed at 4 characters.
// intervals, as Vnn for lclVar intervals, or as I<num> for other intervals.
// The table is indented by the amount needed for dumpRefPositionShort, which is
// captured in shortRefPositionDumpWidth.
//
void LinearScan::dumpRegRecordHeader()
{
printf("The following table has one or more rows for each RefPosition that is handled during allocation.\n"
"The first column provides the basic information about the RefPosition, with its type (e.g. Def,\n"
"Use, Fixd) followed by a '*' if it is a last use, and a 'D' if it is delayRegFree, and then the\n"
"action taken during allocation (e.g. Alloc a new register, or Keep an existing one).\n"
"The subsequent columns show the Interval occupying each register, if any, followed by 'a' if it is\n"
"active, a 'p' if it is a large vector that has been partially spilled, and 'i' if it is inactive.\n"
"Columns are only printed up to the last modified register, which may increase during allocation,\n"
"in which case additional columns will appear.\n"
"Registers which are not marked modified have ---- in their column.\n\n");
// First, determine the width of each register column (which holds a reg name in the
// header, and an interval name in each subsequent row).
int intervalNumberWidth = (int)log10((double)intervals.size()) + 1;
// The regColumnWidth includes the identifying character (I or V) and an 'i', 'p' or 'a' (inactive,
// partially-spilled or active)
regColumnWidth = intervalNumberWidth + 2;
if (regColumnWidth < 4)
{
regColumnWidth = 4;
}
sprintf_s(intervalNameFormat, MAX_FORMAT_CHARS, "%%c%%-%dd", regColumnWidth - 2);
sprintf_s(regNameFormat, MAX_FORMAT_CHARS, "%%-%ds", regColumnWidth);
// Next, determine the width of the short RefPosition (see dumpRefPositionShort()).
// This is in the form:
// nnn.#mmm NAME TYPEld
// Where:
// nnn is the Location, right-justified to the width needed for the highest location.
// mmm is the RefPosition rpNum, left-justified to the width needed for the highest rpNum.
// NAME is dumped by dumpReferentName(), and is "regColumnWidth".
// TYPE is RefTypeNameShort, and is 4 characters
// l is either '*' (if a last use) or ' ' (otherwise)
// d is either 'D' (if a delayed use) or ' ' (otherwise)
maxNodeLocation = (maxNodeLocation == 0)
? 1
: maxNodeLocation; // corner case of a method with an infinite loop without any gentree nodes
assert(maxNodeLocation >= 1);
assert(refPositions.size() >= 1);
int nodeLocationWidth = (int)log10((double)maxNodeLocation) + 1;
int refPositionWidth = (int)log10((double)refPositions.size()) + 1;
int refTypeInfoWidth = 4 /*TYPE*/ + 2 /* last-use and delayed */ + 1 /* space */;
int locationAndRPNumWidth = nodeLocationWidth + 2 /* .# */ + refPositionWidth + 1 /* space */;
int shortRefPositionDumpWidth = locationAndRPNumWidth + regColumnWidth + 1 /* space */ + refTypeInfoWidth;
sprintf_s(shortRefPositionFormat, MAX_FORMAT_CHARS, "%%%dd.#%%-%dd ", nodeLocationWidth, refPositionWidth);
sprintf_s(emptyRefPositionFormat, MAX_FORMAT_CHARS, "%%-%ds", shortRefPositionDumpWidth);
// The width of the "allocation info"
// - a 8-character allocation decision
// - a space
// - a 4-character register
// - a space
int allocationInfoWidth = 8 + 1 + 4 + 1;
// Next, determine the width of the legend for each row. This includes:
// - a short RefPosition dump (shortRefPositionDumpWidth), which includes a space
// - the allocation info (allocationInfoWidth), which also includes a space
regTableIndent = shortRefPositionDumpWidth + allocationInfoWidth;
// BBnn printed left-justified in the NAME Typeld and allocationInfo space.
int bbNumWidth = (int)log10((double)compiler->fgBBNumMax) + 1;
// In the unlikely event that BB numbers overflow the space, we'll simply omit the predBB
int predBBNumDumpSpace = regTableIndent - locationAndRPNumWidth - bbNumWidth - 9; // 'BB' + ' PredBB'
if (predBBNumDumpSpace < bbNumWidth)
{
sprintf_s(bbRefPosFormat, MAX_LEGEND_FORMAT_CHARS, "BB%%-%dd", shortRefPositionDumpWidth - 2);
}
else
{
sprintf_s(bbRefPosFormat, MAX_LEGEND_FORMAT_CHARS, "BB%%-%dd PredBB%%-%dd", bbNumWidth, predBBNumDumpSpace);
}
if (compiler->shouldDumpASCIITrees())
{
columnSeparator = "|";
line = "-";
leftBox = "+";
middleBox = "+";
rightBox = "+";
}
else
{
columnSeparator = "\xe2\x94\x82";
line = "\xe2\x94\x80";
leftBox = "\xe2\x94\x9c";
middleBox = "\xe2\x94\xbc";
rightBox = "\xe2\x94\xa4";
}
sprintf_s(indentFormat, MAX_FORMAT_CHARS, "%%-%ds", regTableIndent);
// Now, set up the legend format for the RefPosition info
sprintf_s(legendFormat, MAX_LEGEND_FORMAT_CHARS, "%%-%d.%ds%%-%d.%ds%%-%ds%%s", nodeLocationWidth + 1,
nodeLocationWidth + 1, refPositionWidth + 2, refPositionWidth + 2, regColumnWidth + 1);
// Print a "title row" including the legend and the reg names.
lastDumpedRegisters = RBM_NONE;
dumpRegRecordTitleIfNeeded();
}
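//------------------------------------------------------------------------
// dumpRegRecordTitleIfNeeded: Print a new title row for the column-based dump if the set of
// registers being dumped has changed, or if enough rows have been printed since the last title.
//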
void LinearScan::dumpRegRecordTitleIfNeeded()
{
if ((lastDumpedRegisters != registersToDump) || (rowCountSinceLastTitle > MAX_ROWS_BETWEEN_TITLES))
{
lastUsedRegNumIndex = 0;
int lastRegNumIndex = compiler->compFloatingPointUsed ? REG_FP_LAST : REG_INT_LAST;
for (int regNumIndex = 0; regNumIndex <= lastRegNumIndex; regNumIndex++)
{
if ((registersToDump & genRegMask((regNumber)regNumIndex)) != 0)
{
lastUsedRegNumIndex = regNumIndex;
}
}
dumpRegRecordTitle();
lastDumpedRegisters = registersToDump;
}
}
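//------------------------------------------------------------------------
// dumpRegRecordTitleLines: Print a horizontal separator line for the column-based dump,
// spanning the legend area and each dumped register column.
//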
void LinearScan::dumpRegRecordTitleLines()
{
for (int i = 0; i < regTableIndent; i++)
{
printf("%s", line);
}
for (int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++)
{
regNumber regNum = (regNumber)regNumIndex;
if (shouldDumpReg(regNum))
{
printf("%s", middleBox);
for (int i = 0; i < regColumnWidth; i++)
{
printf("%s", line);
}
}
}
printf("%s\n", rightBox);
}
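//------------------------------------------------------------------------
// dumpRegRecordTitle: Print the title rows for the column-based dump: a separator line,
// the legend for the RefPosition info and the register name column headers, and a
// closing separator line.
//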
void LinearScan::dumpRegRecordTitle()
{
dumpRegRecordTitleLines();
// Print out the legend for the RefPosition info
printf(legendFormat, "Loc ", "RP# ", "Name ", "Type Action Reg ");
// Print out the register name column headers
char columnFormatArray[MAX_FORMAT_CHARS];
sprintf_s(columnFormatArray, MAX_FORMAT_CHARS, "%s%%-%d.%ds", columnSeparator, regColumnWidth, regColumnWidth);
for (int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++)
{
regNumber regNum = (regNumber)regNumIndex;
if (shouldDumpReg(regNum))
{
const char* regName = getRegName(regNum);
printf(columnFormatArray, regName);
}
}
printf("%s\n", columnSeparator);
rowCountSinceLastTitle = 0;
dumpRegRecordTitleLines();
}
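//------------------------------------------------------------------------
// dumpRegRecords: Print one row of the column-based dump, showing the Interval (if any)
// currently assigned to each register, annotated with 'a' (active), 'i' (inactive) or
// 'p' (partially spilled), or "Busy" if the register is busy until the next kill.
//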
void LinearScan::dumpRegRecords()
{
static char columnFormatArray[18];
for (regNumber regNum = REG_FIRST; regNum <= (regNumber)lastUsedRegNumIndex; regNum = REG_NEXT(regNum))
{
if (shouldDumpReg(regNum))
{
printf("%s", columnSeparator);
RegRecord& regRecord = physRegs[regNum];
Interval* interval = regRecord.assignedInterval;
if (interval != nullptr)
{
dumpIntervalName(interval);
char activeChar = interval->isActive ? 'a' : 'i';
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (interval->isPartiallySpilled)
{
activeChar = 'p';
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
printf("%c", activeChar);
}
else if ((genRegMask(regNum) & regsBusyUntilKill) != RBM_NONE)
{
printf(columnFormatArray, "Busy");
}
else
{
sprintf_s(columnFormatArray, MAX_FORMAT_CHARS, "%%-%ds", regColumnWidth);
printf(columnFormatArray, "");
}
}
}
printf("%s\n", columnSeparator);
rowCountSinceLastTitle++;
}
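//------------------------------------------------------------------------
// dumpIntervalName: Print the short name of the given Interval: 'V' plus the varNum for a
// lclVar interval, 'U' for an upper-vector interval, 'C' for a constant interval, or 'I'
// plus the interval index otherwise.
//
// Arguments:
//    interval - the Interval whose name is printed
//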
void LinearScan::dumpIntervalName(Interval* interval)
{
if (interval->isLocalVar)
{
printf(intervalNameFormat, 'V', interval->varNum);
}
else if (interval->IsUpperVector())
{
printf(intervalNameFormat, 'U', interval->relatedInterval->varNum);
}
else if (interval->isConstant)
{
printf(intervalNameFormat, 'C', interval->intervalIndex);
}
else
{
printf(intervalNameFormat, 'I', interval->intervalIndex);
}
}
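//------------------------------------------------------------------------
// dumpEmptyRefPosition: Print an empty RefPosition column, keeping the register columns
// aligned when there is no new RefPosition information to dump.
//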
void LinearScan::dumpEmptyRefPosition()
{
printf(emptyRefPositionFormat, "");
}
//------------------------------------------------------------------------
// dumpNewBlock: Dump a line for a new block in a column-based dump of the register state.
//
// Arguments:
// currentBlock - the new block to be dumped
//
void LinearScan::dumpNewBlock(BasicBlock* currentBlock, LsraLocation location)
{
if (!VERBOSE)
{
return;
}
// Always print a title row before a RefTypeBB (except for the first, because we
// will already have printed it before the parameters)
if ((currentBlock != compiler->fgFirstBB) && (currentBlock != nullptr))
{
dumpRegRecordTitle();
}
// If the activeRefPosition is a DummyDef, then don't print anything further (printing the
// title line makes it clearer that we're "about to" start the next block).
if (activeRefPosition->refType == RefTypeDummyDef)
{
dumpEmptyRefPosition();
printf("DDefs ");
printf(regNameFormat, "");
return;
}
printf(shortRefPositionFormat, location, activeRefPosition->rpNum);
if (currentBlock == nullptr)
{
printf(regNameFormat, "END");
printf(" ");
printf(regNameFormat, "");
}
else
{
printf(bbRefPosFormat, currentBlock->bbNum,
currentBlock == compiler->fgFirstBB ? 0 : blockInfo[currentBlock->bbNum].predBBNum);
}
}
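//------------------------------------------------------------------------
// dumpRefPositionShort: Dump a short description of the given RefPosition for the
// column-based dump of the register state.
//
// Arguments:
//    refPosition  - the RefPosition to dump
//    currentBlock - the current block (used when dumping a RefTypeBB RefPosition)
//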
// Note that the size of this dump is computed in dumpRegRecordHeader().
//
void LinearScan::dumpRefPositionShort(RefPosition* refPosition, BasicBlock* currentBlock)
{
static RefPosition* lastPrintedRefPosition = nullptr;
if (refPosition == lastPrintedRefPosition)
{
dumpEmptyRefPosition();
return;
}
lastPrintedRefPosition = refPosition;
if (refPosition->refType == RefTypeBB)
{
dumpNewBlock(currentBlock, refPosition->nodeLocation);
return;
}
printf(shortRefPositionFormat, refPosition->nodeLocation, refPosition->rpNum);
if (refPosition->isIntervalRef())
{
Interval* interval = refPosition->getInterval();
dumpIntervalName(interval);
char lastUseChar = ' ';
char delayChar = ' ';
if (refPosition->lastUse)
{
lastUseChar = '*';
if (refPosition->delayRegFree)
{
delayChar = 'D';
}
}
printf(" %s%c%c ", getRefTypeShortName(refPosition->refType), lastUseChar, delayChar);
}
else if (refPosition->IsPhysRegRef())
{
RegRecord* regRecord = refPosition->getReg();
printf(regNameFormat, getRegName(regRecord->regNum));
printf(" %s ", getRefTypeShortName(refPosition->refType));
}
else
{
assert(refPosition->refType == RefTypeKillGCRefs);
// There's no interval or reg name associated with this.
printf(regNameFormat, " ");
printf(" %s ", getRefTypeShortName(refPosition->refType));
}
}
//------------------------------------------------------------------------
// LinearScan::IsResolutionMove:
// Returns true if the given node is a move inserted by LSRA
// resolution.
//
// Arguments:
// node - the node to check.
//
bool LinearScan::IsResolutionMove(GenTree* node)
{
if (!IsLsraAdded(node))
{
return false;
}
switch (node->OperGet())
{
case GT_LCL_VAR:
case GT_COPY:
return node->IsUnusedValue();
case GT_SWAP:
return true;
default:
return false;
}
}
//------------------------------------------------------------------------
// LinearScan::IsResolutionNode:
// Returns true if the given node is either a move inserted by LSRA
// resolution or an operand to such a move.
//
// Arguments:
// containingRange - the range that contains the node to check.
// node - the node to check.
//
bool LinearScan::IsResolutionNode(LIR::Range& containingRange, GenTree* node)
{
for (;;)
{
if (IsResolutionMove(node))
{
return true;
}
if (!IsLsraAdded(node) || (node->OperGet() != GT_LCL_VAR))
{
return false;
}
LIR::Use use;
bool foundUse = containingRange.TryGetUse(node, &use);
assert(foundUse);
node = use.User();
}
}
//------------------------------------------------------------------------
// verifyFinalAllocation: Traverse the RefPositions and verify various invariants.
//
// Arguments:
// None.
//
// Return Value:
// None.
//
// Notes:
// If verbose is set, this will also dump a table of the final allocations.
void LinearScan::verifyFinalAllocation()
{
if (VERBOSE)
{
printf("\nFinal allocation\n");
}
// Clear register assignments.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->assignedInterval = nullptr;
}
for (Interval& interval : intervals)
{
interval.assignedReg = nullptr;
interval.physReg = REG_NA;
}
DBEXEC(VERBOSE, dumpRegRecordTitle());
BasicBlock* currentBlock = nullptr;
GenTree* firstBlockEndResolutionNode = nullptr;
LsraLocation currentLocation = MinLocation;
for (RefPosition& refPosition : refPositions)
{
RefPosition* currentRefPosition = &refPosition;
Interval* interval = nullptr;
RegRecord* regRecord = nullptr;
regNumber regNum = REG_NA;
activeRefPosition = currentRefPosition;
if (currentRefPosition->refType != RefTypeBB)
{
if (currentRefPosition->IsPhysRegRef())
{
regRecord = currentRefPosition->getReg();
regRecord->recentRefPosition = currentRefPosition;
regNum = regRecord->regNum;
}
else if (currentRefPosition->isIntervalRef())
{
interval = currentRefPosition->getInterval();
interval->recentRefPosition = currentRefPosition;
if (currentRefPosition->registerAssignment != RBM_NONE)
{
if (!genMaxOneBit(currentRefPosition->registerAssignment))
{
assert(currentRefPosition->refType == RefTypeExpUse ||
currentRefPosition->refType == RefTypeDummyDef);
}
else
{
regNum = currentRefPosition->assignedReg();
regRecord = getRegisterRecord(regNum);
}
}
}
}
LsraLocation newLocation = currentRefPosition->nodeLocation;
currentLocation = newLocation;
switch (currentRefPosition->refType)
{
case RefTypeBB:
{
if (currentBlock == nullptr)
{
currentBlock = startBlockSequence();
}
else
{
// Verify the resolution moves at the end of the previous block.
for (GenTree* node = firstBlockEndResolutionNode; node != nullptr; node = node->gtNext)
{
assert(enregisterLocalVars);
// Only verify nodes that are actually moves; don't bother with the nodes that are
// operands to moves.
if (IsResolutionMove(node))
{
verifyResolutionMove(node, currentLocation);
}
}
// Validate the locations at the end of the previous block.
if (enregisterLocalVars)
{
VarToRegMap outVarToRegMap = outVarToRegMaps[currentBlock->bbNum];
VarSetOps::Iter iter(compiler, currentBlock->bbLiveOut);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
if (localVarIntervals[varIndex] == nullptr)
{
assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate);
continue;
}
regNumber regNum = getVarReg(outVarToRegMap, varIndex);
interval = getIntervalForLocalVar(varIndex);
if (interval->physReg != regNum)
{
assert(regNum == REG_STK);
assert((interval->physReg == REG_NA) || interval->isWriteThru);
}
interval->physReg = REG_NA;
interval->assignedReg = nullptr;
interval->isActive = false;
}
}
// Clear register assignments.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->assignedInterval = nullptr;
}
// Now, record the locations at the beginning of this block.
currentBlock = moveToNextBlock();
}
if (currentBlock != nullptr)
{
if (enregisterLocalVars)
{
VarToRegMap inVarToRegMap = inVarToRegMaps[currentBlock->bbNum];
VarSetOps::Iter iter(compiler, currentBlock->bbLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
if (localVarIntervals[varIndex] == nullptr)
{
assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate);
continue;
}
regNumber regNum = getVarReg(inVarToRegMap, varIndex);
interval = getIntervalForLocalVar(varIndex);
interval->physReg = regNum;
interval->assignedReg = &(physRegs[regNum]);
interval->isActive = true;
physRegs[regNum].assignedInterval = interval;
}
}
if (VERBOSE)
{
dumpRefPositionShort(currentRefPosition, currentBlock);
dumpRegRecords();
}
// Finally, handle the resolution moves, if any, at the beginning of the next block.
firstBlockEndResolutionNode = nullptr;
bool foundNonResolutionNode = false;
LIR::Range& currentBlockRange = LIR::AsRange(currentBlock);
for (GenTree* node : currentBlockRange)
{
if (IsResolutionNode(currentBlockRange, node))
{
assert(enregisterLocalVars);
if (foundNonResolutionNode)
{
firstBlockEndResolutionNode = node;
break;
}
else if (IsResolutionMove(node))
{
// Only verify nodes that are actually moves; don't bother with the nodes that are
// operands to moves.
verifyResolutionMove(node, currentLocation);
}
}
else
{
foundNonResolutionNode = true;
}
}
}
}
break;
case RefTypeKill:
assert(regRecord != nullptr);
assert(regRecord->assignedInterval == nullptr);
dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
break;
case RefTypeFixedReg:
assert(regRecord != nullptr);
dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
break;
case RefTypeUpperVectorSave:
dumpLsraAllocationEvent(LSRA_EVENT_UPPER_VECTOR_SAVE, nullptr, REG_NA, currentBlock);
break;
case RefTypeUpperVectorRestore:
dumpLsraAllocationEvent(LSRA_EVENT_UPPER_VECTOR_RESTORE, nullptr, REG_NA, currentBlock);
break;
case RefTypeDef:
case RefTypeUse:
case RefTypeParamDef:
case RefTypeZeroInit:
assert(interval != nullptr);
if (interval->isSpecialPutArg)
{
dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, interval, regNum);
break;
}
if (currentRefPosition->reload)
{
interval->isActive = true;
assert(regNum != REG_NA);
interval->physReg = regNum;
interval->assignedReg = regRecord;
regRecord->assignedInterval = interval;
dumpLsraAllocationEvent(LSRA_EVENT_RELOAD, nullptr, regRecord->regNum, currentBlock);
}
if (regNum == REG_NA)
{
// If this interval is still assigned to a register
if (interval->physReg != REG_NA)
{
// then unassign it if no new register was assigned to the RefTypeDef
if (RefTypeIsDef(currentRefPosition->refType))
{
assert(interval->assignedReg != nullptr);
if (interval->assignedReg->assignedInterval == interval)
{
interval->assignedReg->assignedInterval = nullptr;
}
interval->physReg = REG_NA;
interval->assignedReg = nullptr;
}
}
dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, interval);
}
else if (RefTypeIsDef(currentRefPosition->refType))
{
interval->isActive = true;
if (VERBOSE)
{
if (interval->isConstant && (currentRefPosition->treeNode != nullptr) &&
currentRefPosition->treeNode->IsReuseRegVal())
{
dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, nullptr, regRecord->regNum, currentBlock);
}
else
{
dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_REG, nullptr, regRecord->regNum, currentBlock);
}
}
}
else if (currentRefPosition->copyReg)
{
dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, interval, regRecord->regNum, currentBlock);
}
else if (currentRefPosition->moveReg)
{
assert(interval->assignedReg != nullptr);
interval->assignedReg->assignedInterval = nullptr;
interval->physReg = regNum;
interval->assignedReg = regRecord;
regRecord->assignedInterval = interval;
if (VERBOSE)
{
dumpEmptyRefPosition();
printf("Move %-4s ", getRegName(regRecord->regNum));
}
}
else
{
dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
}
if (currentRefPosition->lastUse || (currentRefPosition->spillAfter && !currentRefPosition->writeThru))
{
interval->isActive = false;
}
if (regNum != REG_NA)
{
if (currentRefPosition->spillAfter)
{
if (VERBOSE)
{
// If refPos is marked as copyReg, then the reg that is spilled
// is the homeReg of the interval, not the reg currently assigned
// to refPos.
regNumber spillReg = regNum;
if (currentRefPosition->copyReg)
{
assert(interval != nullptr);
spillReg = interval->physReg;
}
dumpRegRecords();
dumpEmptyRefPosition();
if (currentRefPosition->writeThru)
{
printf("WThru %-4s ", getRegName(spillReg));
}
else
{
printf("Spill %-4s ", getRegName(spillReg));
}
}
}
else if (currentRefPosition->copyReg)
{
regRecord->assignedInterval = interval;
}
else
{
if (RefTypeIsDef(currentRefPosition->refType))
{
// Interval was assigned to a different register.
// Clear the assigned interval of current register.
if (interval->physReg != REG_NA && interval->physReg != regNum)
{
interval->assignedReg->assignedInterval = nullptr;
}
}
interval->physReg = regNum;
interval->assignedReg = regRecord;
regRecord->assignedInterval = interval;
}
}
break;
case RefTypeKillGCRefs:
// No action to take.
// However, we will assert that, at resolution time, no registers contain GC refs.
{
DBEXEC(VERBOSE, printf(" "));
regMaskTP candidateRegs = currentRefPosition->registerAssignment;
while (candidateRegs != RBM_NONE)
{
regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
candidateRegs &= ~nextRegBit;
regNumber nextReg = genRegNumFromMask(nextRegBit);
RegRecord* regRecord = getRegisterRecord(nextReg);
Interval* assignedInterval = regRecord->assignedInterval;
assert(assignedInterval == nullptr || !varTypeIsGC(assignedInterval->registerType));
}
}
break;
case RefTypeExpUse:
case RefTypeDummyDef:
// Do nothing; these will be handled by the RefTypeBB.
DBEXEC(VERBOSE, dumpRefPositionShort(currentRefPosition, currentBlock));
DBEXEC(VERBOSE, printf(" "));
break;
case RefTypeInvalid:
// No action to take for these 'currentRefPosition->refType' values.
break;
}
if (currentRefPosition->refType != RefTypeBB)
{
DBEXEC(VERBOSE, dumpRegRecords());
if (interval != nullptr)
{
if (currentRefPosition->copyReg)
{
assert(interval->physReg != regNum);
regRecord->assignedInterval = nullptr;
assert(interval->assignedReg != nullptr);
regRecord = interval->assignedReg;
}
if (currentRefPosition->spillAfter || currentRefPosition->lastUse)
{
assert(!currentRefPosition->spillAfter || currentRefPosition->IsActualRef());
if (RefTypeIsDef(currentRefPosition->refType))
{
// If an interval got assigned to a different register (while the different
// register got spilled), then clear the assigned interval of current register.
if (interval->physReg != REG_NA && interval->physReg != regNum)
{
interval->assignedReg->assignedInterval = nullptr;
}
}
interval->physReg = REG_NA;
interval->assignedReg = nullptr;
// regRecord could be null if the RefPosition does not require a register.
if (regRecord != nullptr)
{
regRecord->assignedInterval = nullptr;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
else if (interval->isUpperVector && !currentRefPosition->RegOptional())
{
// These only require a register if they are not RegOptional, and their lclVar
// interval is living in a register and not already partially spilled.
if ((currentRefPosition->refType == RefTypeUpperVectorSave) ||
(currentRefPosition->refType == RefTypeUpperVectorRestore))
{
Interval* lclVarInterval = interval->relatedInterval;
assert((lclVarInterval->physReg == REG_NA) || lclVarInterval->isPartiallySpilled);
}
}
#endif
else
{
assert(currentRefPosition->RegOptional());
}
}
}
}
}
// Now, verify the resolution blocks.
// Currently these are nearly always at the end of the method, but that may not always be the case.
// So, we'll go through all the BBs looking for blocks whose bbNum is greater than bbNumMaxBeforeResolution.
for (BasicBlock* const currentBlock : compiler->Blocks())
{
if (currentBlock->bbNum > bbNumMaxBeforeResolution)
{
// If we haven't enregistered any lclVars, we have no resolution blocks.
assert(enregisterLocalVars);
if (VERBOSE)
{
dumpRegRecordTitle();
printf(shortRefPositionFormat, 0, 0);
assert(currentBlock->bbPreds != nullptr && currentBlock->bbPreds->getBlock() != nullptr);
printf(bbRefPosFormat, currentBlock->bbNum, currentBlock->bbPreds->getBlock()->bbNum);
dumpRegRecords();
}
// Clear register assignments.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->assignedInterval = nullptr;
}
// Set the incoming register assignments
VarToRegMap inVarToRegMap = getInVarToRegMap(currentBlock->bbNum);
VarSetOps::Iter iter(compiler, currentBlock->bbLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
if (localVarIntervals[varIndex] == nullptr)
{
assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate);
continue;
}
regNumber regNum = getVarReg(inVarToRegMap, varIndex);
Interval* interval = getIntervalForLocalVar(varIndex);
interval->physReg = regNum;
interval->assignedReg = &(physRegs[regNum]);
interval->isActive = true;
physRegs[regNum].assignedInterval = interval;
}
// Verify the moves in this block
LIR::Range& currentBlockRange = LIR::AsRange(currentBlock);
for (GenTree* node : currentBlockRange)
{
assert(IsResolutionNode(currentBlockRange, node));
if (IsResolutionMove(node))
{
// Only verify nodes that are actually moves; don't bother with the nodes that are
// operands to moves.
verifyResolutionMove(node, currentLocation);
}
}
// Verify the outgoing register assignments
{
VarToRegMap outVarToRegMap = getOutVarToRegMap(currentBlock->bbNum);
VarSetOps::Iter iter(compiler, currentBlock->bbLiveOut);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
if (localVarIntervals[varIndex] == nullptr)
{
assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate);
continue;
}
regNumber regNum = getVarReg(outVarToRegMap, varIndex);
Interval* interval = getIntervalForLocalVar(varIndex);
// Either the register assignments match, or the outgoing assignment is on the stack
// and this is a write-thru interval.
assert(interval->physReg == regNum || (interval->physReg == REG_NA && regNum == REG_STK) ||
(interval->isWriteThru && regNum == REG_STK));
interval->physReg = REG_NA;
interval->assignedReg = nullptr;
interval->isActive = false;
}
}
}
}
DBEXEC(VERBOSE, printf("\n"));
}
//------------------------------------------------------------------------
// verifyResolutionMove: Verify a resolution statement. Called by verifyFinalAllocation()
//
// Arguments:
// resolutionMove - A GenTree* that must be a resolution move.
// currentLocation - The LsraLocation of the most recent RefPosition that has been verified.
//
// Return Value:
// None.
//
// Notes:
// If verbose is set, this will also dump the moves into the table of final allocations.
void LinearScan::verifyResolutionMove(GenTree* resolutionMove, LsraLocation currentLocation)
{
GenTree* dst = resolutionMove;
assert(IsResolutionMove(dst));
if (dst->OperGet() == GT_SWAP)
{
GenTreeLclVarCommon* left = dst->gtGetOp1()->AsLclVarCommon();
GenTreeLclVarCommon* right = dst->gtGetOp2()->AsLclVarCommon();
regNumber leftRegNum = left->GetRegNum();
regNumber rightRegNum = right->GetRegNum();
LclVarDsc* leftVarDsc = compiler->lvaGetDesc(left);
LclVarDsc* rightVarDsc = compiler->lvaGetDesc(right);
Interval* leftInterval = getIntervalForLocalVar(leftVarDsc->lvVarIndex);
Interval* rightInterval = getIntervalForLocalVar(rightVarDsc->lvVarIndex);
assert(leftInterval->physReg == leftRegNum && rightInterval->physReg == rightRegNum);
leftInterval->physReg = rightRegNum;
rightInterval->physReg = leftRegNum;
leftInterval->assignedReg = &physRegs[rightRegNum];
rightInterval->assignedReg = &physRegs[leftRegNum];
physRegs[rightRegNum].assignedInterval = leftInterval;
physRegs[leftRegNum].assignedInterval = rightInterval;
if (VERBOSE)
{
printf(shortRefPositionFormat, currentLocation, 0);
dumpIntervalName(leftInterval);
printf(" Swap ");
printf(" %-4s ", getRegName(rightRegNum));
dumpRegRecords();
printf(shortRefPositionFormat, currentLocation, 0);
dumpIntervalName(rightInterval);
printf(" \" ");
printf(" %-4s ", getRegName(leftRegNum));
dumpRegRecords();
}
return;
}
regNumber dstRegNum = dst->GetRegNum();
regNumber srcRegNum;
GenTreeLclVarCommon* lcl;
if (dst->OperGet() == GT_COPY)
{
lcl = dst->gtGetOp1()->AsLclVarCommon();
srcRegNum = lcl->GetRegNum();
}
else
{
lcl = dst->AsLclVarCommon();
if ((lcl->gtFlags & GTF_SPILLED) != 0)
{
srcRegNum = REG_STK;
}
else
{
assert((lcl->gtFlags & GTF_SPILL) != 0);
srcRegNum = dstRegNum;
dstRegNum = REG_STK;
}
}
Interval* interval = getIntervalForLocalVarNode(lcl);
assert(interval->physReg == srcRegNum || (srcRegNum == REG_STK && interval->physReg == REG_NA));
if (srcRegNum != REG_STK)
{
physRegs[srcRegNum].assignedInterval = nullptr;
}
if (dstRegNum != REG_STK)
{
interval->physReg = dstRegNum;
interval->assignedReg = &(physRegs[dstRegNum]);
physRegs[dstRegNum].assignedInterval = interval;
interval->isActive = true;
}
else
{
interval->physReg = REG_NA;
interval->assignedReg = nullptr;
interval->isActive = false;
}
if (VERBOSE)
{
printf(shortRefPositionFormat, currentLocation, 0);
dumpIntervalName(interval);
printf(" Move ");
printf(" %-4s ", getRegName(dstRegNum));
dumpRegRecords();
}
}
#endif // DEBUG
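// ----------------------------------------------------------
// RegisterSelection: Construct a RegisterSelection instance for the given LinearScan.
// Under DEBUG, this also builds the mapping from each heuristic to its try_* method and
// establishes the heuristic ordering, which may be overridden by the JitLsraOrdering config.
//
// Arguments:
//    linearScan - the LinearScan allocator this selection logic belongs to
//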
LinearScan::RegisterSelection::RegisterSelection(LinearScan* linearScan)
{
this->linearScan = linearScan;
#ifdef DEBUG
mappingTable = new ScoreMappingTable(linearScan->compiler->getAllocator(CMK_LSRA));
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) \
mappingTable->Set(stat, &LinearScan::RegisterSelection::try_##stat);
#include "lsra_score.h"
#undef REG_SEL_DEF
LPCWSTR ordering = JitConfig.JitLsraOrdering();
if (ordering == nullptr)
{
ordering = W("ABCDEFGHIJKLMNOPQ");
}
for (int orderId = 0; orderId < REGSELECT_HEURISTIC_COUNT; orderId++)
{
// Make sure we do not set repeated entries
assert(RegSelectionOrder[orderId] == NONE);
switch (ordering[orderId])
{
#define REG_SEL_DEF(enum_name, value, shortname, orderSeqId) \
case orderSeqId: \
RegSelectionOrder[orderId] = enum_name; \
break;
#include "lsra_score.h"
#undef REG_SEL_DEF
default:
assert(!"Invalid lsraOrdering value.");
}
}
#endif // DEBUG
}
// ----------------------------------------------------------
// reset: Resets the values of all the fields used for register selection.
//
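// Arguments:
//    interval - the Interval for which a register is being selected
//    refPos   - the RefPosition of that Interval being allocated
//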
void LinearScan::RegisterSelection::reset(Interval* interval, RefPosition* refPos)
{
currentInterval = interval;
refPosition = refPos;
score = 0;
regType = linearScan->getRegisterType(currentInterval, refPosition);
currentLocation = refPosition->nodeLocation;
nextRefPos = refPosition->nextRefPosition;
candidates = refPosition->registerAssignment;
preferences = currentInterval->registerPreferences;
// This is not actually a preference; it merely tracks the lclVar that this
// "specialPutArg" is using.
relatedInterval = currentInterval->isSpecialPutArg ? nullptr : currentInterval->relatedInterval;
relatedPreferences = (relatedInterval == nullptr) ? RBM_NONE : relatedInterval->getCurrentPreferences();
rangeEndLocation = refPosition->getRangeEndLocation();
relatedLastLocation = rangeEndLocation;
preferCalleeSave = currentInterval->preferCalleeSave;
rangeEndRefPosition = nullptr;
lastRefPosition = currentInterval->lastRefPosition;
lastLocation = MinLocation;
prevRegRec = currentInterval->assignedReg;
// These are used in the post-selection updates, and must be set for any selection.
freeCandidates = RBM_NONE;
matchingConstants = RBM_NONE;
unassignedSet = RBM_NONE;
coversSet = RBM_NONE;
preferenceSet = RBM_NONE;
coversRelatedSet = RBM_NONE;
coversFullSet = RBM_NONE;
foundRegBit = REG_NA;
found = false;
skipAllocation = false;
coversSetsCalculated = false;
}
// ----------------------------------------------------------
// applySelection: Apply the heuristic to the candidates.
//
// Arguments:
// selectionScore: The score corresponding to the heuristics we apply.
// selectionCandidates: The possible candidates for the heuristic to apply.
//
// Return Values:
// 'true' if there was a single register candidate available after the heuristic is applied.
//
bool LinearScan::RegisterSelection::applySelection(int selectionScore, regMaskTP selectionCandidates)
{
regMaskTP newCandidates = candidates & selectionCandidates;
if (newCandidates != RBM_NONE)
{
score += selectionScore;
candidates = newCandidates;
return LinearScan::isSingleRegister(candidates);
}
return false;
}
// ----------------------------------------------------------
// applySingleRegSelection: Select a single register, if it is in the candidate set.
//
// Arguments:
// selectionScore: The score corresponding to the heuristics we apply.
// selectionCandidates: The possible candidates for the heuristic to apply.
//
// Return Values:
// 'true' if there was a single register candidate available after the heuristic is applied.
//
bool LinearScan::RegisterSelection::applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate)
{
assert(LinearScan::isSingleRegister(selectionCandidate));
regMaskTP newCandidates = candidates & selectionCandidate;
if (newCandidates != RBM_NONE)
{
candidates = newCandidates;
return true;
}
return false;
}
// ----------------------------------------------------------
// try_FREE: Apply the FREE heuristic.
//
void LinearScan::RegisterSelection::try_FREE()
{
assert(!found);
if (freeCandidates == RBM_NONE)
{
return;
}
found = applySelection(FREE, freeCandidates);
}
// ----------------------------------------------------------
// try_CONST_AVAILABLE: Apply the CONST_AVAILABLE (matching constant) heuristic.
//
// Note: we always need to define the 'matchingConstants' set.
//
void LinearScan::RegisterSelection::try_CONST_AVAILABLE()
{
assert(!found);
if (freeCandidates == RBM_NONE)
{
return;
}
if (currentInterval->isConstant && RefTypeIsDef(refPosition->refType))
{
found = applySelection(CONST_AVAILABLE, matchingConstants);
}
}
// ----------------------------------------------------------
// try_THIS_ASSIGNED: Apply the THIS_ASSIGNED heuristic.
//
void LinearScan::RegisterSelection::try_THIS_ASSIGNED()
{
assert(!found);
if (freeCandidates == RBM_NONE)
{
return;
}
if (prevRegRec != nullptr)
{
found = applySelection(THIS_ASSIGNED, freeCandidates & preferences & prevRegBit);
}
}
// ----------------------------------------------------------
// try_COVERS: Apply the COVERS heuristic.
//
void LinearScan::RegisterSelection::try_COVERS()
{
assert(!found);
calculateCoversSets();
found = applySelection(COVERS, coversSet & preferenceSet);
}
// ----------------------------------------------------------
// try_OWN_PREFERENCE: Apply the OWN_PREFERENCE heuristic.
//
// Note: 'preferenceSet' already includes only freeCandidates.
//
void LinearScan::RegisterSelection::try_OWN_PREFERENCE()
{
assert(!found);
#ifdef DEBUG
calculateCoversSets();
#endif
found = applySelection(OWN_PREFERENCE, (preferenceSet & freeCandidates));
}
// ----------------------------------------------------------
// try_COVERS_RELATED: Apply the COVERS_RELATED heuristic.
//
void LinearScan::RegisterSelection::try_COVERS_RELATED()
{
assert(!found);
#ifdef DEBUG
calculateCoversSets();
#endif
found = applySelection(COVERS_RELATED, (coversRelatedSet & freeCandidates));
}
// ----------------------------------------------------------
// try_RELATED_PREFERENCE: Apply the RELATED_PREFERENCE heuristic.
//
void LinearScan::RegisterSelection::try_RELATED_PREFERENCE()
{
assert(!found);
found = applySelection(RELATED_PREFERENCE, relatedPreferences & freeCandidates);
}
// ----------------------------------------------------------
// try_CALLER_CALLEE: Apply the CALLER_CALLEE heuristic.
//
void LinearScan::RegisterSelection::try_CALLER_CALLEE()
{
assert(!found);
found = applySelection(CALLER_CALLEE, callerCalleePrefs & freeCandidates);
}
// ----------------------------------------------------------
// try_UNASSIGNED: Apply the UNASSIGNED heuristic.
//
void LinearScan::RegisterSelection::try_UNASSIGNED()
{
assert(!found);
#ifdef DEBUG
calculateCoversSets();
#endif
found = applySelection(UNASSIGNED, unassignedSet);
}
// ----------------------------------------------------------
// try_COVERS_FULL: Apply the COVERS_FULL heuristic.
//
void LinearScan::RegisterSelection::try_COVERS_FULL()
{
assert(!found);
#ifdef DEBUG
calculateCoversSets();
#endif
found = applySelection(COVERS_FULL, (coversFullSet & freeCandidates));
}
// ----------------------------------------------------------
// try_BEST_FIT: Apply the BEST_FIT heuristic.
//
void LinearScan::RegisterSelection::try_BEST_FIT()
{
assert(!found);
if (freeCandidates == RBM_NONE)
{
return;
}
regMaskTP bestFitSet = RBM_NONE;
// If the best score includes COVERS_FULL, pick the one that's killed soonest.
// If none cover the full range, the BEST_FIT is the one that's killed later.
bool earliestIsBest = ((score & COVERS_FULL) != 0);
LsraLocation bestFitLocation = earliestIsBest ? MaxLocation : MinLocation;
for (regMaskTP bestFitCandidates = candidates; bestFitCandidates != RBM_NONE;)
{
regMaskTP bestFitCandidateBit = genFindLowestBit(bestFitCandidates);
bestFitCandidates &= ~bestFitCandidateBit;
regNumber bestFitCandidateRegNum = genRegNumFromMask(bestFitCandidateBit);
// Find the next RefPosition of the register.
LsraLocation nextIntervalLocation = linearScan->getNextIntervalRef(bestFitCandidateRegNum, regType);
LsraLocation nextPhysRefLocation = linearScan->getNextFixedRef(bestFitCandidateRegNum, regType);
nextPhysRefLocation = Min(nextPhysRefLocation, nextIntervalLocation);
// If the nextPhysRefLocation is a fixedRef for the rangeEndRefPosition, increment it so that
// we don't mistakenly conclude that it doesn't cover the live range.
// This doesn't handle the case where earlier RefPositions for this Interval are also
// FixedRefs of this regNum, but those are only interesting in the case where they are
// "local last uses" of the Interval - otherwise the liveRange would interfere with the reg.
// TODO: This duplicates code in an earlier loop, and is basically here to duplicate previous
// behavior; see if we can avoid this.
if (nextPhysRefLocation == rangeEndLocation && rangeEndRefPosition->isFixedRefOfReg(bestFitCandidateRegNum))
{
INDEBUG(linearScan->dumpLsraAllocationEvent(LSRA_EVENT_INCREMENT_RANGE_END, currentInterval));
nextPhysRefLocation++;
}
if (nextPhysRefLocation == bestFitLocation)
{
bestFitSet |= bestFitCandidateBit;
}
else
{
bool isBetter = false;
if (nextPhysRefLocation > lastLocation)
{
// This covers the full range; favor it if the other doesn't, or if it's a closer match.
if ((bestFitLocation <= lastLocation) || (nextPhysRefLocation < bestFitLocation))
{
isBetter = true;
}
}
else
{
// This doesn't cover the full range; favor it if the other doesn't either, but this ends later.
if ((bestFitLocation <= lastLocation) && (nextPhysRefLocation > bestFitLocation))
{
isBetter = true;
}
}
if (isBetter)
{
bestFitSet = bestFitCandidateBit;
bestFitLocation = nextPhysRefLocation;
}
}
}
assert(bestFitSet != RBM_NONE);
found = applySelection(BEST_FIT, bestFitSet);
}
// ----------------------------------------------------------
// try_IS_PREV_REG: Apply the IS_PREV_REG heuristic.
//
// Note: Oddly, the previous heuristics only considered this if it covered the range.
// TODO: Check whether this only applies if we have freeCandidates.
//
void LinearScan::RegisterSelection::try_IS_PREV_REG()
{
// TODO: We do not check found here.
if ((prevRegRec != nullptr) && ((score & COVERS_FULL) != 0))
{
found = applySingleRegSelection(IS_PREV_REG, prevRegBit);
}
}
// ----------------------------------------------------------
// try_REG_ORDER: Apply the REG_ORDER heuristic. Only applies if we have freeCandidates.
//
void LinearScan::RegisterSelection::try_REG_ORDER()
{
assert(!found);
if (freeCandidates == RBM_NONE)
{
return;
}
// This will always result in a single candidate. That is, it is the tie-breaker
// for free candidates, and doesn't make sense as anything other than the last
// heuristic for free registers.
unsigned lowestRegOrder = UINT_MAX;
regMaskTP lowestRegOrderBit = RBM_NONE;
for (regMaskTP regOrderCandidates = candidates; regOrderCandidates != RBM_NONE;)
{
regMaskTP regOrderCandidateBit = genFindLowestBit(regOrderCandidates);
regOrderCandidates &= ~regOrderCandidateBit;
regNumber regOrderCandidateRegNum = genRegNumFromMask(regOrderCandidateBit);
unsigned thisRegOrder = linearScan->getRegisterRecord(regOrderCandidateRegNum)->regOrder;
if (thisRegOrder < lowestRegOrder)
{
lowestRegOrder = thisRegOrder;
lowestRegOrderBit = regOrderCandidateBit;
}
}
assert(lowestRegOrderBit != RBM_NONE);
found = applySingleRegSelection(REG_ORDER, lowestRegOrderBit);
}
// ----------------------------------------------------------
// try_SPILL_COST: Apply the SPILL_COST heuristic.
//
void LinearScan::RegisterSelection::try_SPILL_COST()
{
assert(!found);
// The set of registers with the lowest spill weight.
regMaskTP lowestCostSpillSet = RBM_NONE;
// Apply the SPILL_COST heuristic and eliminate regs that can't be spilled.
// The spill weight for 'refPosition' (the one we're allocating now).
weight_t thisSpillWeight = linearScan->getWeight(refPosition);
// The spill weight for the best candidate we've found so far.
weight_t bestSpillWeight = FloatingPointUtils::infinite_double();
// True if we found registers with lower spill weight than this refPosition.
bool foundLowerSpillWeight = false;
for (regMaskTP spillCandidates = candidates; spillCandidates != RBM_NONE;)
{
regMaskTP spillCandidateBit = genFindLowestBit(spillCandidates);
spillCandidates &= ~spillCandidateBit;
regNumber spillCandidateRegNum = genRegNumFromMask(spillCandidateBit);
RegRecord* spillCandidateRegRecord = &linearScan->physRegs[spillCandidateRegNum];
Interval* assignedInterval = spillCandidateRegRecord->assignedInterval;
// Can and should the interval in this register be spilled for this one,
// if we don't find a better alternative?
if ((linearScan->getNextIntervalRef(spillCandidateRegNum, regType) == currentLocation) &&
!assignedInterval->getNextRefPosition()->RegOptional())
{
continue;
}
if (!linearScan->isSpillCandidate(currentInterval, refPosition, spillCandidateRegRecord))
{
continue;
}
weight_t currentSpillWeight = 0;
RefPosition* recentRefPosition = assignedInterval != nullptr ? assignedInterval->recentRefPosition : nullptr;
if ((recentRefPosition != nullptr) &&
(recentRefPosition->RegOptional() && !(assignedInterval->isLocalVar && recentRefPosition->IsActualRef())))
{
// We do not "spillAfter" if the previous (recent) refPosition was regOptional or if it
// is not an actual ref. In those cases, we will reload at a future (next) refPosition.
// For such cases, consider the spill cost of the next refPosition.
// See notes in "spillInterval()".
RefPosition* reloadRefPosition = assignedInterval->getNextRefPosition();
if (reloadRefPosition != nullptr)
{
currentSpillWeight = linearScan->getWeight(reloadRefPosition);
}
}
// Only consider spillCost if we were not able to calculate weight of reloadRefPosition.
if (currentSpillWeight == 0)
{
currentSpillWeight = linearScan->spillCost[spillCandidateRegNum];
#ifdef TARGET_ARM
if (currentInterval->registerType == TYP_DOUBLE)
{
currentSpillWeight = max(currentSpillWeight, linearScan->spillCost[REG_NEXT(spillCandidateRegNum)]);
}
#endif
}
if (currentSpillWeight < bestSpillWeight)
{
bestSpillWeight = currentSpillWeight;
lowestCostSpillSet = spillCandidateBit;
}
else if (currentSpillWeight == bestSpillWeight)
{
lowestCostSpillSet |= spillCandidateBit;
}
}
if (lowestCostSpillSet == RBM_NONE)
{
return;
}
// We won't spill if this refPosition is RegOptional() and we have no candidates
// with a lower spill cost.
if ((bestSpillWeight >= thisSpillWeight) && refPosition->RegOptional())
{
currentInterval->assignedReg = nullptr;
skipAllocation = true;
found = true;
}
// We must have at least one with the lowest spill cost.
assert(lowestCostSpillSet != RBM_NONE);
found = applySelection(SPILL_COST, lowestCostSpillSet);
}
// ----------------------------------------------------------
// try_FAR_NEXT_REF: Apply the FAR_NEXT_REF heuristic.
//
void LinearScan::RegisterSelection::try_FAR_NEXT_REF()
{
assert(!found);
LsraLocation farthestLocation = MinLocation;
regMaskTP farthestSet = RBM_NONE;
for (regMaskTP farthestCandidates = candidates; farthestCandidates != RBM_NONE;)
{
regMaskTP farthestCandidateBit = genFindLowestBit(farthestCandidates);
farthestCandidates &= ~farthestCandidateBit;
regNumber farthestCandidateRegNum = genRegNumFromMask(farthestCandidateBit);
// Find the next RefPosition of the register.
LsraLocation nextIntervalLocation =
linearScan->getNextIntervalRef(farthestCandidateRegNum, currentInterval->registerType);
LsraLocation nextPhysRefLocation = Min(linearScan->nextFixedRef[farthestCandidateRegNum], nextIntervalLocation);
if (nextPhysRefLocation == farthestLocation)
{
farthestSet |= farthestCandidateBit;
}
else if (nextPhysRefLocation > farthestLocation)
{
farthestSet = farthestCandidateBit;
farthestLocation = nextPhysRefLocation;
}
}
// We must have at least one with the farthest next reference.
assert(farthestSet != RBM_NONE);
found = applySelection(FAR_NEXT_REF, farthestSet);
}
// ----------------------------------------------------------
// try_PREV_REG_OPT: Apply the PREV_REG_OPT heuristic.
//
void LinearScan::RegisterSelection::try_PREV_REG_OPT()
{
assert(!found);
regMaskTP prevRegOptSet = RBM_NONE;
for (regMaskTP prevRegOptCandidates = candidates; prevRegOptCandidates != RBM_NONE;)
{
regMaskTP prevRegOptCandidateBit = genFindLowestBit(prevRegOptCandidates);
prevRegOptCandidates &= ~prevRegOptCandidateBit;
regNumber prevRegOptCandidateRegNum = genRegNumFromMask(prevRegOptCandidateBit);
Interval* assignedInterval = linearScan->physRegs[prevRegOptCandidateRegNum].assignedInterval;
bool foundPrevRegOptReg = true;
#ifdef DEBUG
bool hasAssignedInterval = false;
#endif
if ((assignedInterval != nullptr) && (assignedInterval->recentRefPosition != nullptr))
{
foundPrevRegOptReg &=
(assignedInterval->recentRefPosition->reload && assignedInterval->recentRefPosition->RegOptional());
#ifdef DEBUG
hasAssignedInterval = true;
#endif
}
#ifndef TARGET_ARM
else
{
foundPrevRegOptReg = false;
}
#endif
#ifdef TARGET_ARM
// If the current interval is TYP_DOUBLE, verify whether the other half register matches the heuristic.
// We have three cases:
// 1. One register of the pair has an assigned interval: Check if that register's refPosition
// matches the heuristic. If yes, add it to the set.
// 2. Both registers of the pair have an assigned interval: Conservatively "and" the conditions for the
// heuristics of their corresponding refPositions. If both registers' heuristics match, add them
// to the set. TODO-CQ-ARM: We may implement a better condition later.
// 3. Neither register of the pair has an assigned interval: Skip adding the register and assert.
if (currentInterval->registerType == TYP_DOUBLE)
{
regNumber anotherHalfRegNum = linearScan->findAnotherHalfRegNum(prevRegOptCandidateRegNum);
assignedInterval = linearScan->physRegs[anotherHalfRegNum].assignedInterval;
if ((assignedInterval != nullptr) && (assignedInterval->recentRefPosition != nullptr))
{
if (assignedInterval->recentRefPosition->reload && assignedInterval->recentRefPosition->RegOptional())
{
foundPrevRegOptReg &= (assignedInterval->recentRefPosition->reload &&
assignedInterval->recentRefPosition->RegOptional());
}
#ifdef DEBUG
hasAssignedInterval = true;
#endif
}
}
#endif
if (foundPrevRegOptReg)
{
// TODO-Cleanup: Previously, we always used the highest regNum with a previous regOptional
// RefPosition, which is not really consistent with the way other selection criteria are
// applied. It should probably be: prevRegOptSet |= prevRegOptCandidateBit;
prevRegOptSet = prevRegOptCandidateBit;
}
#ifdef DEBUG
// The assignedInterval should be non-null and should have a recentRefPosition; however, since
// this is a heuristic, we don't want a fatal error, so we just assert (not noway_assert).
if (!hasAssignedInterval)
{
assert(!"Spill candidate has no assignedInterval recentRefPosition");
}
#endif
}
found = applySelection(PREV_REG_OPT, prevRegOptSet);
}
// ----------------------------------------------------------
// try_REG_NUM: Apply the REG_NUM heuristic.
//
void LinearScan::RegisterSelection::try_REG_NUM()
{
assert(!found);
found = applySingleRegSelection(REG_NUM, genFindLowestBit(candidates));
}
// ----------------------------------------------------------
// calculateCoversSets: Calculate the necessary covers set registers to be used
// for heuristics like COVERS, COVERS_RELATED, and COVERS_FULL.
//
void LinearScan::RegisterSelection::calculateCoversSets()
{
if (freeCandidates == RBM_NONE || coversSetsCalculated)
{
return;
}
preferenceSet = (candidates & preferences);
regMaskTP coversCandidates = (preferenceSet == RBM_NONE) ? candidates : preferenceSet;
for (; coversCandidates != RBM_NONE;)
{
regMaskTP coversCandidateBit = genFindLowestBit(coversCandidates);
coversCandidates &= ~coversCandidateBit;
regNumber coversCandidateRegNum = genRegNumFromMask(coversCandidateBit);
// If we have a single candidate we don't need to compute the preference-related sets, but we
// do need to compute the unassignedSet.
if (!found)
{
// Find the next RefPosition of the register.
LsraLocation nextIntervalLocation = linearScan->getNextIntervalRef(coversCandidateRegNum, regType);
LsraLocation nextPhysRefLocation = linearScan->getNextFixedRef(coversCandidateRegNum, regType);
LsraLocation coversCandidateLocation = Min(nextPhysRefLocation, nextIntervalLocation);
// If the nextPhysRefLocation is a fixedRef for the rangeEndRefPosition, increment it so that
// we don't mistakenly conclude that it doesn't cover the live range.
// This doesn't handle the case where earlier RefPositions for this Interval are also
// FixedRefs of this regNum, but those are only interesting in the case where they are
// "local last uses" of the Interval - otherwise the liveRange would interfere with the reg.
if (coversCandidateLocation == rangeEndLocation &&
rangeEndRefPosition->isFixedRefOfReg(coversCandidateRegNum))
{
INDEBUG(linearScan->dumpLsraAllocationEvent(LSRA_EVENT_INCREMENT_RANGE_END, currentInterval));
coversCandidateLocation++;
}
if (coversCandidateLocation > rangeEndLocation)
{
coversSet |= coversCandidateBit;
}
if ((coversCandidateBit & relatedPreferences) != RBM_NONE)
{
if (coversCandidateLocation > relatedLastLocation)
{
coversRelatedSet |= coversCandidateBit;
}
}
else if (coversCandidateBit == refPosition->registerAssignment)
{
// If we had a fixed-reg def of a reg that will be killed before the use, prefer it to any other
// registers with the same score. (Note that we haven't changed the original registerAssignment
// on the RefPosition).
// Overload the RELATED_PREFERENCE value.
// TODO-CQ: Consider if this should be split out.
coversRelatedSet |= coversCandidateBit;
}
// Does this cover the full range of the interval?
if (coversCandidateLocation > lastLocation)
{
coversFullSet |= coversCandidateBit;
}
}
// The register is considered unassigned if it has no assignedInterval, OR
// if its next reference is beyond the range of this interval.
if (linearScan->nextIntervalRef[coversCandidateRegNum] > lastLocation)
{
unassignedSet |= coversCandidateBit;
}
}
coversSetsCalculated = true;
}
// ----------------------------------------------------------
// select: For given `currentInterval` and `refPosition`, selects a register to be assigned.
//
// Arguments:
// currentInterval - Current interval for which register needs to be selected.
// refPosition - Refposition within the interval for which register needs to be selected.
//
// Return Values:
// Register bit selected (a single register) and REG_NA if no register was selected.
//
regMaskTP LinearScan::RegisterSelection::select(Interval* currentInterval,
RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore))
{
#ifdef DEBUG
*registerScore = NONE;
#endif
reset(currentInterval, refPosition);
// process data-structures
if (RefTypeIsDef(refPosition->refType))
{
if (currentInterval->hasConflictingDefUse)
{
linearScan->resolveConflictingDefAndUse(currentInterval, refPosition);
candidates = refPosition->registerAssignment;
}
// Otherwise, check for the case of a fixed-reg def of a reg that will be killed before the
// use, or interferes at the point of use (which shouldn't happen, but Lower doesn't mark
// the contained nodes as interfering).
// Note that we may have a ParamDef RefPosition that is marked isFixedRegRef, but which
// has had its registerAssignment changed to no longer be a single register.
else if (refPosition->isFixedRegRef && nextRefPos != nullptr && RefTypeIsUse(nextRefPos->refType) &&
!nextRefPos->isFixedRegRef && genMaxOneBit(refPosition->registerAssignment))
{
regNumber defReg = refPosition->assignedReg();
RegRecord* defRegRecord = linearScan->getRegisterRecord(defReg);
RefPosition* currFixedRegRefPosition = defRegRecord->recentRefPosition;
assert(currFixedRegRefPosition != nullptr &&
currFixedRegRefPosition->nodeLocation == refPosition->nodeLocation);
// If there is another fixed reference to this register before the use, change the candidates
// on this RefPosition to include that of nextRefPos.
RefPosition* nextFixedRegRefPosition = defRegRecord->getNextRefPosition();
if (nextFixedRegRefPosition != nullptr &&
nextFixedRegRefPosition->nodeLocation <= nextRefPos->getRefEndLocation())
{
candidates |= nextRefPos->registerAssignment;
if (preferences == refPosition->registerAssignment)
{
preferences = candidates;
}
}
}
}
preferences &= candidates;
if (preferences == RBM_NONE)
{
preferences = candidates;
}
#ifdef DEBUG
candidates = linearScan->stressLimitRegs(refPosition, candidates);
#endif
assert(candidates != RBM_NONE);
Interval* nextRelatedInterval = relatedInterval;
Interval* finalRelatedInterval = relatedInterval;
Interval* rangeEndInterval = relatedInterval;
bool avoidByteRegs = false;
#ifdef TARGET_X86
if ((relatedPreferences & ~RBM_BYTE_REGS) != RBM_NONE)
{
avoidByteRegs = true;
}
#endif
// Follow the chain of related intervals, as long as:
// - The next reference is a def. We don't want to use the relatedInterval for preferencing if its next reference
// is not a new definition (as it either is or will become live).
// - The next (def) reference is downstream. Otherwise we could iterate indefinitely because the preferences can be
// circular.
// - The intersection of preferenced registers is non-empty.
//
while (nextRelatedInterval != nullptr)
{
RefPosition* nextRelatedRefPosition = nextRelatedInterval->getNextRefPosition();
// Only use the relatedInterval for preferencing if the related interval's next reference
// is a new definition.
if ((nextRelatedRefPosition != nullptr) && RefTypeIsDef(nextRelatedRefPosition->refType))
{
finalRelatedInterval = nextRelatedInterval;
nextRelatedInterval = nullptr;
// First, get the preferences for this interval
regMaskTP thisRelatedPreferences = finalRelatedInterval->getCurrentPreferences();
// Now, determine if they are compatible and update the relatedPreferences that we'll consider.
regMaskTP newRelatedPreferences = thisRelatedPreferences & relatedPreferences;
if (newRelatedPreferences != RBM_NONE && (!avoidByteRegs || thisRelatedPreferences != RBM_BYTE_REGS))
{
// TODO-CQ: The following isFree() check doesn't account for the possibility that there's an
// assignedInterval whose recentRefPosition was delayFree. It also fails to account for
// the TYP_DOUBLE case on ARM. It would be better to replace the call to isFree with
// isRegAvailable(genRegNumFromMask(newRelatedPreferences), regType)), but this is retained
// to achieve zero diffs.
//
bool thisIsSingleReg = isSingleRegister(newRelatedPreferences);
if (!thisIsSingleReg ||
(finalRelatedInterval->isLocalVar &&
linearScan->isFree(linearScan->getRegisterRecord(genRegNumFromMask(newRelatedPreferences)))))
{
relatedPreferences = newRelatedPreferences;
// If this Interval has a downstream def without a single-register preference, continue to iterate.
if (nextRelatedRefPosition->nodeLocation > rangeEndLocation)
{
preferCalleeSave = (preferCalleeSave || finalRelatedInterval->preferCalleeSave);
rangeEndLocation = nextRelatedRefPosition->getRangeEndLocation();
rangeEndInterval = finalRelatedInterval;
nextRelatedInterval = finalRelatedInterval->relatedInterval;
}
}
}
}
else
{
if (nextRelatedInterval == relatedInterval)
{
relatedInterval = nullptr;
relatedPreferences = RBM_NONE;
}
nextRelatedInterval = nullptr;
}
}
// For floating point, we want to be less aggressive about using callee-save registers.
// So in that case, we just need to ensure that the current RefPosition is covered.
if (useFloatReg(currentInterval->registerType))
{
rangeEndRefPosition = refPosition;
preferCalleeSave = currentInterval->preferCalleeSave;
}
else if (currentInterval->isWriteThru && refPosition->spillAfter)
{
// This is treated as a last use of the register, as there is an upcoming EH boundary.
rangeEndRefPosition = refPosition;
}
else
{
rangeEndRefPosition = refPosition->getRangeEndRef();
// If we have a chain of related intervals, and a finalRelatedInterval that
// is not currently occupying a register, and whose lifetime begins after this one,
// we want to try to select a register that will cover its lifetime.
if ((rangeEndInterval != nullptr) && (rangeEndInterval->assignedReg == nullptr) &&
!rangeEndInterval->isWriteThru &&
(rangeEndInterval->getNextRefLocation() >= rangeEndRefPosition->nodeLocation))
{
lastRefPosition = rangeEndInterval->lastRefPosition;
}
}
if ((relatedInterval != nullptr) && !relatedInterval->isWriteThru)
{
relatedLastLocation = relatedInterval->lastRefPosition->nodeLocation;
}
if (preferCalleeSave)
{
regMaskTP calleeSaveCandidates = calleeSaveRegs(currentInterval->registerType);
if (currentInterval->isWriteThru)
{
// We'll only prefer a callee-save register if it's already been used.
regMaskTP unusedCalleeSaves =
calleeSaveCandidates & ~(linearScan->compiler->codeGen->regSet.rsGetModifiedRegsMask());
callerCalleePrefs = calleeSaveCandidates & ~unusedCalleeSaves;
preferences &= ~unusedCalleeSaves;
}
else
{
callerCalleePrefs = calleeSaveCandidates;
}
}
else
{
callerCalleePrefs = callerSaveRegs(currentInterval->registerType);
}
// If this has a delayed use (due to being used in a rmw position of a
// non-commutative operator), its endLocation is delayed until the "def"
// position, which is one location past the use (getRefEndLocation() takes care of this).
rangeEndLocation = rangeEndRefPosition->getRefEndLocation();
lastLocation = lastRefPosition->getRefEndLocation();
// We'll set this to short-circuit remaining heuristics when we have a single candidate.
found = false;
// Is this a fixedReg?
regMaskTP fixedRegMask = RBM_NONE;
if (refPosition->isFixedRegRef)
{
assert(genMaxOneBit(refPosition->registerAssignment));
fixedRegMask = refPosition->registerAssignment;
if (candidates == refPosition->registerAssignment)
{
found = true;
if (linearScan->nextIntervalRef[genRegNumFromMask(candidates)] > lastLocation)
{
unassignedSet = candidates;
}
}
}
// Eliminate candidates that are in-use or busy.
if (!found)
{
regMaskTP busyRegs = linearScan->regsBusyUntilKill | linearScan->regsInUseThisLocation;
candidates &= ~busyRegs;
// Also eliminate as busy any register with a conflicting fixed reference at this or
// the next location.
// Note that this will eliminate the fixedReg, if any, but we'll add it back below.
regMaskTP checkConflictMask = candidates & linearScan->fixedRegs;
while (checkConflictMask != RBM_NONE)
{
regMaskTP checkConflictBit = genFindLowestBit(checkConflictMask);
checkConflictMask &= ~checkConflictBit;
regNumber checkConflictReg = genRegNumFromMask(checkConflictBit);
LsraLocation checkConflictLocation = linearScan->nextFixedRef[checkConflictReg];
if ((checkConflictLocation == currentLocation) ||
(refPosition->delayRegFree && (checkConflictLocation == (currentLocation + 1))))
{
candidates &= ~checkConflictBit;
}
}
candidates |= fixedRegMask;
found = isSingleRegister(candidates);
}
// By chance, is prevRegRec already holding this interval, as a copyReg or having
// been restored as inactive after a kill?
// NOTE: this is not currently considered one of the selection criteria - it always wins
// if it is the assignedInterval of 'prevRegRec'.
if (!found && (prevRegRec != nullptr))
{
prevRegBit = genRegMask(prevRegRec->regNum);
if ((prevRegRec->assignedInterval == currentInterval) && ((candidates & prevRegBit) != RBM_NONE))
{
candidates = prevRegBit;
found = true;
#ifdef DEBUG
*registerScore = THIS_ASSIGNED;
#endif
}
}
else
{
prevRegBit = RBM_NONE;
}
if (!found && (candidates == RBM_NONE))
{
assert(refPosition->RegOptional());
currentInterval->assignedReg = nullptr;
return RBM_NONE;
}
// TODO-Cleanup: Previously, the "reverseSelect" stress mode reversed the order of the heuristics.
// It needs to be re-engineered with this refactoring.
// In non-debug builds, this will simply get optimized away
bool reverseSelect = false;
#ifdef DEBUG
reverseSelect = linearScan->doReverseSelect();
#endif // DEBUG
freeCandidates = linearScan->getFreeCandidates(candidates, regType);
// If no free candidates, then double check if refPosition is an actual ref.
if (freeCandidates == RBM_NONE)
{
// We won't spill if this refPosition is not an actual ref.
if (!refPosition->IsActualRef())
{
currentInterval->assignedReg = nullptr;
return RBM_NONE;
}
}
else
{
// Set the 'matchingConstants' set.
if (currentInterval->isConstant && RefTypeIsDef(refPosition->refType))
{
matchingConstants = linearScan->getMatchingConstants(candidates, currentInterval, refPosition);
}
}
#define IF_FOUND_GOTO_DONE \
if (found) \
goto Selection_Done;
#ifdef DEBUG
HeuristicFn fn;
for (int orderId = 0; orderId < REGSELECT_HEURISTIC_COUNT; orderId++)
{
IF_FOUND_GOTO_DONE
RegisterScore heuristicToApply = RegSelectionOrder[orderId];
if (mappingTable->Lookup(heuristicToApply, &fn))
{
(this->*fn)();
if (found)
{
*registerScore = heuristicToApply;
}
#if TRACK_LSRA_STATS
INTRACK_STATS_IF(found, linearScan->updateLsraStat(linearScan->getLsraStatFromScore(heuristicToApply),
refPosition->bbNum));
#endif // TRACK_LSRA_STATS
}
else
{
assert(!"Unexpected heuristic value!");
}
}
#else // RELEASE
// In release, just invoke the default order
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) \
try_##stat(); \
IF_FOUND_GOTO_DONE
#include "lsra_score.h"
#undef REG_SEL_DEF
#endif // DEBUG
#undef IF_FOUND_GOTO_DONE
Selection_Done:
if (skipAllocation)
{
return RBM_NONE;
}
calculateCoversSets();
assert(found && isSingleRegister(candidates));
foundRegBit = candidates;
return candidates;
}
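// Illustrative note (not part of the allocator): in release builds the REG_SEL_DEF
// expansion above unrolls into a fixed sequence of heuristic calls, conceptually:
//
//     try_FREE();            if (found) goto Selection_Done;
//     try_CONST_AVAILABLE(); if (found) goto Selection_Done;
//     ...                    // one call per REG_SEL_DEF entry in lsra_score.h
//
// The heuristic names and their order come from lsra_score.h; the two shown here are
// only an assumed sample of that list, and the exact set may differ.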
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
Linear Scan Register Allocation
a.k.a. LSRA
Preconditions
- All register requirements are expressed in the code stream, either as destination
registers of tree nodes, or as internal registers. These requirements are
expressed in the RefPositions built for each node by BuildNode(), which includes:
- The register uses and definitions.
- The register restrictions (candidates) of the target register, both from itself,
as producer of the value (dstCandidates), and from its consuming node (srcCandidates).
Note that when we talk about srcCandidates we are referring to the destination register
(not any of its sources).
- The number (internalCount) of registers required, and their register restrictions (internalCandidates).
These are neither inputs nor outputs of the node, but used in the sequence of code generated for the tree.
"Internal registers" are registers used during the code sequence generated for the node.
The register lifetimes must obey the following lifetime model:
- First, any internal registers are defined.
- Next, any source registers are used (and are then freed if they are last use and are not identified as
"delayRegFree").
- Next, the internal registers are used (and are then freed).
- Next, any registers in the kill set for the instruction are killed.
- Next, the destination register(s) are defined (multiple destination registers are only supported on ARM)
- Finally, any "delayRegFree" source registers are freed.
There are several things to note about this order:
- The internal registers will never overlap any use, but they may overlap a destination register.
- Internal registers are never live beyond the node.
- The "delayRegFree" annotation is used for instructions that are only available in a Read-Modify-Write form.
That is, the destination register is one of the sources. In this case, we must not use the same register for
the non-RMW operand as for the destination.
Overview (doLinearScan):
- Walk all blocks, building intervals and RefPositions (buildIntervals)
- Allocate registers (allocateRegisters)
- Annotate nodes with register assignments (resolveRegisters)
- Add move nodes as needed to resolve conflicting register
assignments across non-adjacent edges. (resolveEdges, called from resolveRegisters)
Postconditions:
Tree nodes (GenTree):
- GenTree::GetRegNum() (and gtRegPair for ARM) is annotated with the register
assignment for a node. If the node does not require a register, it is
annotated as such (GetRegNum() = REG_NA). For a variable definition or interior
tree node (an "implicit" definition), this is the register to put the result.
For an expression use, this is the place to find the value that has previously
been computed.
- In most cases, this register must satisfy the constraints specified for the RefPosition.
- In some cases, this is difficult:
- If a lclVar node currently lives in some register, it may not be desirable to move it
(i.e. its current location may be desirable for future uses, e.g. if it's a callee save register,
but needs to be in a specific arg register for a call).
- In other cases there may be conflicts on the restrictions placed by the defining node and the node which
consumes it
- If such a node is constrained to a single fixed register (e.g. an arg register, or a return from a call),
then LSRA is free to annotate the node with a different register. The code generator must issue the appropriate
move.
- However, if such a node is constrained to a set of registers, and its current location does not satisfy that
requirement, LSRA must insert a GT_COPY node between the node and its parent. The GetRegNum() on the GT_COPY
node must satisfy the register requirement of the parent.
- GenTree::gtRsvdRegs has a set of registers used for internal temps.
- A tree node is marked GTF_SPILL if the tree node must be spilled by the code generator after it has been
evaluated.
- LSRA currently does not set GTF_SPILLED on such nodes, because it caused problems in the old code generator.
In the new backend perhaps this should change (see also the note below under CodeGen).
- A tree node is marked GTF_SPILLED if it is a lclVar that must be reloaded prior to use.
- The register (GetRegNum()) on the node indicates the register to which it must be reloaded.
- For lclVar nodes, since the uses and defs are distinct tree nodes, it is always possible to annotate the node
with the register to which the variable must be reloaded.
- For other nodes, since they represent both the def and use, if the value must be reloaded to a different
register, LSRA must insert a GT_RELOAD node in order to specify the register to which it should be reloaded.
Local variable table (LclVarDsc):
- LclVarDsc::lvRegister is set to true if a local variable has the
same register assignment for its entire lifetime.
- LclVarDsc::lvRegNum / GetOtherReg(): these are initialized to their
first value at the end of LSRA (it looks like GetOtherReg() isn't?
This is probably a bug (ARM)). Codegen will set them to their current value
as it processes the trees, since a variable can (now) be assigned different
registers over its lifetimes.
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
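/* Illustrative example (not part of the specification above): for a read-modify-write
   node such as "t = t - u" on a two-operand target, the RefPositions built for the node
   follow the lifetime model described above:
     - use of 't' (the RMW operand, which shares the destination register)
     - use of 'u' (marked delayRegFree, so it remains live through the def location)
     - def of 't' (the destination)
   Because 'u' is delayRegFree, it must not be assigned the destination register; its
   register is only freed one location past the use. The names 't' and 'u' are assumed
   here purely for illustration. */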
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "lsra.h"
#ifdef DEBUG
const char* LinearScan::resolveTypeName[] = {"Split", "Join", "Critical", "SharedCritical"};
#endif // DEBUG
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Small Helper functions XX
XX XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
//--------------------------------------------------------------
// lsraAssignRegToTree: Assign the given reg to tree node.
//
// Arguments:
// tree - Gentree node
// reg - register to be assigned
// regIdx - register idx, if tree is a multi-reg call node.
// regIdx will be zero for single-reg result producing tree nodes.
//
// Return Value:
// None
//
void lsraAssignRegToTree(GenTree* tree, regNumber reg, unsigned regIdx)
{
if (regIdx == 0)
{
tree->SetRegNum(reg);
}
#if !defined(TARGET_64BIT)
else if (tree->OperIsMultiRegOp())
{
assert(regIdx == 1);
GenTreeMultiRegOp* mul = tree->AsMultiRegOp();
mul->gtOtherReg = reg;
}
#endif // TARGET_64BIT
#if FEATURE_MULTIREG_RET
else if (tree->OperGet() == GT_COPY)
{
assert(regIdx == 1);
GenTreeCopyOrReload* copy = tree->AsCopyOrReload();
copy->gtOtherRegs[0] = (regNumberSmall)reg;
}
#endif // FEATURE_MULTIREG_RET
#if FEATURE_ARG_SPLIT
else if (tree->OperIsPutArgSplit())
{
GenTreePutArgSplit* putArg = tree->AsPutArgSplit();
putArg->SetRegNumByIdx(reg, regIdx);
}
#endif // FEATURE_ARG_SPLIT
#ifdef FEATURE_HW_INTRINSICS
else if (tree->OperIs(GT_HWINTRINSIC))
{
assert(regIdx == 1);
// TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers.
tree->AsHWIntrinsic()->SetOtherReg(reg);
}
#endif // FEATURE_HW_INTRINSICS
else if (tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
tree->AsLclVar()->SetRegNumByIdx(reg, regIdx);
}
else
{
assert(tree->IsMultiRegCall());
GenTreeCall* call = tree->AsCall();
call->SetRegNumByIdx(reg, regIdx);
}
}
//-------------------------------------------------------------
// getWeight: Returns the weight of the RefPosition.
//
// Arguments:
// refPos - ref position
//
// Returns:
// Weight of ref position.
weight_t LinearScan::getWeight(RefPosition* refPos)
{
weight_t weight;
GenTree* treeNode = refPos->treeNode;
if (treeNode != nullptr)
{
if (isCandidateLocalRef(treeNode))
{
// Tracked locals: use weighted ref cnt as the weight of the
// ref position.
const LclVarDsc* varDsc = compiler->lvaGetDesc(treeNode->AsLclVarCommon());
weight = varDsc->lvRefCntWtd();
if (refPos->getInterval()->isSpilled)
{
// Decrease the weight if the interval has already been spilled.
if (varDsc->lvLiveInOutOfHndlr || refPos->getInterval()->firstRefPosition->singleDefSpill)
{
// An EH-var/single-def is always spilled at defs, and we'll decrease the weight by half,
// since only the reload is needed.
weight = weight / 2;
}
else
{
weight -= BB_UNITY_WEIGHT;
}
}
}
else
{
// Non-candidate local ref or non-lcl tree node.
// These are considered to have two references in the basic block:
// a def and a use, and hence their weighted ref count is 2 times
// the weight of the basic block in which they appear.
// However, it is generally more harmful to spill tree temps, so we
// double that.
const unsigned TREE_TEMP_REF_COUNT = 2;
const unsigned TREE_TEMP_BOOST_FACTOR = 2;
weight = TREE_TEMP_REF_COUNT * TREE_TEMP_BOOST_FACTOR * blockInfo[refPos->bbNum].weight;
}
}
else
{
// Non-tree node ref positions. These will have a single
// reference in the basic block and hence their weighted
// refcount is equal to the block weight in which they
// appear.
weight = blockInfo[refPos->bbNum].weight;
}
return weight;
}
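// Worked example for getWeight (assumed numbers, for illustration only): a tracked local
// with a weighted ref count of 8 * BB_UNITY_WEIGHT whose interval has already been
// spilled (and is neither an EH var nor a single-def spill) yields
// 8 * BB_UNITY_WEIGHT - BB_UNITY_WEIGHT, while an EH var in the same situation yields
// 4 * BB_UNITY_WEIGHT (half). A tree temp in a block of unity weight yields
// 2 * 2 * BB_UNITY_WEIGHT, reflecting the def+use count times the boost factor.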
// allRegs represents a set of registers that can
// be used to allocate the specified type in any point
// in time (more of a 'bank' of registers).
regMaskTP LinearScan::allRegs(RegisterType rt)
{
assert((rt != TYP_UNDEF) && (rt != TYP_STRUCT));
if (rt == TYP_FLOAT)
{
return availableFloatRegs;
}
else if (rt == TYP_DOUBLE)
{
return availableDoubleRegs;
}
#ifdef FEATURE_SIMD
// TODO-Cleanup: Add an RBM_ALLSIMD
else if (varTypeIsSIMD(rt))
{
return availableDoubleRegs;
}
#endif // FEATURE_SIMD
else
{
return availableIntRegs;
}
}
regMaskTP LinearScan::allByteRegs()
{
#ifdef TARGET_X86
return availableIntRegs & RBM_BYTE_REGS;
#else
return availableIntRegs;
#endif
}
regMaskTP LinearScan::allSIMDRegs()
{
return availableFloatRegs;
}
void LinearScan::updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition)
{
LsraLocation nextLocation;
if (nextRefPosition == nullptr)
{
nextLocation = MaxLocation;
fixedRegs &= ~genRegMask(regRecord->regNum);
}
else
{
nextLocation = nextRefPosition->nodeLocation;
fixedRegs |= genRegMask(regRecord->regNum);
}
nextFixedRef[regRecord->regNum] = nextLocation;
}
regMaskTP LinearScan::getMatchingConstants(regMaskTP mask, Interval* currentInterval, RefPosition* refPosition)
{
assert(currentInterval->isConstant && RefTypeIsDef(refPosition->refType));
regMaskTP candidates = (mask & m_RegistersWithConstants);
regMaskTP result = RBM_NONE;
while (candidates != RBM_NONE)
{
regMaskTP candidateBit = genFindLowestBit(candidates);
candidates &= ~candidateBit;
regNumber regNum = genRegNumFromMask(candidateBit);
RegRecord* physRegRecord = getRegisterRecord(regNum);
if (isMatchingConstant(physRegRecord, refPosition))
{
result |= candidateBit;
}
}
return result;
}
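#if 0
// Minimal sketch (disabled; for illustration only) of the mask-iteration idiom used by
// getMatchingConstants and several other routines in this file: repeatedly peel off the
// lowest set bit of a register mask until the mask is empty. The function name is
// hypothetical and not part of LSRA.
static void walkRegMaskExample(regMaskTP mask)
{
    while (mask != RBM_NONE)
    {
        regMaskTP bit = genFindLowestBit(mask); // isolate the lowest set bit
        mask &= ~bit;                           // remove it from the working mask
        regNumber reg = genRegNumFromMask(bit); // convert the single-bit mask to a register
        (void)reg;                              // ... a real caller would inspect 'reg' here ...
    }
}
#endif // 0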
void LinearScan::clearNextIntervalRef(regNumber reg, var_types regType)
{
nextIntervalRef[reg] = MaxLocation;
#ifdef TARGET_ARM
if (regType == TYP_DOUBLE)
{
assert(genIsValidDoubleReg(reg));
regNumber otherReg = REG_NEXT(reg);
nextIntervalRef[otherReg] = MaxLocation;
}
#endif
}
void LinearScan::clearSpillCost(regNumber reg, var_types regType)
{
spillCost[reg] = 0;
#ifdef TARGET_ARM
if (regType == TYP_DOUBLE)
{
assert(genIsValidDoubleReg(reg));
regNumber otherReg = REG_NEXT(reg);
spillCost[otherReg] = 0;
}
#endif
}
void LinearScan::updateNextIntervalRef(regNumber reg, Interval* interval)
{
LsraLocation nextRefLocation = interval->getNextRefLocation();
nextIntervalRef[reg] = nextRefLocation;
#ifdef TARGET_ARM
if (interval->registerType == TYP_DOUBLE)
{
regNumber otherReg = REG_NEXT(reg);
nextIntervalRef[otherReg] = nextRefLocation;
}
#endif
}
void LinearScan::updateSpillCost(regNumber reg, Interval* interval)
{
// An interval can have no recentRefPosition if this is the initial assignment
// of a parameter to its home register.
weight_t cost = (interval->recentRefPosition != nullptr) ? getWeight(interval->recentRefPosition) : 0;
spillCost[reg] = cost;
#ifdef TARGET_ARM
if (interval->registerType == TYP_DOUBLE)
{
regNumber otherReg = REG_NEXT(reg);
spillCost[otherReg] = cost;
}
#endif
}
//------------------------------------------------------------------------
// internalFloatRegCandidates: Return the set of registers that are appropriate
// for use as internal float registers.
//
// Return Value:
// The set of registers (as a regMaskTP).
//
// Notes:
// compFloatingPointUsed is only required to be set if it is possible that we
// will use floating point callee-save registers.
// It is unlikely, if an internal register is the only use of floating point,
// that it will select a callee-save register. But to be safe, we restrict
// the set of candidates if compFloatingPointUsed is not already set.
regMaskTP LinearScan::internalFloatRegCandidates()
{
if (compiler->compFloatingPointUsed)
{
return allRegs(TYP_FLOAT);
}
else
{
return RBM_FLT_CALLEE_TRASH;
}
}
bool LinearScan::isFree(RegRecord* regRecord)
{
return ((regRecord->assignedInterval == nullptr || !regRecord->assignedInterval->isActive) &&
!isRegBusy(regRecord->regNum, regRecord->registerType));
}
RegRecord* LinearScan::getRegisterRecord(regNumber regNum)
{
assert((unsigned)regNum < ArrLen(physRegs));
return &physRegs[regNum];
}
#ifdef DEBUG
//----------------------------------------------------------------------------
// getConstrainedRegMask: Returns new regMask which is the intersection of
// regMaskActual and regMaskConstraint if the new regMask has at least
// minRegCount registers, otherwise returns regMaskActual.
//
// Arguments:
// regMaskActual - regMask that needs to be constrained
// regMaskConstraint - regMask constraint that needs to be
// applied to regMaskActual
// minRegCount - Minimum number of regs that should be
// present in the new regMask.
//
// Return Value:
// New regMask that has minRegCount registers after intersection.
// Otherwise returns regMaskActual.
regMaskTP LinearScan::getConstrainedRegMask(regMaskTP regMaskActual, regMaskTP regMaskConstraint, unsigned minRegCount)
{
regMaskTP newMask = regMaskActual & regMaskConstraint;
if (genCountBits(newMask) >= minRegCount)
{
return newMask;
}
return regMaskActual;
}
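// Example for getConstrainedRegMask (assumed x64 masks, for illustration only): with
// regMaskActual = (RBM_RAX | RBM_RCX | RBM_RDX), regMaskConstraint = RBM_CALLEE_SAVED and
// minRegCount = 1, the intersection is empty (those registers are caller-saved on x64),
// so the original mask is returned unchanged; the stress constraint is dropped rather
// than leaving the node with too few candidates.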
//------------------------------------------------------------------------
// stressLimitRegs: Given a set of registers, expressed as a register mask, reduce
// them based on the current stress options.
//
// Arguments:
// mask - The current mask of register candidates for a node
//
// Return Value:
// A possibly-modified mask, based on the value of COMPlus_JitStressRegs.
//
// Notes:
// This is the method used to implement the stress options that limit
// the set of registers considered for allocation.
regMaskTP LinearScan::stressLimitRegs(RefPosition* refPosition, regMaskTP mask)
{
if (getStressLimitRegs() != LSRA_LIMIT_NONE)
{
// The refPosition could be null, for example when called
// by getTempRegForResolution().
int minRegCount = (refPosition != nullptr) ? refPosition->minRegCandidateCount : 1;
switch (getStressLimitRegs())
{
case LSRA_LIMIT_CALLEE:
if (!compiler->opts.compDbgEnC)
{
mask = getConstrainedRegMask(mask, RBM_CALLEE_SAVED, minRegCount);
}
break;
case LSRA_LIMIT_CALLER:
{
mask = getConstrainedRegMask(mask, RBM_CALLEE_TRASH, minRegCount);
}
break;
case LSRA_LIMIT_SMALL_SET:
if ((mask & LsraLimitSmallIntSet) != RBM_NONE)
{
mask = getConstrainedRegMask(mask, LsraLimitSmallIntSet, minRegCount);
}
else if ((mask & LsraLimitSmallFPSet) != RBM_NONE)
{
mask = getConstrainedRegMask(mask, LsraLimitSmallFPSet, minRegCount);
}
break;
default:
unreached();
}
if (refPosition != nullptr && refPosition->isFixedRegRef)
{
mask |= refPosition->registerAssignment;
}
}
return mask;
}
#endif // DEBUG
//------------------------------------------------------------------------
// conflictingFixedRegReference: Determine whether register 'regNum' has a
// fixed register use that conflicts with 'refPosition'
//
// Arguments:
// regNum - The register of interest
// refPosition - The RefPosition of interest
//
// Return Value:
// Returns true iff the given RefPosition is NOT a fixed use of this register,
// AND either:
// - there is a RefPosition on this RegRecord at the nodeLocation of the given RefPosition, or
// - the given RefPosition has a delayRegFree, and there is a RefPosition on this RegRecord at
// the nodeLocation just past the given RefPosition.
//
// Assumptions:
// 'refPosition' is non-null.
bool LinearScan::conflictingFixedRegReference(regNumber regNum, RefPosition* refPosition)
{
// Is this a fixed reference of this register? If so, there is no conflict.
if (refPosition->isFixedRefOfRegMask(genRegMask(regNum)))
{
return false;
}
// Otherwise, check for conflicts.
// There is a conflict if:
// 1. There is a recent RefPosition on this RegRecord that is at this location, OR
// 2. There is an upcoming RefPosition at this location, or at the next location
// if refPosition is a delayed use (i.e. must be kept live through the next/def location).
LsraLocation refLocation = refPosition->nodeLocation;
RegRecord* regRecord = getRegisterRecord(regNum);
if (isRegInUse(regNum, refPosition->getInterval()->registerType) &&
(regRecord->assignedInterval != refPosition->getInterval()))
{
return true;
}
LsraLocation nextPhysRefLocation = nextFixedRef[regNum];
if (nextPhysRefLocation == refLocation || (refPosition->delayRegFree && nextPhysRefLocation == (refLocation + 1)))
{
return true;
}
return false;
}
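// Example for conflictingFixedRegReference (illustrative locations): suppose regNum has an
// upcoming fixed RefPosition at location 12 (e.g. a fixed call-argument register) and
// 'refPosition' is a use at location 11 marked delayRegFree. The use must stay live through
// location 12, so nextPhysRefLocation == refLocation + 1 and a conflict is reported; the
// same use without delayRegFree would not conflict (assuming the register is not in use).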
/*****************************************************************************
* Inline functions for Interval
*****************************************************************************/
RefPosition* Referenceable::getNextRefPosition()
{
if (recentRefPosition == nullptr)
{
return firstRefPosition;
}
else
{
return recentRefPosition->nextRefPosition;
}
}
LsraLocation Referenceable::getNextRefLocation()
{
RefPosition* nextRefPosition = getNextRefPosition();
if (nextRefPosition == nullptr)
{
return MaxLocation;
}
else
{
return nextRefPosition->nodeLocation;
}
}
#ifdef DEBUG
void LinearScan::dumpVarToRegMap(VarToRegMap map)
{
bool anyPrinted = false;
for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
{
if (map[varIndex] != REG_STK)
{
printf("V%02u=%s ", compiler->lvaTrackedIndexToLclNum(varIndex), getRegName(map[varIndex]));
anyPrinted = true;
}
}
if (!anyPrinted)
{
printf("none");
}
printf("\n");
}
void LinearScan::dumpInVarToRegMap(BasicBlock* block)
{
printf("Var=Reg beg of " FMT_BB ": ", block->bbNum);
VarToRegMap map = getInVarToRegMap(block->bbNum);
dumpVarToRegMap(map);
}
void LinearScan::dumpOutVarToRegMap(BasicBlock* block)
{
printf("Var=Reg end of " FMT_BB ": ", block->bbNum);
VarToRegMap map = getOutVarToRegMap(block->bbNum);
dumpVarToRegMap(map);
}
#endif // DEBUG
LinearScanInterface* getLinearScanAllocator(Compiler* comp)
{
return new (comp, CMK_LSRA) LinearScan(comp);
}
//------------------------------------------------------------------------
// LSRA constructor
//
// Arguments:
// theCompiler
//
// Notes:
// The constructor takes care of initializing the data structures that are used
// during Lowering, including (in DEBUG) getting the stress environment variables,
// as they may affect the block ordering.
LinearScan::LinearScan(Compiler* theCompiler)
: compiler(theCompiler)
, intervals(theCompiler->getAllocator(CMK_LSRA_Interval))
, allocationPassComplete(false)
, refPositions(theCompiler->getAllocator(CMK_LSRA_RefPosition))
, listNodePool(theCompiler)
{
regSelector = new (theCompiler, CMK_LSRA) RegisterSelection(this);
firstColdLoc = MaxLocation;
#ifdef DEBUG
maxNodeLocation = 0;
activeRefPosition = nullptr;
// Get the value of the environment variable that controls stress for register allocation
lsraStressMask = JitConfig.JitStressRegs();
#if 0
if (lsraStressMask != 0)
{
// The code in this #if can be used to debug JitStressRegs issues according to
// method hash or method count.
// To use, simply set environment variables:
// JitStressRegsHashLo and JitStressRegsHashHi to set the range of method hash, or
// JitStressRegsStart and JitStressRegsEnd to set the range of method count
// (Compiler::jitTotalMethodCount as reported by COMPlus_DumpJittedMethods).
unsigned methHash = compiler->info.compMethodHash();
char* lostr = getenv("JitStressRegsHashLo");
unsigned methHashLo = 0;
bool dump = false;
if (lostr != nullptr)
{
sscanf_s(lostr, "%x", &methHashLo);
dump = true;
}
char* histr = getenv("JitStressRegsHashHi");
unsigned methHashHi = UINT32_MAX;
if (histr != nullptr)
{
sscanf_s(histr, "%x", &methHashHi);
dump = true;
}
if (methHash < methHashLo || methHash > methHashHi)
{
lsraStressMask = 0;
}
// Check method count
unsigned count = Compiler::jitTotalMethodCompiled;
unsigned start = 0;
unsigned end = UINT32_MAX;
char* startStr = getenv("JitStressRegsStart");
char* endStr = getenv("JitStressRegsEnd");
if (startStr != nullptr)
{
sscanf_s(startStr, "%d", &start);
dump = true;
}
if (endStr != nullptr)
{
sscanf_s(endStr, "%d", &end);
dump = true;
}
if (count < start || (count > end))
{
lsraStressMask = 0;
}
if ((lsraStressMask != 0) && (dump == true))
{
printf("JitStressRegs = %x for method %d: %s, hash = 0x%x.\n",
lsraStressMask, Compiler::jitTotalMethodCompiled, compiler->info.compFullName, compiler->info.compMethodHash());
printf(""); // flush
}
}
#endif // 0
#endif // DEBUG
// Assume that we will enregister local variables if it's not disabled. We'll reset it if we
// have no tracked locals when we start allocating. Note that new tracked lclVars may be added
// after the first liveness analysis - either by optimizations or by Lowering, and the tracked
// set won't be recomputed until after Lowering (and this constructor is called prior to Lowering),
// so we don't want to check that yet.
enregisterLocalVars = compiler->compEnregLocals();
#ifdef TARGET_ARM64
availableIntRegs = (RBM_ALLINT & ~(RBM_PR | RBM_FP | RBM_LR) & ~compiler->codeGen->regSet.rsMaskResvd);
#else
availableIntRegs = (RBM_ALLINT & ~compiler->codeGen->regSet.rsMaskResvd);
#endif
#if ETW_EBP_FRAMED
availableIntRegs &= ~RBM_FPBASE;
#endif // ETW_EBP_FRAMED
availableFloatRegs = RBM_ALLFLOAT;
availableDoubleRegs = RBM_ALLDOUBLE;
#ifdef TARGET_AMD64
if (compiler->opts.compDbgEnC)
{
// On x64 when the EnC option is set, we always save exactly RBP, RSI and RDI.
// RBP is not available to the register allocator, so RSI and RDI are the only
// callee-save registers available.
availableIntRegs &= ~RBM_CALLEE_SAVED | RBM_RSI | RBM_RDI;
availableFloatRegs &= ~RBM_CALLEE_SAVED;
availableDoubleRegs &= ~RBM_CALLEE_SAVED;
}
#endif // TARGET_AMD64
compiler->rpFrameType = FT_NOT_SET;
compiler->rpMustCreateEBPCalled = false;
compiler->codeGen->intRegState.rsIsFloat = false;
compiler->codeGen->floatRegState.rsIsFloat = true;
// Block sequencing (the order in which we schedule).
// Note that we don't initialize the bbVisitedSet until we do the first traversal
// This is so that any blocks that are added during the first traversal
// are accounted for (and we don't have BasicBlockEpoch issues).
blockSequencingDone = false;
blockSequence = nullptr;
blockSequenceWorkList = nullptr;
curBBSeqNum = 0;
bbSeqCount = 0;
// Information about each block, including predecessor blocks used for variable locations at block entry.
blockInfo = nullptr;
pendingDelayFree = false;
tgtPrefUse = nullptr;
}
//------------------------------------------------------------------------
// getNextCandidateFromWorkList: Get the next candidate for block sequencing
//
// Arguments:
// None.
//
// Return Value:
// The next block to be placed in the sequence.
//
// Notes:
// This method currently always returns the next block in the list, and relies on having
// blocks added to the list only when they are "ready", and on the
// addToBlockSequenceWorkList() method to insert them in the proper order.
// However, a block may be in the list and already selected, if it was subsequently
// encountered as both a flow and layout successor of the most recently selected
// block.
BasicBlock* LinearScan::getNextCandidateFromWorkList()
{
BasicBlockList* nextWorkList = nullptr;
for (BasicBlockList* workList = blockSequenceWorkList; workList != nullptr; workList = nextWorkList)
{
nextWorkList = workList->next;
BasicBlock* candBlock = workList->block;
removeFromBlockSequenceWorkList(workList, nullptr);
if (!isBlockVisited(candBlock))
{
return candBlock;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// setBlockSequence: Determine the block order for register allocation.
//
// Arguments:
// None
//
// Return Value:
// None
//
// Notes:
// On return, the blockSequence array contains the blocks, in the order in which they
// will be allocated.
// This method clears the bbVisitedSet on LinearScan, and when it returns the set
// contains all the bbNums for the block.
void LinearScan::setBlockSequence()
{
assert(!blockSequencingDone); // The method should be called only once.
compiler->EnsureBasicBlockEpoch();
#ifdef DEBUG
blockEpoch = compiler->GetCurBasicBlockEpoch();
#endif // DEBUG
// Initialize the "visited" blocks set.
bbVisitedSet = BlockSetOps::MakeEmpty(compiler);
BlockSet readySet(BlockSetOps::MakeEmpty(compiler));
BlockSet predSet(BlockSetOps::MakeEmpty(compiler));
assert(blockSequence == nullptr && bbSeqCount == 0);
blockSequence = new (compiler, CMK_LSRA) BasicBlock*[compiler->fgBBcount];
bbNumMaxBeforeResolution = compiler->fgBBNumMax;
blockInfo = new (compiler, CMK_LSRA) LsraBlockInfo[bbNumMaxBeforeResolution + 1];
assert(blockSequenceWorkList == nullptr);
verifiedAllBBs = false;
hasCriticalEdges = false;
BasicBlock* nextBlock;
// We use a bbNum of 0 for entry RefPositions.
// The other information in blockInfo[0] will never be used.
blockInfo[0].weight = BB_UNITY_WEIGHT;
#if TRACK_LSRA_STATS
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
blockInfo[0].stats[statIndex] = 0;
}
#endif // TRACK_LSRA_STATS
JITDUMP("Start LSRA Block Sequence: \n");
for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = nextBlock)
{
JITDUMP("Current block: " FMT_BB "\n", block->bbNum);
blockSequence[bbSeqCount] = block;
markBlockVisited(block);
bbSeqCount++;
nextBlock = nullptr;
// Initialize the blockInfo.
// predBBNum will be set later.
// 0 is never used as a bbNum, but is used in blockInfo to designate an exception entry block.
blockInfo[block->bbNum].predBBNum = 0;
// We check for critical edges below, but initialize to false.
blockInfo[block->bbNum].hasCriticalInEdge = false;
blockInfo[block->bbNum].hasCriticalOutEdge = false;
blockInfo[block->bbNum].weight = block->getBBWeight(compiler);
blockInfo[block->bbNum].hasEHBoundaryIn = block->hasEHBoundaryIn();
blockInfo[block->bbNum].hasEHBoundaryOut = block->hasEHBoundaryOut();
blockInfo[block->bbNum].hasEHPred = false;
#if TRACK_LSRA_STATS
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
blockInfo[block->bbNum].stats[statIndex] = 0;
}
#endif // TRACK_LSRA_STATS
// We treat BBCallAlwaysPairTail blocks as having EH flow, since we can't
// insert resolution moves into those blocks.
if (block->isBBCallAlwaysPairTail())
{
blockInfo[block->bbNum].hasEHBoundaryIn = true;
blockInfo[block->bbNum].hasEHBoundaryOut = true;
}
bool hasUniquePred = (block->GetUniquePred(compiler) != nullptr);
for (BasicBlock* const predBlock : block->PredBlocks())
{
if (!hasUniquePred)
{
if (predBlock->NumSucc(compiler) > 1)
{
blockInfo[block->bbNum].hasCriticalInEdge = true;
hasCriticalEdges = true;
}
else if (predBlock->bbJumpKind == BBJ_SWITCH)
{
assert(!"Switch with single successor");
}
}
if (!block->isBBCallAlwaysPairTail() &&
(predBlock->hasEHBoundaryOut() || predBlock->isBBCallAlwaysPairTail()))
{
assert(!block->isBBCallAlwaysPairTail());
if (hasUniquePred)
{
// A unique pred with an EH out edge won't allow us to keep any variables enregistered.
blockInfo[block->bbNum].hasEHBoundaryIn = true;
}
else
{
blockInfo[block->bbNum].hasEHPred = true;
}
}
}
// Determine which block to schedule next.
// First, update the NORMAL successors of the current block, adding them to the worklist
// according to the desired order. We will handle the EH successors below.
const unsigned numSuccs = block->NumSucc(compiler);
bool checkForCriticalOutEdge = (numSuccs > 1);
if (!checkForCriticalOutEdge && block->bbJumpKind == BBJ_SWITCH)
{
assert(!"Switch with single successor");
}
for (unsigned succIndex = 0; succIndex < numSuccs; succIndex++)
{
BasicBlock* succ = block->GetSucc(succIndex, compiler);
if (checkForCriticalOutEdge && succ->GetUniquePred(compiler) == nullptr)
{
blockInfo[block->bbNum].hasCriticalOutEdge = true;
hasCriticalEdges = true;
// We can stop checking now.
checkForCriticalOutEdge = false;
}
if (isTraversalLayoutOrder() || isBlockVisited(succ))
{
continue;
}
// We've now seen a predecessor, so add it to the work list and the "readySet".
// It will be inserted in the worklist according to the specified traversal order
// (i.e. pred-first or random, since layout order is handled above).
if (!BlockSetOps::IsMember(compiler, readySet, succ->bbNum))
{
JITDUMP("\tSucc block: " FMT_BB, succ->bbNum);
addToBlockSequenceWorkList(readySet, succ, predSet);
BlockSetOps::AddElemD(compiler, readySet, succ->bbNum);
}
}
// For layout order, simply use bbNext
if (isTraversalLayoutOrder())
{
nextBlock = block->bbNext;
continue;
}
while (nextBlock == nullptr)
{
nextBlock = getNextCandidateFromWorkList();
// TODO-Throughput: We would like to bypass this traversal if we know we've handled all
// the blocks - but fgBBcount does not appear to be updated when blocks are removed.
if (nextBlock == nullptr /* && bbSeqCount != compiler->fgBBcount*/ && !verifiedAllBBs)
{
// If we don't encounter all blocks by traversing the regular successor links, do a full
// traversal of all the blocks, and add them in layout order.
// This may include:
// - internal-only blocks which may not be in the flow graph
// - blocks that have become unreachable due to optimizations, but that are strongly
// connected (these are not removed)
// - EH blocks
for (BasicBlock* const seqBlock : compiler->Blocks())
{
if (!isBlockVisited(seqBlock))
{
JITDUMP("\tUnvisited block: " FMT_BB, seqBlock->bbNum);
addToBlockSequenceWorkList(readySet, seqBlock, predSet);
BlockSetOps::AddElemD(compiler, readySet, seqBlock->bbNum);
}
}
verifiedAllBBs = true;
}
else
{
break;
}
}
}
blockSequencingDone = true;
#ifdef DEBUG
// Make sure that we've visited all the blocks.
for (BasicBlock* const block : compiler->Blocks())
{
assert(isBlockVisited(block));
}
JITDUMP("Final LSRA Block Sequence: \n");
int i = 1;
for (BasicBlock *block = startBlockSequence(); block != nullptr; ++i, block = moveToNextBlock())
{
JITDUMP(FMT_BB, block->bbNum);
JITDUMP("(%6s) ", refCntWtd2str(block->getBBWeight(compiler)));
if (blockInfo[block->bbNum].hasEHBoundaryIn)
{
JITDUMP(" EH-in");
}
if (blockInfo[block->bbNum].hasEHBoundaryOut)
{
JITDUMP(" EH-out");
}
if (blockInfo[block->bbNum].hasEHPred)
{
JITDUMP(" has EH pred");
}
JITDUMP("\n");
}
JITDUMP("\n");
#endif
}
//------------------------------------------------------------------------
// compareBlocksForSequencing: Compare two basic blocks for sequencing order.
//
// Arguments:
// block1 - the first block for comparison
// block2 - the second block for comparison
// useBlockWeights - whether to use block weights for comparison
//
// Return Value:
// -1 if block1 is preferred.
// 0 if the blocks are equivalent.
// 1 if block2 is preferred.
//
// Notes:
// See addToBlockSequenceWorkList.
int LinearScan::compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights)
{
if (useBlockWeights)
{
weight_t weight1 = block1->getBBWeight(compiler);
weight_t weight2 = block2->getBBWeight(compiler);
if (weight1 > weight2)
{
return -1;
}
else if (weight1 < weight2)
{
return 1;
}
}
// If weights are the same prefer LOWER bbnum
if (block1->bbNum < block2->bbNum)
{
return -1;
}
else if (block1->bbNum == block2->bbNum)
{
return 0;
}
else
{
return 1;
}
}
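// Example for compareBlocksForSequencing (assumed weights, for illustration only): with
// useBlockWeights == true, a block of weight 4 * BB_UNITY_WEIGHT is preferred (-1) over a
// block of weight BB_UNITY_WEIGHT regardless of bbNum. If the weights are equal (or
// useBlockWeights is false), the lower bbNum wins, so comparing BB03 against BB07 returns
// -1 when block1 is BB03.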
//------------------------------------------------------------------------
// addToBlockSequenceWorkList: Add a BasicBlock to the work list for sequencing.
//
// Arguments:
// sequencedBlockSet - the set of blocks that are already sequenced
// block - the new block to be added
// predSet - the buffer to save predecessors set. A block set allocated by the caller used here as a
// temporary block set for constructing a predecessor set. Allocated by the caller to avoid reallocating a new block
// set with every call to this function
//
// Return Value:
// None.
//
// Notes:
// The first block in the list will be the next one to be sequenced, as soon
// as we encounter a block whose successors have all been sequenced, in pred-first
// order, or the very next block if we are traversing in random order (once implemented).
// This method uses a comparison method to determine the order in which to place
// the blocks in the list. This method queries whether all predecessors of the
// block are sequenced at the time it is added to the list and if so uses block weights
// for inserting the block. A block is never inserted ahead of its predecessors.
// A block at the time of insertion may not have all its predecessors sequenced, in
// which case it will be sequenced based on its block number. Once a block is inserted,
// its priority/order will not be changed later once its remaining predecessors are
// sequenced. This means that the work list may not be sorted entirely based on
// block weights alone.
//
// Note also that, when random traversal order is implemented, this method
// should insert the blocks into the list in random order, so that we can always
// simply select the first block in the list.
void LinearScan::addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet)
{
// The block that is being added is not already sequenced
assert(!BlockSetOps::IsMember(compiler, sequencedBlockSet, block->bbNum));
// Get predSet of block
BlockSetOps::ClearD(compiler, predSet);
for (BasicBlock* const predBlock : block->PredBlocks())
{
BlockSetOps::AddElemD(compiler, predSet, predBlock->bbNum);
}
// If the block is rarely run, or all its preds are already sequenced, use the block's weight to sequence it
bool useBlockWeight = block->isRunRarely() || BlockSetOps::IsSubset(compiler, sequencedBlockSet, predSet);
JITDUMP(", Criteria: %s", useBlockWeight ? "weight" : "bbNum");
BasicBlockList* prevNode = nullptr;
BasicBlockList* nextNode = blockSequenceWorkList;
while (nextNode != nullptr)
{
int seqResult;
if (nextNode->block->isRunRarely())
{
// If the block that is yet to be sequenced is a rarely run block, always use block weights for sequencing
seqResult = compareBlocksForSequencing(nextNode->block, block, true);
}
else if (BlockSetOps::IsMember(compiler, predSet, nextNode->block->bbNum))
{
// always prefer unsequenced pred blocks
seqResult = -1;
}
else
{
seqResult = compareBlocksForSequencing(nextNode->block, block, useBlockWeight);
}
if (seqResult > 0)
{
break;
}
prevNode = nextNode;
nextNode = nextNode->next;
}
BasicBlockList* newListNode = new (compiler, CMK_LSRA) BasicBlockList(block, nextNode);
if (prevNode == nullptr)
{
blockSequenceWorkList = newListNode;
}
else
{
prevNode->next = newListNode;
}
#ifdef DEBUG
nextNode = blockSequenceWorkList;
JITDUMP(", Worklist: [");
while (nextNode != nullptr)
{
JITDUMP(FMT_BB " ", nextNode->block->bbNum);
nextNode = nextNode->next;
}
JITDUMP("]\n");
#endif
}
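// Example for addToBlockSequenceWorkList (illustrative; assumes no rarely-run blocks and no
// unsequenced preds in the list): if the worklist holds [BB05 (weight 2), BB09 (weight 1)]
// and BB07 with weight 1 is added using block weights, BB05 stays ahead because it is
// heavier, while BB09 has equal weight but a higher bbNum, so BB07 is inserted before it,
// giving [BB05, BB07, BB09].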
void LinearScan::removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode)
{
if (listNode == blockSequenceWorkList)
{
assert(prevNode == nullptr);
blockSequenceWorkList = listNode->next;
}
else
{
assert(prevNode != nullptr && prevNode->next == listNode);
prevNode->next = listNode->next;
}
// TODO-Cleanup: consider merging Compiler::BlockListNode and BasicBlockList
// compiler->FreeBlockListNode(listNode);
}
// Initialize the block order for allocation (called each time a new traversal begins).
BasicBlock* LinearScan::startBlockSequence()
{
if (!blockSequencingDone)
{
setBlockSequence();
}
else
{
clearVisitedBlocks();
}
BasicBlock* curBB = compiler->fgFirstBB;
curBBSeqNum = 0;
curBBNum = curBB->bbNum;
assert(blockSequence[0] == compiler->fgFirstBB);
markBlockVisited(curBB);
return curBB;
}
//------------------------------------------------------------------------
// moveToNextBlock: Move to the next block in order for allocation or resolution.
//
// Arguments:
// None
//
// Return Value:
// The next block.
//
// Notes:
// This method is used when the next block is actually going to be handled.
// It changes curBBNum.
BasicBlock* LinearScan::moveToNextBlock()
{
BasicBlock* nextBlock = getNextBlock();
curBBSeqNum++;
if (nextBlock != nullptr)
{
curBBNum = nextBlock->bbNum;
}
return nextBlock;
}
//------------------------------------------------------------------------
// getNextBlock: Get the next block in order for allocation or resolution.
//
// Arguments:
// None
//
// Return Value:
// The next block.
//
// Notes:
// This method does not actually change the current block - it is used simply
// to determine which block will be next.
BasicBlock* LinearScan::getNextBlock()
{
assert(blockSequencingDone);
unsigned int nextBBSeqNum = curBBSeqNum + 1;
if (nextBBSeqNum < bbSeqCount)
{
return blockSequence[nextBBSeqNum];
}
return nullptr;
}
//------------------------------------------------------------------------
// doLinearScan: The main method for register allocation.
//
// Arguments:
// None
//
// Return Value:
// None.
//
void LinearScan::doLinearScan()
{
// Check to see whether we have any local variables to enregister.
// We initialize this in the constructor based on opt settings,
// but we don't want to spend time on the lclVar parts of LinearScan
// if we have no tracked locals.
if (enregisterLocalVars && (compiler->lvaTrackedCount == 0))
{
enregisterLocalVars = false;
}
splitBBNumToTargetBBNumMap = nullptr;
// This is complicated by the fact that physical registers have refs associated
// with locations where they are killed (e.g. calls), but we don't want to
// count these as being touched.
compiler->codeGen->regSet.rsClearRegsModified();
initMaxSpill();
buildIntervals();
DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_REFPOS));
compiler->EndPhase(PHASE_LINEAR_SCAN_BUILD);
DBEXEC(VERBOSE, lsraDumpIntervals("after buildIntervals"));
initVarRegMaps();
allocateRegisters();
allocationPassComplete = true;
compiler->EndPhase(PHASE_LINEAR_SCAN_ALLOC);
resolveRegisters();
compiler->EndPhase(PHASE_LINEAR_SCAN_RESOLVE);
assert(blockSequencingDone); // Should do at least one traversal.
assert(blockEpoch == compiler->GetCurBasicBlockEpoch());
#if TRACK_LSRA_STATS
if ((JitConfig.DisplayLsraStats() == 1)
#ifdef DEBUG
|| VERBOSE
#endif
)
{
dumpLsraStats(jitstdout);
}
#endif // TRACK_LSRA_STATS
DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_POST));
#ifdef DEBUG
compiler->fgDebugCheckLinks();
#endif
compiler->compLSRADone = true;
}
//------------------------------------------------------------------------
// recordVarLocationsAtStartOfBB: Update live-in LclVarDscs with the appropriate
// register location at the start of a block, during codegen.
//
// Arguments:
// bb - the block for which code is about to be generated.
//
// Return Value:
// None.
//
// Assumptions:
// CodeGen will take care of updating the reg masks and the current var liveness,
// after calling this method.
// This is because we need to kill off the dead registers before setting the newly live ones.
void LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb)
{
if (!enregisterLocalVars)
{
return;
}
JITDUMP("Recording Var Locations at start of " FMT_BB "\n", bb->bbNum);
VarToRegMap map = getInVarToRegMap(bb->bbNum);
unsigned count = 0;
VarSetOps::AssignNoCopy(compiler, currentLiveVars,
VarSetOps::Intersection(compiler, registerCandidateVars, bb->bbLiveIn));
VarSetOps::Iter iter(compiler, currentLiveVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned varNum = compiler->lvaTrackedIndexToLclNum(varIndex);
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
regNumber oldRegNum = varDsc->GetRegNum();
regNumber newRegNum = getVarReg(map, varIndex);
if (oldRegNum != newRegNum)
{
JITDUMP(" V%02u(%s->%s)", varNum, compiler->compRegVarName(oldRegNum),
compiler->compRegVarName(newRegNum));
varDsc->SetRegNum(newRegNum);
count++;
#ifdef USING_VARIABLE_LIVE_RANGE
BasicBlock* prevReportedBlock = bb->bbPrev;
if (bb->bbPrev != nullptr && bb->bbPrev->isBBCallAlwaysPairTail())
{
// For callf+always pair we generate the code for the always
// block in genCallFinally and skip it, so we don't report
// anything for it (it has only trivial instructions, so that
// does not matter much). So whether we need to rehome or not
// depends on what we reported at the end of the callf block.
prevReportedBlock = bb->bbPrev->bbPrev;
}
if (prevReportedBlock != nullptr && VarSetOps::IsMember(compiler, prevReportedBlock->bbLiveOut, varIndex))
{
// varDsc was alive on previous block end so it has an open
// "VariableLiveRange" which should change to be according to
// "getInVarToRegMap"
compiler->codeGen->getVariableLiveKeeper()->siUpdateVariableLiveRange(varDsc, varNum);
}
#endif // USING_VARIABLE_LIVE_RANGE
}
else if (newRegNum != REG_STK)
{
JITDUMP(" V%02u(%s)", varNum, compiler->compRegVarName(newRegNum));
count++;
}
}
if (count == 0)
{
JITDUMP(" <none>\n");
}
JITDUMP("\n");
}
void Interval::setLocalNumber(Compiler* compiler, unsigned lclNum, LinearScan* linScan)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
assert(varDsc->lvTracked);
assert(varDsc->lvVarIndex < compiler->lvaTrackedCount);
linScan->localVarIntervals[varDsc->lvVarIndex] = this;
assert(linScan->getIntervalForLocalVar(varDsc->lvVarIndex) == this);
this->isLocalVar = true;
this->varNum = lclNum;
}
//------------------------------------------------------------------------
// LinearScan:identifyCandidatesExceptionDataflow: Build the set of variables exposed on EH flow edges
//
// Notes:
// This logic was originally cloned from fgInterBlockLocalVarLiveness.
//
void LinearScan::identifyCandidatesExceptionDataflow()
{
for (BasicBlock* const block : compiler->Blocks())
{
if (block->hasEHBoundaryIn())
{
// live on entry to handler
VarSetOps::UnionD(compiler, exceptVars, block->bbLiveIn);
}
if (block->hasEHBoundaryOut())
{
VarSetOps::UnionD(compiler, exceptVars, block->bbLiveOut);
if (block->bbJumpKind == BBJ_EHFINALLYRET)
{
// Live on exit from finally.
// We track these separately because, in addition to having EH live-out semantics,
// we need to mark them must-init.
VarSetOps::UnionD(compiler, finallyVars, block->bbLiveOut);
}
}
}
#ifdef DEBUG
if (VERBOSE)
{
JITDUMP("EH Vars: ");
INDEBUG(dumpConvertedVarSet(compiler, exceptVars));
JITDUMP("\nFinally Vars: ");
INDEBUG(dumpConvertedVarSet(compiler, finallyVars));
JITDUMP("\n\n");
}
// All variables live on exit from a 'finally' block should be marked lvLiveInOutOfHndlr.
// and as 'explicitly initialized' (must-init) for GC-ref types.
VarSetOps::Iter iter(compiler, exceptVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned varNum = compiler->lvaTrackedIndexToLclNum(varIndex);
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(varDsc->lvLiveInOutOfHndlr);
if (varTypeIsGC(varDsc) && VarSetOps::IsMember(compiler, finallyVars, varIndex) && !varDsc->lvIsParam)
{
assert(varDsc->lvMustInit);
}
}
#endif
}
bool LinearScan::isRegCandidate(LclVarDsc* varDsc)
{
if (!enregisterLocalVars)
{
return false;
}
assert(compiler->compEnregLocals());
if (!varDsc->lvTracked)
{
return false;
}
#if !defined(TARGET_64BIT)
if (varDsc->lvType == TYP_LONG)
{
// Long variables should not be register candidates.
// Lowering will have split any candidate lclVars into lo/hi vars.
return false;
}
#endif // !defined(TARGET_64BIT)
// If we have JMP, reg args must be put on the stack
if (compiler->compJmpOpUsed && varDsc->lvIsRegArg)
{
return false;
}
// Don't allocate registers for dependently promoted struct fields
if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
return false;
}
// Don't enregister if the ref count is zero.
if (varDsc->lvRefCnt() == 0)
{
varDsc->setLvRefCntWtd(0);
return false;
}
// Variables that are address-exposed are never enregistered, or tracked.
// A struct may be promoted, and a struct that fits in a register may be fully enregistered.
// Pinned variables may not be tracked (a condition of the GCInfo representation)
// or enregistered, on x86 -- it is believed that we can enregister pinned (more properly, "pinning")
// references when using the general GC encoding.
unsigned lclNum = compiler->lvaGetLclNum(varDsc);
if (varDsc->IsAddressExposed() || !varDsc->IsEnregisterableType() ||
(!compiler->compEnregStructLocals() && (varDsc->lvType == TYP_STRUCT)))
{
#ifdef DEBUG
DoNotEnregisterReason dner;
if (varDsc->IsAddressExposed())
{
dner = DoNotEnregisterReason::AddrExposed;
}
else if (!varDsc->IsEnregisterableType())
{
dner = DoNotEnregisterReason::NotRegSizeStruct;
}
else
{
dner = DoNotEnregisterReason::DontEnregStructs;
}
#endif // DEBUG
compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(dner));
return false;
}
else if (varDsc->lvPinned)
{
varDsc->lvTracked = 0;
#ifdef JIT32_GCENCODER
compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::PinningRef));
#endif // JIT32_GCENCODER
return false;
}
// Are we not optimizing and we have exception handlers?
// if so mark all args and locals as volatile, so that they
// won't ever get enregistered.
//
if (compiler->opts.MinOpts() && compiler->compHndBBtabCount > 0)
{
compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler));
}
if (varDsc->lvDoNotEnregister)
{
return false;
}
switch (genActualType(varDsc->TypeGet()))
{
case TYP_FLOAT:
case TYP_DOUBLE:
return !compiler->opts.compDbgCode;
case TYP_INT:
case TYP_LONG:
case TYP_REF:
case TYP_BYREF:
break;
#ifdef FEATURE_SIMD
case TYP_SIMD8:
case TYP_SIMD12:
case TYP_SIMD16:
case TYP_SIMD32:
return !varDsc->lvPromoted;
#endif // FEATURE_SIMD
case TYP_STRUCT:
// TODO-1stClassStructs: support vars with GC pointers. The issue is that such
// vars will have `lvMustInit` set, because emitter has poor support for struct liveness,
// but if the variable is tracked the prolog generator would expect it to be in the liveIn set,
// so an assert in `genFnProlog` will fire.
return compiler->compEnregStructLocals() && !varDsc->HasGCPtr();
case TYP_UNDEF:
case TYP_UNKNOWN:
noway_assert(!"lvType not set correctly");
varDsc->lvType = TYP_INT;
return false;
default:
return false;
}
return true;
}
// Identify locals & compiler temps that are register candidates
// TODO-Cleanup: This was cloned from Compiler::lvaSortByRefCount() in lclvars.cpp in order
// to avoid perturbation, but should be merged.
void LinearScan::identifyCandidates()
{
if (enregisterLocalVars)
{
// Initialize the set of lclVars that are candidates for register allocation.
VarSetOps::AssignNoCopy(compiler, registerCandidateVars, VarSetOps::MakeEmpty(compiler));
// Initialize the sets of lclVars that are used to determine whether, and for which lclVars,
// we need to perform resolution across basic blocks.
// Note that we can't do this in the constructor because the number of tracked lclVars may
// change between the constructor and the actual allocation.
VarSetOps::AssignNoCopy(compiler, resolutionCandidateVars, VarSetOps::MakeEmpty(compiler));
VarSetOps::AssignNoCopy(compiler, splitOrSpilledVars, VarSetOps::MakeEmpty(compiler));
// We set enregisterLocalVars to true only if there are tracked lclVars
assert(compiler->lvaCount != 0);
}
else if (compiler->lvaCount == 0)
{
// Nothing to do. Note that even if enregisterLocalVars is false, we still need to set the
// lvLRACandidate field on all the lclVars to false if we have any.
return;
}
VarSetOps::AssignNoCopy(compiler, exceptVars, VarSetOps::MakeEmpty(compiler));
VarSetOps::AssignNoCopy(compiler, finallyVars, VarSetOps::MakeEmpty(compiler));
if (compiler->compHndBBtabCount > 0)
{
identifyCandidatesExceptionDataflow();
}
unsigned lclNum;
LclVarDsc* varDsc;
// While we build intervals for the candidate lclVars, we will determine the floating point
// lclVars, if any, to consider for callee-save register preferencing.
// We maintain two sets of FP vars - those that meet the first threshold of weighted ref Count,
// and those that meet the second.
// The first threshold is used for methods that are heuristically deemed either to have light
// fp usage, or other factors that encourage conservative use of callee-save registers, such
// as multiple exits (where there might be an early exit that would be excessively penalized by
// lots of prolog/epilog saves & restores).
// The second threshold is used where there are factors deemed to make it more likely that
// fp callee-save registers will be needed, such as loops or many fp vars.
// We keep two sets of vars, since we collect some of the information to determine which set to
// use as we iterate over the vars.
// When we are generating AVX code on non-Unix (FEATURE_PARTIAL_SIMD_CALLEE_SAVE), we maintain an
// additional set of LargeVectorType vars, and there is a separate threshold defined for those.
// It is assumed that if we encounter these, we should consider this a "high use" scenario,
// so we don't maintain two sets of these vars.
// This is defined as thresholdLargeVectorRefCntWtd, as we are likely to use the same mechanism
// for vectors on Arm64, though the actual value may differ.
unsigned int floatVarCount = 0;
weight_t thresholdFPRefCntWtd = 4 * BB_UNITY_WEIGHT;
weight_t maybeFPRefCntWtd = 2 * BB_UNITY_WEIGHT;
VARSET_TP fpMaybeCandidateVars(VarSetOps::UninitVal());
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
unsigned int largeVectorVarCount = 0;
weight_t thresholdLargeVectorRefCntWtd = 4 * BB_UNITY_WEIGHT;
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (enregisterLocalVars)
{
VarSetOps::AssignNoCopy(compiler, fpCalleeSaveCandidateVars, VarSetOps::MakeEmpty(compiler));
VarSetOps::AssignNoCopy(compiler, fpMaybeCandidateVars, VarSetOps::MakeEmpty(compiler));
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
VarSetOps::AssignNoCopy(compiler, largeVectorVars, VarSetOps::MakeEmpty(compiler));
VarSetOps::AssignNoCopy(compiler, largeVectorCalleeSaveCandidateVars, VarSetOps::MakeEmpty(compiler));
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
}
#if DOUBLE_ALIGN
unsigned refCntStk = 0;
unsigned refCntReg = 0;
weight_t refCntWtdReg = 0;
unsigned refCntStkParam = 0; // sum of ref counts for all stack based parameters
weight_t refCntWtdStkDbl = 0; // sum of wtd ref counts for stack based doubles
doDoubleAlign = false;
bool checkDoubleAlign = true;
if (compiler->codeGen->isFramePointerRequired() || compiler->opts.MinOpts())
{
checkDoubleAlign = false;
}
else
{
switch (compiler->getCanDoubleAlign())
{
case MUST_DOUBLE_ALIGN:
doDoubleAlign = true;
checkDoubleAlign = false;
break;
case CAN_DOUBLE_ALIGN:
break;
case CANT_DOUBLE_ALIGN:
doDoubleAlign = false;
checkDoubleAlign = false;
break;
default:
unreached();
}
}
#endif // DOUBLE_ALIGN
// Check whether register variables are permitted.
if (!enregisterLocalVars)
{
localVarIntervals = nullptr;
}
else if (compiler->lvaTrackedCount > 0)
{
// initialize mapping from tracked local to interval
localVarIntervals = new (compiler, CMK_LSRA) Interval*[compiler->lvaTrackedCount];
}
INTRACK_STATS(regCandidateVarCount = 0);
for (lclNum = 0, varDsc = compiler->lvaTable; lclNum < compiler->lvaCount; lclNum++, varDsc++)
{
// Initialize all variables to REG_STK
varDsc->SetRegNum(REG_STK);
#ifndef TARGET_64BIT
varDsc->SetOtherReg(REG_STK);
#endif // TARGET_64BIT
if (!enregisterLocalVars)
{
varDsc->lvLRACandidate = false;
continue;
}
#if DOUBLE_ALIGN
if (checkDoubleAlign)
{
if (varDsc->lvIsParam && !varDsc->lvIsRegArg)
{
refCntStkParam += varDsc->lvRefCnt();
}
else if (!isRegCandidate(varDsc) || varDsc->lvDoNotEnregister)
{
refCntStk += varDsc->lvRefCnt();
if ((varDsc->lvType == TYP_DOUBLE) ||
((varTypeIsStruct(varDsc) && varDsc->lvStructDoubleAlign &&
(compiler->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT))))
{
refCntWtdStkDbl += varDsc->lvRefCntWtd();
}
}
else
{
refCntReg += varDsc->lvRefCnt();
refCntWtdReg += varDsc->lvRefCntWtd();
}
}
#endif // DOUBLE_ALIGN
// Start with the assumption that it's a candidate.
varDsc->lvLRACandidate = 1;
// Start with lvRegister as false - set it true only if the variable gets
// the same register assignment throughout
varDsc->lvRegister = false;
if (!isRegCandidate(varDsc))
{
varDsc->lvLRACandidate = 0;
if (varDsc->lvTracked)
{
localVarIntervals[varDsc->lvVarIndex] = nullptr;
}
// The current implementation of multi-reg structs that are referenced collectively
// (i.e. by referring to the parent lclVar rather than each field separately) relies
// on all or none of the fields being candidates.
if (varDsc->lvIsStructField)
{
LclVarDsc* parentVarDsc = compiler->lvaGetDesc(varDsc->lvParentLcl);
if (parentVarDsc->lvIsMultiRegRet && !parentVarDsc->lvDoNotEnregister)
{
JITDUMP("Setting multi-reg struct V%02u as not enregisterable:", varDsc->lvParentLcl);
compiler->lvaSetVarDoNotEnregister(varDsc->lvParentLcl DEBUGARG(DoNotEnregisterReason::BlockOp));
for (unsigned int i = 0; i < parentVarDsc->lvFieldCnt; i++)
{
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(parentVarDsc->lvFieldLclStart + i);
JITDUMP(" V%02u", parentVarDsc->lvFieldLclStart + i);
if (fieldVarDsc->lvTracked)
{
fieldVarDsc->lvLRACandidate = 0;
localVarIntervals[fieldVarDsc->lvVarIndex] = nullptr;
VarSetOps::RemoveElemD(compiler, registerCandidateVars, fieldVarDsc->lvVarIndex);
JITDUMP("*");
}
// This is not accurate, but we need a non-zero refCnt for the parent so that it will
// be allocated to the stack.
parentVarDsc->setLvRefCnt(parentVarDsc->lvRefCnt() + fieldVarDsc->lvRefCnt());
}
JITDUMP("\n");
}
}
continue;
}
if (varDsc->lvLRACandidate)
{
var_types type = varDsc->GetActualRegisterType();
if (varTypeUsesFloatReg(type))
{
compiler->compFloatingPointUsed = true;
}
Interval* newInt = newInterval(type);
newInt->setLocalNumber(compiler, lclNum, this);
VarSetOps::AddElemD(compiler, registerCandidateVars, varDsc->lvVarIndex);
// we will set this later when we have determined liveness
varDsc->lvMustInit = false;
if (varDsc->lvIsStructField)
{
newInt->isStructField = true;
}
if (varDsc->lvLiveInOutOfHndlr)
{
newInt->isWriteThru = varDsc->lvSingleDefRegCandidate;
setIntervalAsSpilled(newInt);
}
INTRACK_STATS(regCandidateVarCount++);
// We maintain two sets of FP vars - those that meet the first threshold of weighted ref Count,
// and those that meet the second (see the definitions of thresholdFPRefCntWtd and maybeFPRefCntWtd
// above).
CLANG_FORMAT_COMMENT_ANCHOR;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Additionally, when we are generating code for a target with partial SIMD callee-save
// (AVX on non-UNIX amd64 and 16-byte vectors on arm64), we keep a separate set of the
// LargeVectorType vars.
if (Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()))
{
largeVectorVarCount++;
VarSetOps::AddElemD(compiler, largeVectorVars, varDsc->lvVarIndex);
weight_t refCntWtd = varDsc->lvRefCntWtd();
if (refCntWtd >= thresholdLargeVectorRefCntWtd)
{
VarSetOps::AddElemD(compiler, largeVectorCalleeSaveCandidateVars, varDsc->lvVarIndex);
}
}
else
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (regType(type) == FloatRegisterType)
{
floatVarCount++;
weight_t refCntWtd = varDsc->lvRefCntWtd();
if (varDsc->lvIsRegArg)
{
// Don't count the initial reference for register params. In those cases,
// using a callee-save causes an extra copy.
refCntWtd -= BB_UNITY_WEIGHT;
}
if (refCntWtd >= thresholdFPRefCntWtd)
{
VarSetOps::AddElemD(compiler, fpCalleeSaveCandidateVars, varDsc->lvVarIndex);
}
else if (refCntWtd >= maybeFPRefCntWtd)
{
VarSetOps::AddElemD(compiler, fpMaybeCandidateVars, varDsc->lvVarIndex);
}
}
JITDUMP(" ");
DBEXEC(VERBOSE, newInt->dump());
}
else
{
localVarIntervals[varDsc->lvVarIndex] = nullptr;
}
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Create Intervals to use for the save & restore of the upper halves of large vector lclVars.
if (enregisterLocalVars)
{
VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars);
unsigned largeVectorVarIndex = 0;
while (largeVectorVarsIter.NextElem(&largeVectorVarIndex))
{
makeUpperVectorInterval(largeVectorVarIndex);
}
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#if DOUBLE_ALIGN
if (checkDoubleAlign)
{
// TODO-CQ: Fine-tune this:
// In the legacy reg predictor, this runs after allocation, and then demotes any lclVars
// allocated to the frame pointer, which is probably the wrong order.
// However, because it runs after allocation, it can determine the impact of demoting
// the lclVars allocated to the frame pointer.
// => Here, estimate of the EBP refCnt and weighted refCnt is a wild guess.
//
unsigned refCntEBP = refCntReg / 8;
weight_t refCntWtdEBP = refCntWtdReg / 8;
doDoubleAlign =
compiler->shouldDoubleAlign(refCntStk, refCntEBP, refCntWtdEBP, refCntStkParam, refCntWtdStkDbl);
}
#endif // DOUBLE_ALIGN
// The factors we consider to determine which set of fp vars to use as candidates for callee save
// registers currently include the number of fp vars, whether there are loops, and whether there are
// multiple exits. These have been selected somewhat empirically, but there is probably room for
// more tuning.
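// Concretely (see the check below), the more aggressive threshold is used when there are more
// than six fp vars, the method has loops, and there is at most one return block.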
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (VERBOSE)
{
printf("\nFP callee save candidate vars: ");
if (enregisterLocalVars && !VarSetOps::IsEmpty(compiler, fpCalleeSaveCandidateVars))
{
dumpConvertedVarSet(compiler, fpCalleeSaveCandidateVars);
printf("\n");
}
else
{
printf("None\n\n");
}
}
#endif
JITDUMP("floatVarCount = %d; hasLoops = %s, singleExit = %s\n", floatVarCount, dspBool(compiler->fgHasLoops),
dspBool(compiler->fgReturnBlocks == nullptr || compiler->fgReturnBlocks->next == nullptr));
// Determine whether to use the 2nd, more aggressive, threshold for fp callee saves.
if (floatVarCount > 6 && compiler->fgHasLoops &&
(compiler->fgReturnBlocks == nullptr || compiler->fgReturnBlocks->next == nullptr))
{
assert(enregisterLocalVars);
#ifdef DEBUG
if (VERBOSE)
{
printf("Adding additional fp callee save candidates: \n");
if (!VarSetOps::IsEmpty(compiler, fpMaybeCandidateVars))
{
dumpConvertedVarSet(compiler, fpMaybeCandidateVars);
printf("\n");
}
else
{
printf("None\n\n");
}
}
#endif
VarSetOps::UnionD(compiler, fpCalleeSaveCandidateVars, fpMaybeCandidateVars);
}
// From here on, we're only interested in the exceptVars that are candidates.
if (enregisterLocalVars && (compiler->compHndBBtabCount > 0))
{
VarSetOps::IntersectionD(compiler, exceptVars, registerCandidateVars);
}
#ifdef TARGET_ARM
#ifdef DEBUG
if (VERBOSE)
{
// Frame layout is only pre-computed for ARM
printf("\nlvaTable after IdentifyCandidates\n");
compiler->lvaTableDump(Compiler::FrameLayoutState::PRE_REGALLOC_FRAME_LAYOUT);
}
#endif // DEBUG
#endif // TARGET_ARM
}
// TODO-Throughput: This mapping can surely be done more efficiently
void LinearScan::initVarRegMaps()
{
if (!enregisterLocalVars)
{
inVarToRegMaps = nullptr;
outVarToRegMaps = nullptr;
return;
}
assert(compiler->lvaTrackedFixed); // We should have already set this to prevent us from adding any new tracked
// variables.
// The compiler memory allocator requires that the allocation be an
// even multiple of int-sized objects
unsigned int varCount = compiler->lvaTrackedCount;
regMapCount = roundUp(varCount, (unsigned)sizeof(int));
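// For example, with a 4-byte int, a varCount of 5 yields a regMapCount of 8.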
// Not sure why blocks aren't numbered from zero, but they don't appear to be.
// So, if we want to index by bbNum we have to know the maximum value.
unsigned int bbCount = compiler->fgBBNumMax + 1;
inVarToRegMaps = new (compiler, CMK_LSRA) regNumberSmall*[bbCount];
outVarToRegMaps = new (compiler, CMK_LSRA) regNumberSmall*[bbCount];
if (varCount > 0)
{
// This VarToRegMap is used during the resolution of critical edges.
sharedCriticalVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount];
for (unsigned int i = 0; i < bbCount; i++)
{
VarToRegMap inVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount];
VarToRegMap outVarToRegMap = new (compiler, CMK_LSRA) regNumberSmall[regMapCount];
for (unsigned int j = 0; j < regMapCount; j++)
{
inVarToRegMap[j] = REG_STK;
outVarToRegMap[j] = REG_STK;
}
inVarToRegMaps[i] = inVarToRegMap;
outVarToRegMaps[i] = outVarToRegMap;
}
}
else
{
sharedCriticalVarToRegMap = nullptr;
for (unsigned int i = 0; i < bbCount; i++)
{
inVarToRegMaps[i] = nullptr;
outVarToRegMaps[i] = nullptr;
}
}
}
void LinearScan::setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg)
{
assert(enregisterLocalVars);
assert(reg < UCHAR_MAX && varNum < compiler->lvaCount);
inVarToRegMaps[bbNum][compiler->lvaTable[varNum].lvVarIndex] = (regNumberSmall)reg;
}
void LinearScan::setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg)
{
assert(enregisterLocalVars);
assert(reg < UCHAR_MAX && varNum < compiler->lvaCount);
outVarToRegMaps[bbNum][compiler->lvaTable[varNum].lvVarIndex] = (regNumberSmall)reg;
}
LinearScan::SplitEdgeInfo LinearScan::getSplitEdgeInfo(unsigned int bbNum)
{
assert(enregisterLocalVars);
SplitEdgeInfo splitEdgeInfo;
assert(bbNum <= compiler->fgBBNumMax);
assert(bbNum > bbNumMaxBeforeResolution);
assert(splitBBNumToTargetBBNumMap != nullptr);
splitBBNumToTargetBBNumMap->Lookup(bbNum, &splitEdgeInfo);
assert(splitEdgeInfo.toBBNum <= bbNumMaxBeforeResolution);
assert(splitEdgeInfo.fromBBNum <= bbNumMaxBeforeResolution);
return splitEdgeInfo;
}
VarToRegMap LinearScan::getInVarToRegMap(unsigned int bbNum)
{
assert(enregisterLocalVars);
assert(bbNum <= compiler->fgBBNumMax);
// For the blocks inserted to split critical edges, the inVarToRegMap is
// equal to the outVarToRegMap at the "from" block.
if (bbNum > bbNumMaxBeforeResolution)
{
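// A zero fromBBNum indicates an empty split block whose in and out maps are the same (see
// getOutVarToRegMap); in that case, use the incoming map of the "to" block.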
SplitEdgeInfo splitEdgeInfo = getSplitEdgeInfo(bbNum);
unsigned fromBBNum = splitEdgeInfo.fromBBNum;
if (fromBBNum == 0)
{
assert(splitEdgeInfo.toBBNum != 0);
return inVarToRegMaps[splitEdgeInfo.toBBNum];
}
else
{
return outVarToRegMaps[fromBBNum];
}
}
return inVarToRegMaps[bbNum];
}
VarToRegMap LinearScan::getOutVarToRegMap(unsigned int bbNum)
{
assert(enregisterLocalVars);
assert(bbNum <= compiler->fgBBNumMax);
if (bbNum == 0)
{
return nullptr;
}
// For the blocks inserted to split critical edges, the outVarToRegMap is
// equal to the inVarToRegMap at the target.
if (bbNum > bbNumMaxBeforeResolution)
{
// If this is an empty block, its in and out maps are both the same.
// We identify this case by setting fromBBNum or toBBNum to 0, and using only the other.
SplitEdgeInfo splitEdgeInfo = getSplitEdgeInfo(bbNum);
unsigned toBBNum = splitEdgeInfo.toBBNum;
if (toBBNum == 0)
{
assert(splitEdgeInfo.fromBBNum != 0);
return outVarToRegMaps[splitEdgeInfo.fromBBNum];
}
else
{
return inVarToRegMaps[toBBNum];
}
}
return outVarToRegMaps[bbNum];
}
//------------------------------------------------------------------------
// setVarReg: Set the register associated with a variable in the given 'bbVarToRegMap'.
//
// Arguments:
// bbVarToRegMap - the map of interest
// trackedVarIndex - the lvVarIndex for the variable
// reg - the register to which it is being mapped
//
// Return Value:
// None
//
void LinearScan::setVarReg(VarToRegMap bbVarToRegMap, unsigned int trackedVarIndex, regNumber reg)
{
assert(trackedVarIndex < compiler->lvaTrackedCount);
regNumberSmall regSmall = (regNumberSmall)reg;
assert((regNumber)regSmall == reg);
bbVarToRegMap[trackedVarIndex] = regSmall;
}
//------------------------------------------------------------------------
// getVarReg: Get the register associated with a variable in the given 'bbVarToRegMap'.
//
// Arguments:
// bbVarToRegMap - the map of interest
// trackedVarIndex - the lvVarIndex for the variable
//
// Return Value:
// The register to which 'trackedVarIndex' is mapped
//
regNumber LinearScan::getVarReg(VarToRegMap bbVarToRegMap, unsigned int trackedVarIndex)
{
assert(enregisterLocalVars);
assert(trackedVarIndex < compiler->lvaTrackedCount);
return (regNumber)bbVarToRegMap[trackedVarIndex];
}
// Initialize the incoming VarToRegMap to the given map values (generally a predecessor of
// the block)
VarToRegMap LinearScan::setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap)
{
assert(enregisterLocalVars);
VarToRegMap inVarToRegMap = inVarToRegMaps[bbNum];
memcpy(inVarToRegMap, srcVarToRegMap, (regMapCount * sizeof(regNumber)));
return inVarToRegMap;
}
//------------------------------------------------------------------------
// checkLastUses: Check correctness of last use flags
//
// Arguments:
// The block for which we are checking last uses.
//
// Notes:
// This does a backward walk of the RefPositions, starting from the liveOut set.
// This method was previously used to set the last uses, which were computed by
// liveness, but were not created in some cases of multiple lclVar references in the
// same tree. However, now that last uses are computed as RefPositions are created,
// that is no longer necessary, and this method is simply retained as a check.
// The exception to the check-only behavior is when LSRA_EXTEND_LIFETIMES is set via
// COMPlus_JitStressRegs. In that case, this method is required because, even though
// the RefPositions will not be marked lastUse, we still need to correctly
// mark the last uses on the tree nodes, which is done by this method.
//
#ifdef DEBUG
void LinearScan::checkLastUses(BasicBlock* block)
{
if (VERBOSE)
{
JITDUMP("\n\nCHECKING LAST USES for " FMT_BB ", liveout=", block->bbNum);
dumpConvertedVarSet(compiler, block->bbLiveOut);
JITDUMP("\n==============================\n");
}
unsigned keepAliveVarNum = BAD_VAR_NUM;
if (compiler->lvaKeepAliveAndReportThis())
{
keepAliveVarNum = compiler->info.compThisArg;
assert(compiler->info.compIsStatic == false);
}
// find which uses are lastUses
// Work backwards starting with live out.
// 'computedLive' is updated to include any exposed use (including those in this
// block that we've already seen). When we encounter a use, if it's
// not in that set, then it's a last use.
VARSET_TP computedLive(VarSetOps::MakeCopy(compiler, block->bbLiveOut));
bool foundDiff = false;
RefPositionReverseIterator reverseIterator = refPositions.rbegin();
RefPosition* currentRefPosition;
for (currentRefPosition = &reverseIterator; currentRefPosition->refType != RefTypeBB;
reverseIterator++, currentRefPosition = &reverseIterator)
{
// We should never see ParamDefs or ZeroInits within a basic block.
assert(currentRefPosition->refType != RefTypeParamDef && currentRefPosition->refType != RefTypeZeroInit);
if (currentRefPosition->isIntervalRef() && currentRefPosition->getInterval()->isLocalVar)
{
unsigned varNum = currentRefPosition->getInterval()->varNum;
unsigned varIndex = currentRefPosition->getInterval()->getVarIndex(compiler);
LsraLocation loc = currentRefPosition->nodeLocation;
// We should always have a tree node for a localVar, except for the "special" RefPositions.
GenTree* tree = currentRefPosition->treeNode;
assert(tree != nullptr || currentRefPosition->refType == RefTypeExpUse ||
currentRefPosition->refType == RefTypeDummyDef);
if (!VarSetOps::IsMember(compiler, computedLive, varIndex) && varNum != keepAliveVarNum)
{
// There was no exposed use, so this is a "last use" (and we mark it thus even if it's a def)
if (extendLifetimes())
{
// NOTE: this is a bit of a hack. When extending lifetimes, the "last use" bit will be clear.
// This bit, however, would normally be used during resolveLocalRef to set the value of
// LastUse on the node for a ref position. If this bit is not set correctly even when
// extending lifetimes, the code generator will assert as it expects to have accurate last
// use information. To avoid these asserts, set the LastUse bit here.
// Note also that extendLifetimes() is an LSRA stress mode, so it will only be true for
// Checked or Debug builds, for which this method will be executed.
if (tree != nullptr)
{
tree->AsLclVar()->SetLastUse(currentRefPosition->multiRegIdx);
}
}
else if (!currentRefPosition->lastUse)
{
JITDUMP("missing expected last use of V%02u @%u\n", compiler->lvaTrackedIndexToLclNum(varIndex),
loc);
foundDiff = true;
}
VarSetOps::AddElemD(compiler, computedLive, varIndex);
}
else if (currentRefPosition->lastUse)
{
JITDUMP("unexpected last use of V%02u @%u\n", compiler->lvaTrackedIndexToLclNum(varIndex), loc);
foundDiff = true;
}
else if (extendLifetimes() && tree != nullptr)
{
// NOTE: see the comment above re: the extendLifetimes hack.
tree->AsLclVar()->ClearLastUse(currentRefPosition->multiRegIdx);
}
if (currentRefPosition->refType == RefTypeDef || currentRefPosition->refType == RefTypeDummyDef)
{
VarSetOps::RemoveElemD(compiler, computedLive, varIndex);
}
}
assert(reverseIterator != refPositions.rend());
}
VARSET_TP liveInNotComputedLive(VarSetOps::Diff(compiler, block->bbLiveIn, computedLive));
// We may have exception vars in the liveIn set of exception blocks that are not computed live.
if (compiler->ehBlockHasExnFlowDsc(block))
{
VarSetOps::DiffD(compiler, liveInNotComputedLive, compiler->fgGetHandlerLiveVars(block));
}
VarSetOps::Iter liveInNotComputedLiveIter(compiler, liveInNotComputedLive);
unsigned liveInNotComputedLiveIndex = 0;
while (liveInNotComputedLiveIter.NextElem(&liveInNotComputedLiveIndex))
{
LclVarDsc* varDesc = compiler->lvaGetDescByTrackedIndex(liveInNotComputedLiveIndex);
if (varDesc->lvLRACandidate)
{
JITDUMP(FMT_BB ": V%02u is in LiveIn set, but not computed live.\n", block->bbNum,
compiler->lvaTrackedIndexToLclNum(liveInNotComputedLiveIndex));
foundDiff = true;
}
}
VarSetOps::DiffD(compiler, computedLive, block->bbLiveIn);
const VARSET_TP& computedLiveNotLiveIn(computedLive); // reuse the buffer.
VarSetOps::Iter computedLiveNotLiveInIter(compiler, computedLiveNotLiveIn);
unsigned computedLiveNotLiveInIndex = 0;
while (computedLiveNotLiveInIter.NextElem(&computedLiveNotLiveInIndex))
{
LclVarDsc* varDesc = compiler->lvaGetDescByTrackedIndex(computedLiveNotLiveInIndex);
if (varDesc->lvLRACandidate)
{
JITDUMP(FMT_BB ": V%02u is computed live, but not in LiveIn set.\n", block->bbNum,
compiler->lvaTrackedIndexToLclNum(computedLiveNotLiveInIndex));
foundDiff = true;
}
}
assert(!foundDiff);
}
#endif // DEBUG
//------------------------------------------------------------------------
// findPredBlockForLiveIn: Determine which block should be used for the register locations of the live-in variables.
//
// Arguments:
// block - The block for which we're selecting a predecessor.
// prevBlock - The previous block in allocation order.
// pPredBlockIsAllocated - A debug-only argument that indicates whether any of the predecessors have been seen
// in allocation order.
//
// Return Value:
// The selected predecessor.
//
// Assumptions:
// in DEBUG, caller initializes *pPredBlockIsAllocated to false, and it will be set to true if the block
// returned is in fact a predecessor.
//
// Notes:
// This will select a predecessor based on the heuristics obtained by getLsraBlockBoundaryLocations(), which can be
// one of:
// LSRA_BLOCK_BOUNDARY_PRED - Use the register locations of a predecessor block (default)
// LSRA_BLOCK_BOUNDARY_LAYOUT - Use the register locations of the previous block in layout order.
// This is the only case where this actually returns a different block.
// LSRA_BLOCK_BOUNDARY_ROTATE - Rotate the register locations from a predecessor.
// For this case, the block returned is the same as for LSRA_BLOCK_BOUNDARY_PRED, but
// the register locations will be "rotated" to stress the resolution and allocation
// code.
BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block,
BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated))
{
BasicBlock* predBlock = nullptr;
assert(*pPredBlockIsAllocated == false);
// Blocks with exception flow on entry use no predecessor blocks, as all incoming vars
// are on the stack.
if (blockInfo[block->bbNum].hasEHBoundaryIn)
{
JITDUMP("\n\nIncoming EH boundary; ");
return nullptr;
}
if (block == compiler->fgFirstBB)
{
return nullptr;
}
if (block->bbPreds == nullptr)
{
assert((block != compiler->fgFirstBB) || (prevBlock != nullptr));
JITDUMP("\n\nNo predecessor; ");
// Some throw blocks do not have a predecessor. For such blocks, we want to return the fact
// that predecessor is indeed null instead of returning the prevBlock. Returning prevBlock
// will be wrong, because LSRA would think that the variable is live in registers based on
// the lexical flow, but that won't be true according to the control flow.
// Example:
//
// IG05:
// ... ; V01 is in 'rdi'
// JNE IG07
// ...
// IG06:
// ...
// ... ; V01 is in 'rbx'
// JMP IG08
// IG07:
// ... ; LSRA thinks V01 is in 'rbx' if IG06 is set as previous block of IG07.
// ....
// CALL CORINFO_HELP_RNGCHKFAIL
// ...
// IG08:
// ...
// ...
if (block->bbJumpKind == BBJ_THROW)
{
JITDUMP(" - throw block; ");
return nullptr;
}
// We may have unreachable blocks, due to optimization.
// We don't want to set the predecessor as null in this case, since that will result in
// unnecessary DummyDefs, and possibly result in inconsistencies requiring resolution
// (since these unreachable blocks can have reachable successors).
return prevBlock;
}
#ifdef DEBUG
if (getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_LAYOUT)
{
if (prevBlock != nullptr)
{
predBlock = prevBlock;
}
}
else
#endif // DEBUG
{
predBlock = block->GetUniquePred(compiler);
if (predBlock != nullptr)
{
// We should already have returned null if this block has a single incoming EH boundary edge.
assert(!predBlock->hasEHBoundaryOut());
if (isBlockVisited(predBlock))
{
if (predBlock->bbJumpKind == BBJ_COND)
{
// Special handling to improve matching on backedges.
BasicBlock* otherBlock = (block == predBlock->bbNext) ? predBlock->bbJumpDest : predBlock->bbNext;
noway_assert(otherBlock != nullptr);
if (isBlockVisited(otherBlock) && !blockInfo[otherBlock->bbNum].hasEHBoundaryIn)
{
// This is the case when we have a conditional branch where one target has already
// been visited. It would be best to use the same incoming regs as that block,
// so that we have less likelihood of having to move registers.
// For example, in determining the block to use for the starting register locations for
// "block" in the following example, we'd like to use the same predecessor for "block"
// as for "otherBlock", so that both successors of predBlock have the same locations, reducing
// the likelihood of needing a split block on a backedge:
//
// otherPred
// |
// otherBlock <-+
// . . . |
// |
// predBlock----+
// |
// block
//
if (blockInfo[otherBlock->bbNum].hasEHBoundaryIn)
{
return nullptr;
}
else
{
for (BasicBlock* const otherPred : otherBlock->PredBlocks())
{
if (otherPred->bbNum == blockInfo[otherBlock->bbNum].predBBNum)
{
predBlock = otherPred;
break;
}
}
}
}
}
}
else
{
predBlock = nullptr;
}
}
else
{
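// There is no unique predecessor; among the predecessors that have already been visited
// (i.e. already allocated), choose the one with the highest block weight.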
for (BasicBlock* const candidatePredBlock : block->PredBlocks())
{
if (isBlockVisited(candidatePredBlock))
{
if ((predBlock == nullptr) || (predBlock->bbWeight < candidatePredBlock->bbWeight))
{
predBlock = candidatePredBlock;
INDEBUG(*pPredBlockIsAllocated = true;)
}
}
}
}
if (predBlock == nullptr)
{
predBlock = prevBlock;
assert(predBlock != nullptr);
JITDUMP("\n\nNo allocated predecessor; ");
}
}
return predBlock;
}
#ifdef DEBUG
void LinearScan::dumpVarRefPositions(const char* title)
{
if (enregisterLocalVars)
{
printf("\nVAR REFPOSITIONS %s\n", title);
for (unsigned i = 0; i < compiler->lvaCount; i++)
{
printf("--- V%02u", i);
const LclVarDsc* varDsc = compiler->lvaGetDesc(i);
if (varDsc->lvIsRegCandidate())
{
Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex);
printf(" (Interval %d)\n", interval->intervalIndex);
for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
{
ref->dump(this);
}
}
else
{
printf("\n");
}
}
printf("\n");
}
}
#endif // DEBUG
// Set the default rpFrameType based upon codeGen->isFramePointerRequired()
// This was lifted from the register predictor
//
void LinearScan::setFrameType()
{
FrameType frameType = FT_NOT_SET;
#if DOUBLE_ALIGN
compiler->codeGen->setDoubleAlign(false);
if (doDoubleAlign)
{
frameType = FT_DOUBLE_ALIGN_FRAME;
compiler->codeGen->setDoubleAlign(true);
}
else
#endif // DOUBLE_ALIGN
if (compiler->codeGen->isFramePointerRequired())
{
frameType = FT_EBP_FRAME;
}
else
{
if (compiler->rpMustCreateEBPCalled == false)
{
#ifdef DEBUG
const char* reason;
#endif // DEBUG
compiler->rpMustCreateEBPCalled = true;
if (compiler->rpMustCreateEBPFrame(INDEBUG(&reason)))
{
JITDUMP("; Decided to create an EBP based frame for ETW stackwalking (%s)\n", reason);
compiler->codeGen->setFrameRequired(true);
}
}
if (compiler->codeGen->isFrameRequired())
{
frameType = FT_EBP_FRAME;
}
else
{
frameType = FT_ESP_FRAME;
}
}
switch (frameType)
{
case FT_ESP_FRAME:
noway_assert(!compiler->codeGen->isFramePointerRequired());
noway_assert(!compiler->codeGen->isFrameRequired());
compiler->codeGen->setFramePointerUsed(false);
break;
case FT_EBP_FRAME:
compiler->codeGen->setFramePointerUsed(true);
break;
#if DOUBLE_ALIGN
case FT_DOUBLE_ALIGN_FRAME:
noway_assert(!compiler->codeGen->isFramePointerRequired());
compiler->codeGen->setFramePointerUsed(false);
break;
#endif // DOUBLE_ALIGN
default:
noway_assert(!"rpFrameType not set correctly!");
break;
}
// If we are using FPBASE as the frame register, we cannot also use it for
// a local var.
regMaskTP removeMask = RBM_NONE;
if (frameType == FT_EBP_FRAME)
{
removeMask |= RBM_FPBASE;
}
compiler->rpFrameType = frameType;
#ifdef TARGET_ARMARCH
// Determine whether we need to reserve a register for large lclVar offsets.
if (compiler->compRsvdRegCheck(Compiler::REGALLOC_FRAME_LAYOUT))
{
// We reserve R10/IP1 in this case to hold the offsets in load/store instructions
compiler->codeGen->regSet.rsMaskResvd |= RBM_OPT_RSVD;
assert(REG_OPT_RSVD != REG_FP);
JITDUMP(" Reserved REG_OPT_RSVD (%s) due to large frame\n", getRegName(REG_OPT_RSVD));
removeMask |= RBM_OPT_RSVD;
}
#endif // TARGET_ARMARCH
if ((removeMask != RBM_NONE) && ((availableIntRegs & removeMask) != 0))
{
// We know that we're already in "read mode" for availableIntRegs. However,
// we need to remove these registers, so subsequent users (like callers
// to allRegs()) get the right thing. The RemoveRegistersFromMasks() code
// fixes up everything that already took a dependency on the value that was
// previously read, so this completes the picture.
availableIntRegs.OverrideAssign(availableIntRegs & ~removeMask);
}
}
//------------------------------------------------------------------------
// copyOrMoveRegInUse: Is 'ref' a copyReg/moveReg that is still busy at the given location?
//
// Arguments:
// ref: The RefPosition of interest
// loc: The LsraLocation at which we're determining whether it's busy.
//
// Return Value:
// true iff 'ref' is active at the given location
//
bool copyOrMoveRegInUse(RefPosition* ref, LsraLocation loc)
{
if (!ref->copyReg && !ref->moveReg)
{
return false;
}
if (ref->getRefEndLocation() >= loc)
{
return true;
}
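// A copyReg/moveReg may be followed by another RefPosition on the same tree node (e.g. the use
// that consumes the copy); if that subsequent reference extends to 'loc', the register is still busy.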
Interval* interval = ref->getInterval();
RefPosition* nextRef = interval->getNextRefPosition();
if (nextRef != nullptr && nextRef->treeNode == ref->treeNode && nextRef->getRefEndLocation() >= loc)
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// getRegisterType: Get the RegisterType to use for the given RefPosition
//
// Arguments:
// currentInterval: The interval for the current allocation
// refPosition: The RefPosition of the current Interval for which a register is being allocated
//
// Return Value:
// The RegisterType that should be allocated for this RefPosition
//
// Notes:
// This will nearly always be identical to the registerType of the interval, except in the case
// of SIMD types of 8 bytes (currently only Vector2) when they are passed and returned in integer
// registers, or copied to a return temp.
// This method need only be called in situations where we may be dealing with the register requirements
// of a RefTypeUse RefPosition (i.e. not when we are only looking at the type of an interval, nor when
// we are interested in the "defining" type of the interval). This is because the situation of interest
// only happens at the use (where it must be copied to an integer register).
RegisterType LinearScan::getRegisterType(Interval* currentInterval, RefPosition* refPosition)
{
assert(refPosition->getInterval() == currentInterval);
RegisterType regType = currentInterval->registerType;
regMaskTP candidates = refPosition->registerAssignment;
assert((candidates & allRegs(regType)) != RBM_NONE);
return regType;
}
//------------------------------------------------------------------------
// isMatchingConstant: Check to see whether a given register contains the constant referenced
// by the given RefPosition
//
// Arguments:
// physRegRecord: The RegRecord for the register we're interested in.
// refPosition: The RefPosition for a constant interval.
//
// Return Value:
// True iff the register was defined by an identical constant node as the current interval.
//
bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPosition)
{
if ((physRegRecord->assignedInterval == nullptr) || !physRegRecord->assignedInterval->isConstant ||
(refPosition->refType != RefTypeDef))
{
return false;
}
Interval* interval = refPosition->getInterval();
if (!interval->isConstant || !isRegConstant(physRegRecord->regNum, interval->registerType))
{
return false;
}
noway_assert(refPosition->treeNode != nullptr);
GenTree* otherTreeNode = physRegRecord->assignedInterval->firstRefPosition->treeNode;
noway_assert(otherTreeNode != nullptr);
if (refPosition->treeNode->OperGet() != otherTreeNode->OperGet())
{
return false;
}
switch (otherTreeNode->OperGet())
{
case GT_CNS_INT:
{
ssize_t v1 = refPosition->treeNode->AsIntCon()->IconValue();
ssize_t v2 = otherTreeNode->AsIntCon()->IconValue();
if ((v1 == v2) && (varTypeGCtype(refPosition->treeNode) == varTypeGCtype(otherTreeNode) || v1 == 0))
{
#ifdef TARGET_64BIT
// If the constant is negative, only reuse registers of the same type.
// This is because, on a 64-bit system, we do not sign-extend immediates in registers to
// 64-bits unless they are actually longs, as this requires a longer instruction.
// This doesn't apply to a 32-bit system, on which long values occupy multiple registers.
// (We could sign-extend, but we would have to always sign-extend, because if we reuse more
// than once, we won't have access to the instruction that originally defines the constant).
if ((refPosition->treeNode->TypeGet() == otherTreeNode->TypeGet()) || (v1 >= 0))
#endif // TARGET_64BIT
{
return true;
}
}
break;
}
case GT_CNS_DBL:
{
// For floating point constants, the values must be identical, not simply compare
// equal. So we compare the bits.
if (refPosition->treeNode->AsDblCon()->isBitwiseEqual(otherTreeNode->AsDblCon()) &&
(refPosition->treeNode->TypeGet() == otherTreeNode->TypeGet()))
{
return true;
}
break;
}
default:
break;
}
return false;
}
//------------------------------------------------------------------------
// allocateReg: Find a register that satisfies the requirements for refPosition,
// taking into account the preferences for the given Interval,
// and possibly spilling a lower weight Interval.
//
// Arguments:
// currentInterval: The interval for the current allocation
// refPosition: The RefPosition of the current Interval for which a register is being allocated
// Return Value:
// The regNumber, if any, allocated to the RefPosition.
// Returns REG_NA only if 'refPosition->RegOptional()' is true, and there are
// no free registers and no registers containing lower-weight Intervals that can be spilled.
//
// Notes:
// This method will prefer to allocate a free register, but if none are available,
// it will look for a lower-weight Interval to spill.
// Weight and farthest distance of next reference are used to determine whether an Interval
// currently occupying a register should be spilled. It will be spilled either:
// - At its most recent RefPosition, if that is within the current block, OR
// - At the boundary between the previous block and this one
//
// To select a ref position for spilling:
// - If refPosition->RegOptional() == false
//   The RefPosition chosen for spilling will be the one with the lowest weight of all;
//   if there is more than one ref position with the same lowest weight, the one with the
//   farthest distance to its next reference is chosen among them.
//
// - If refPosition->RegOptional() == true
//   The ref position chosen for spilling will not only have the lowest weight of all,
//   but will also have a weight lower than 'refPosition'. If there is no such ref
//   position, no register will be allocated.
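//
// In effect, for the non-RegOptional case the spill candidate is chosen (roughly) as:
//     argmin over currently occupied registers of (spillWeight, -distanceToNextRefPosition)
// i.e. lowest spill weight first, with the farthest next reference breaking ties.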
//
regNumber LinearScan::allocateReg(Interval* currentInterval,
RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore))
{
regMaskTP foundRegBit = regSelector->select(currentInterval, refPosition DEBUG_ARG(registerScore));
if (foundRegBit == RBM_NONE)
{
return REG_NA;
}
regNumber foundReg = genRegNumFromMask(foundRegBit);
RegRecord* availablePhysRegRecord = getRegisterRecord(foundReg);
Interval* assignedInterval = availablePhysRegRecord->assignedInterval;
if ((assignedInterval != currentInterval) &&
isAssigned(availablePhysRegRecord ARM_ARG(getRegisterType(currentInterval, refPosition))))
{
if (regSelector->isSpilling())
{
// We're spilling.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
if (currentInterval->registerType == TYP_DOUBLE)
{
assert(genIsValidDoubleReg(availablePhysRegRecord->regNum));
unassignDoublePhysReg(availablePhysRegRecord);
}
else if (assignedInterval->registerType == TYP_DOUBLE)
{
// Make sure we spill both halves of the double register.
assert(genIsValidDoubleReg(assignedInterval->assignedReg->regNum));
unassignPhysReg(assignedInterval->assignedReg, assignedInterval->recentRefPosition);
}
else
#endif
{
unassignPhysReg(availablePhysRegRecord, assignedInterval->recentRefPosition);
}
}
else
{
// If we considered this "unassigned" because this interval's lifetime ends before
// the next ref, remember it.
// For historical reasons (due to former short-circuiting of this case), if we're reassigning
// the current interval to a previous assignment, we don't remember the previous interval.
// Note that we need to compute this condition before calling unassignPhysReg, which will reset
// assignedInterval->physReg.
bool wasAssigned = regSelector->foundUnassignedReg() && (assignedInterval != nullptr) &&
(assignedInterval->physReg == foundReg);
unassignPhysReg(availablePhysRegRecord ARM_ARG(currentInterval->registerType));
if (regSelector->isMatchingConstant() && compiler->opts.OptimizationEnabled())
{
assert(assignedInterval->isConstant);
refPosition->treeNode->SetReuseRegVal();
}
else if (wasAssigned)
{
updatePreviousInterval(availablePhysRegRecord, assignedInterval, assignedInterval->registerType);
}
else
{
assert(!regSelector->isConstAvailable());
}
}
}
assignPhysReg(availablePhysRegRecord, currentInterval);
refPosition->registerAssignment = foundRegBit;
return foundReg;
}
//------------------------------------------------------------------------
// canSpillReg: Determine whether we can spill physRegRecord
//
// Arguments:
// physRegRecord - reg to spill
// refLocation - Location of RefPosition where this register will be spilled
//
// Return Value:
// True - if we can spill physRegRecord
// False - otherwise
//
bool LinearScan::canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation)
{
assert(physRegRecord->assignedInterval != nullptr);
RefPosition* recentAssignedRef = physRegRecord->assignedInterval->recentRefPosition;
if (recentAssignedRef != nullptr)
{
// We can't spill a register that's active at the current location.
// We should already have determined this with isRegBusy before calling this method.
assert(!isRefPositionActive(recentAssignedRef, refLocation));
return true;
}
// recentAssignedRef can only be null if this is a parameter that has not yet been
// moved to a register (or stack), in which case we can't spill it yet.
assert(physRegRecord->assignedInterval->getLocalVar(compiler)->lvIsParam);
return false;
}
//------------------------------------------------------------------------
// getSpillWeight: Get the weight associated with spilling the given register
//
// Arguments:
// physRegRecord - reg to spill
//
// Return Value:
// The weight associated with the location at which we will spill.
//
// Note: This helper is designed to be used only from allocateReg() and getDoubleSpillWeight()
//
weight_t LinearScan::getSpillWeight(RegRecord* physRegRecord)
{
assert(physRegRecord->assignedInterval != nullptr);
RefPosition* recentAssignedRef = physRegRecord->assignedInterval->recentRefPosition;
weight_t weight = BB_ZERO_WEIGHT;
// We shouldn't call this method if there is no recentAssignedRef.
assert(recentAssignedRef != nullptr);
// We shouldn't call this method if the register is active at this location.
assert(!isRefPositionActive(recentAssignedRef, currentLoc));
weight = getWeight(recentAssignedRef);
return weight;
}
#ifdef TARGET_ARM
//------------------------------------------------------------------------
// canSpillDoubleReg: Determine whether we can spill physRegRecord
//
// Arguments:
// physRegRecord - reg to spill (must be a valid double register)
// refLocation - Location of RefPosition where this register will be spilled
//
// Return Value:
// True - if we can spill physRegRecord
// False - otherwise
//
bool LinearScan::canSpillDoubleReg(RegRecord* physRegRecord, LsraLocation refLocation)
{
assert(genIsValidDoubleReg(physRegRecord->regNum));
RegRecord* physRegRecord2 = getSecondHalfRegRec(physRegRecord);
if ((physRegRecord->assignedInterval != nullptr) && !canSpillReg(physRegRecord, refLocation))
{
return false;
}
if ((physRegRecord2->assignedInterval != nullptr) && !canSpillReg(physRegRecord2, refLocation))
{
return false;
}
return true;
}
//------------------------------------------------------------------------
// unassignDoublePhysReg: unassign a double register (pair)
//
// Arguments:
// doubleRegRecord - reg to unassign
//
// Note:
// The given RegRecord must be a valid (even numbered) double register.
//
void LinearScan::unassignDoublePhysReg(RegRecord* doubleRegRecord)
{
assert(genIsValidDoubleReg(doubleRegRecord->regNum));
RegRecord* doubleRegRecordLo = doubleRegRecord;
RegRecord* doubleRegRecordHi = getSecondHalfRegRec(doubleRegRecordLo);
// For a double register, we have the following four cases.
// Case 1: doubleRegRecLo is assigned to TYP_DOUBLE interval
// Case 2: doubleRegRecLo and doubleRegRecHi are assigned to different TYP_FLOAT intervals
// Case 3: doubleRegRecLo is assigned to a TYP_FLOAT interval and doubleRegRecHi is nullptr
// Case 4: doubleRegRecordLo is nullptr, and doubleRegRecordHi is assigned to a TYP_FLOAT interval
if (doubleRegRecordLo->assignedInterval != nullptr)
{
if (doubleRegRecordLo->assignedInterval->registerType == TYP_DOUBLE)
{
// Case 1: doubleRegRecLo is assigned to TYP_DOUBLE interval
unassignPhysReg(doubleRegRecordLo, doubleRegRecordLo->assignedInterval->recentRefPosition);
}
else
{
// Case 2: doubleRegRecLo and doubleRegRecHi are assigned to different TYP_FLOAT intervals
// Case 3: doubleRegRecLo is assigned to a TYP_FLOAT interval and doubleRegRecHi is nullptr
assert(doubleRegRecordLo->assignedInterval->registerType == TYP_FLOAT);
unassignPhysReg(doubleRegRecordLo, doubleRegRecordLo->assignedInterval->recentRefPosition);
if (doubleRegRecordHi != nullptr)
{
if (doubleRegRecordHi->assignedInterval != nullptr)
{
assert(doubleRegRecordHi->assignedInterval->registerType == TYP_FLOAT);
unassignPhysReg(doubleRegRecordHi, doubleRegRecordHi->assignedInterval->recentRefPosition);
}
}
}
}
else
{
// Case 4: doubleRegRecordLo is nullptr, and doubleRegRecordHi is assigned to a TYP_FLOAT interval
assert(doubleRegRecordHi->assignedInterval != nullptr);
assert(doubleRegRecordHi->assignedInterval->registerType == TYP_FLOAT);
unassignPhysReg(doubleRegRecordHi, doubleRegRecordHi->assignedInterval->recentRefPosition);
}
}
#endif // TARGET_ARM
//------------------------------------------------------------------------
// isRefPositionActive: Determine whether a given RefPosition is active at the given location
//
// Arguments:
// refPosition - the RefPosition of interest
// refLocation - the LsraLocation at which we want to know if it is active
//
// Return Value:
// True - if this RefPosition occurs at the given location, OR
// if it occurs at the previous location and is marked delayRegFree.
// False - otherwise
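//
// For example, a RefPosition at location L that is marked delayRegFree is still considered
// active at location L+1, so its register is not treated as free for reuse at that location.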
//
bool LinearScan::isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation)
{
return (refPosition->nodeLocation == refLocation ||
((refPosition->nodeLocation + 1 == refLocation) && refPosition->delayRegFree));
}
//------------------------------------------------------------------------
// isSpillCandidate: Determine if a register is a spill candidate for a given RefPosition.
//
// Arguments:
// current The interval for the current allocation
// refPosition The RefPosition of the current Interval for which a register is being allocated
// physRegRecord The RegRecord for the register we're considering for spill
//
// Return Value:
// True iff the given register can be spilled to accommodate the given RefPosition.
//
bool LinearScan::isSpillCandidate(Interval* current, RefPosition* refPosition, RegRecord* physRegRecord)
{
regMaskTP candidateBit = genRegMask(physRegRecord->regNum);
LsraLocation refLocation = refPosition->nodeLocation;
// We shouldn't be calling this if we haven't already determined that the register is not
// busy until the next kill.
assert(!isRegBusy(physRegRecord->regNum, current->registerType));
// We should already have determined that the register isn't actively in use.
assert(!isRegInUse(physRegRecord->regNum, current->registerType));
// We shouldn't be calling this if 'refPosition' is a fixed reference to this register.
assert(!refPosition->isFixedRefOfRegMask(candidateBit));
// We shouldn't be calling this if there is a fixed reference at the same location
// (and it's not due to this reference), as checked above.
assert(!conflictingFixedRegReference(physRegRecord->regNum, refPosition));
bool canSpill;
#ifdef TARGET_ARM
if (current->registerType == TYP_DOUBLE)
{
canSpill = canSpillDoubleReg(physRegRecord, refLocation);
}
else
#endif // TARGET_ARM
{
canSpill = canSpillReg(physRegRecord, refLocation);
}
if (!canSpill)
{
return false;
}
return true;
}
// Grab a register to use to copy and then immediately use.
// This is called only for localVar intervals that already have a register
// assignment that is not compatible with the current RefPosition.
// This is not like regular assignment, because we don't want to change
// any preferences or existing register assignments.
// Prefer a free register that's got the earliest next use.
// Otherwise, spill something with the farthest next use
//
regNumber LinearScan::assignCopyReg(RefPosition* refPosition)
{
Interval* currentInterval = refPosition->getInterval();
assert(currentInterval != nullptr);
assert(currentInterval->isActive);
// Save the relatedInterval, if any, so that it doesn't get modified during allocation.
Interval* savedRelatedInterval = currentInterval->relatedInterval;
currentInterval->relatedInterval = nullptr;
// We don't really want to change the default assignment,
// so 1) pretend this isn't active, and 2) remember the old reg
regNumber oldPhysReg = currentInterval->physReg;
RegRecord* oldRegRecord = currentInterval->assignedReg;
assert(oldRegRecord->regNum == oldPhysReg);
currentInterval->isActive = false;
// We *must* allocate a register, and it will be a copyReg. Set that field now, so that
// refPosition->RegOptional() will return false.
refPosition->copyReg = true;
RegisterScore registerScore = NONE;
regNumber allocatedReg = allocateReg(currentInterval, refPosition DEBUG_ARG(®isterScore));
assert(allocatedReg != REG_NA);
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, currentInterval, allocatedReg, nullptr, registerScore));
// Now restore the old info
currentInterval->relatedInterval = savedRelatedInterval;
currentInterval->physReg = oldPhysReg;
currentInterval->assignedReg = oldRegRecord;
currentInterval->isActive = true;
return allocatedReg;
}
//------------------------------------------------------------------------
// isAssigned: This is the function to check if the given RegRecord has an assignedInterval.
//
// Arguments:
// regRec - The RegRecord to check that it is assigned.
// newRegType - The register type of the interval that is about to be assigned; on ARM, a
// TYP_DOUBLE assignment requires checking both halves of the double register.
//
// Return Value:
// Returns true if the given RegRecord has an assignedInterval.
//
bool LinearScan::isAssigned(RegRecord* regRec ARM_ARG(RegisterType newRegType))
{
if (regRec->assignedInterval != nullptr)
{
return true;
}
#ifdef TARGET_ARM
if (newRegType == TYP_DOUBLE)
{
RegRecord* otherRegRecord = getSecondHalfRegRec(regRec);
if (otherRegRecord->assignedInterval != nullptr)
{
return true;
}
}
#endif
return false;
}
//------------------------------------------------------------------------
// checkAndAssignInterval: Check if the given register is already assigned to a different interval;
//                         if it is, unassign the physical record, and then set its
//                         assignedInterval to 'interval'
//
// Arguments:
// regRec - The RegRecord of interest
// interval - The Interval that we're going to assign to 'regRec'
//
void LinearScan::checkAndAssignInterval(RegRecord* regRec, Interval* interval)
{
Interval* assignedInterval = regRec->assignedInterval;
if (assignedInterval != nullptr && assignedInterval != interval)
{
// This is allocated to another interval. Either it is inactive, or it was allocated as a
// copyReg and is therefore not the "assignedReg" of the other interval. In the latter case,
// we simply unassign it - in the former case we need to set the physReg on the interval to
// REG_NA to indicate that it is no longer in that register.
// The lack of checking for this case resulted in an assert in the retail version of System.dll,
// in method SerialStream.GetDcbFlag.
// Note that we can't check for the copyReg case, because we may have seen a more recent
// RefPosition for the Interval that was NOT a copyReg.
if (assignedInterval->assignedReg == regRec)
{
assert(assignedInterval->isActive == false);
assignedInterval->physReg = REG_NA;
}
unassignPhysReg(regRec->regNum);
}
#ifdef TARGET_ARM
// If 'interval' and 'assignedInterval' were both TYP_DOUBLE, then we have unassigned 'assignedInterval'
// from both halves. Otherwise, if 'interval' is TYP_DOUBLE, we now need to unassign the other half.
if ((interval->registerType == TYP_DOUBLE) &&
((assignedInterval == nullptr) || (assignedInterval->registerType == TYP_FLOAT)))
{
RegRecord* otherRegRecord = getSecondHalfRegRec(regRec);
assignedInterval = otherRegRecord->assignedInterval;
if (assignedInterval != nullptr && assignedInterval != interval)
{
if (assignedInterval->assignedReg == otherRegRecord)
{
assert(assignedInterval->isActive == false);
assignedInterval->physReg = REG_NA;
}
unassignPhysReg(otherRegRecord->regNum);
}
}
#endif
updateAssignedInterval(regRec, interval, interval->registerType);
}
// Assign the given physical register interval to the given interval
void LinearScan::assignPhysReg(RegRecord* regRec, Interval* interval)
{
regMaskTP assignedRegMask = genRegMask(regRec->regNum);
compiler->codeGen->regSet.rsSetRegsModified(assignedRegMask DEBUGARG(true));
interval->assignedReg = regRec;
checkAndAssignInterval(regRec, interval);
interval->physReg = regRec->regNum;
interval->isActive = true;
if (interval->isLocalVar)
{
// Prefer this register for future references
interval->updateRegisterPreferences(assignedRegMask);
}
}
//------------------------------------------------------------------------
// setIntervalAsSplit: Set this Interval as being split
//
// Arguments:
// interval - The Interval which is being split
//
// Return Value:
// None.
//
// Notes:
// The given Interval will be marked as split, and it will be added to the
// set of splitOrSpilledVars.
//
// Assumptions:
// "interval" must be a lclVar interval, as tree temps are never split.
// This is asserted in the call to getVarIndex().
//
void LinearScan::setIntervalAsSplit(Interval* interval)
{
if (interval->isLocalVar)
{
unsigned varIndex = interval->getVarIndex(compiler);
if (!interval->isSplit)
{
VarSetOps::AddElemD(compiler, splitOrSpilledVars, varIndex);
}
else
{
assert(VarSetOps::IsMember(compiler, splitOrSpilledVars, varIndex));
}
}
interval->isSplit = true;
}
//------------------------------------------------------------------------
// setIntervalAsSpilled: Set this Interval as being spilled
//
// Arguments:
// interval - The Interval which is being spilled
//
// Return Value:
// None.
//
// Notes:
// The given Interval will be marked as spilled, and it will be added
// to the set of splitOrSpilledVars.
//
void LinearScan::setIntervalAsSpilled(Interval* interval)
{
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (interval->isUpperVector)
{
assert(interval->relatedInterval->isLocalVar);
interval->isSpilled = true;
// Now we need to mark the local as spilled also, even if the lower half is never spilled,
// as this will use the upper part of its home location.
interval = interval->relatedInterval;
// We'll now mark this as spilled, so it changes the spillCost.
RefPosition* recentRefPos = interval->recentRefPosition;
if (!interval->isSpilled && interval->isActive && (recentRefPos != nullptr))
{
VarSetOps::AddElemD(compiler, splitOrSpilledVars, interval->getVarIndex(compiler));
interval->isSpilled = true;
regNumber reg = interval->physReg;
spillCost[reg] = getSpillWeight(getRegisterRecord(reg));
}
}
#endif
if (interval->isLocalVar)
{
unsigned varIndex = interval->getVarIndex(compiler);
if (!interval->isSpilled)
{
VarSetOps::AddElemD(compiler, splitOrSpilledVars, varIndex);
}
else
{
assert(VarSetOps::IsMember(compiler, splitOrSpilledVars, varIndex));
}
}
interval->isSpilled = true;
}
//------------------------------------------------------------------------
// spill: Spill the "interval" starting from "fromRefPosition" (upto "toRefPosition")
//
// Arguments:
// interval - The interval that contains the RefPosition to be spilled
// fromRefPosition - The RefPosition at which the Interval is to be spilled
// toRefPosition - The RefPosition at which it must be reloaded (debug only arg)
//
// Return Value:
// None.
//
// Assumptions:
// fromRefPosition and toRefPosition must not be null
//
void LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition DEBUGARG(RefPosition* toRefPosition))
{
assert(fromRefPosition != nullptr && toRefPosition != nullptr);
assert(fromRefPosition->getInterval() == interval && toRefPosition->getInterval() == interval);
assert(fromRefPosition->nextRefPosition == toRefPosition);
if (!fromRefPosition->lastUse)
{
// Lcl var def/use ref positions that were not allocated a register should be marked as spillAfter,
// even if they are reg optional. Note that if it is a WriteThru interval, the value is always
// written to the stack, but the WriteThru indicates that the register is no longer live.
if (fromRefPosition->RegOptional() && !(interval->isLocalVar && fromRefPosition->IsActualRef()))
{
fromRefPosition->registerAssignment = RBM_NONE;
}
else
{
fromRefPosition->spillAfter = true;
}
}
// Only handle the singledef intervals whose firstRefPosition is RefTypeDef and is not yet marked as spillAfter.
// Singledef intervals whose firstRefPositions are already marked as spillAfter do not need to be
// marked as singleDefSpill, because they will always get spilled at the firstRefPosition.
// This helps in spilling the singleDef at definition
//
// Note: Only mark "singleDefSpill" for those intervals who ever get spilled. The intervals that are never spilled
// will not be marked as "singleDefSpill" and hence won't get spilled at the first definition.
if (interval->isSingleDef && RefTypeIsDef(interval->firstRefPosition->refType) &&
!interval->firstRefPosition->spillAfter)
{
// TODO-CQ: Check if it is beneficial to spill at def, meaning, if it is a hot block don't worry about
// doing the spill. Another option is to track the number of refPositions and, if an interval
// has more than X refPositions, then perform this optimization.
interval->firstRefPosition->singleDefSpill = true;
}
assert(toRefPosition != nullptr);
#ifdef DEBUG
if (VERBOSE)
{
dumpLsraAllocationEvent(LSRA_EVENT_SPILL, interval);
}
#endif // DEBUG
INTRACK_STATS(updateLsraStat(STAT_SPILL, fromRefPosition->bbNum));
interval->isActive = false;
setIntervalAsSpilled(interval);
// If fromRefPosition occurs before the beginning of this block, mark this as living in the stack
// on entry to this block.
if (fromRefPosition->nodeLocation <= curBBStartLocation)
{
// This must be a lclVar interval
assert(interval->isLocalVar);
setInVarRegForBB(curBBNum, interval->varNum, REG_STK);
}
}
//------------------------------------------------------------------------
// unassignPhysRegNoSpill: Unassign the given physical register record from
// an active interval, without spilling.
//
// Arguments:
// regRec - the RegRecord to be unassigned
//
// Return Value:
// None.
//
// Assumptions:
// The assignedInterval must not be null, and must be active.
//
// Notes:
// This method is used to unassign a register when an interval needs to be moved to a
// different register, but not (yet) spilled.
void LinearScan::unassignPhysRegNoSpill(RegRecord* regRec)
{
Interval* assignedInterval = regRec->assignedInterval;
assert(assignedInterval != nullptr && assignedInterval->isActive);
assignedInterval->isActive = false;
unassignPhysReg(regRec, nullptr);
assignedInterval->isActive = true;
}
//------------------------------------------------------------------------
// checkAndClearInterval: Clear the assignedInterval for the given
// physical register record
//
// Arguments:
// regRec - the physical RegRecord to be unassigned
// spillRefPosition - The RefPosition at which the assignedInterval is to be spilled
// or nullptr if we aren't spilling
//
// Return Value:
// None.
//
// Assumptions:
// see unassignPhysReg
//
void LinearScan::checkAndClearInterval(RegRecord* regRec, RefPosition* spillRefPosition)
{
Interval* assignedInterval = regRec->assignedInterval;
assert(assignedInterval != nullptr);
regNumber thisRegNum = regRec->regNum;
if (spillRefPosition == nullptr)
{
// Note that we can't assert for the copyReg case
//
if (assignedInterval->physReg == thisRegNum)
{
assert(assignedInterval->isActive == false);
}
}
else
{
assert(spillRefPosition->getInterval() == assignedInterval);
}
updateAssignedInterval(regRec, nullptr, assignedInterval->registerType);
}
//------------------------------------------------------------------------
// unassignPhysReg: Unassign the given physical register record, and spill the
// assignedInterval at the given spillRefPosition, if any.
//
// Arguments:
// regRec - The RegRecord to be unassigned
// newRegType - The RegisterType of interval that would be assigned
//
// Return Value:
// None.
//
// Notes:
// On the ARM architecture, Intervals have to be unassigned taking into account
// the register type of the interval that would be assigned.
//
void LinearScan::unassignPhysReg(RegRecord* regRec ARM_ARG(RegisterType newRegType))
{
RegRecord* regRecToUnassign = regRec;
#ifdef TARGET_ARM
RegRecord* anotherRegRec = nullptr;
if ((regRecToUnassign->assignedInterval != nullptr) &&
(regRecToUnassign->assignedInterval->registerType == TYP_DOUBLE))
{
// If the register type of the interval (being unassigned or new) is TYP_DOUBLE,
// it has to be a valid double register (an even register).
if (!genIsValidDoubleReg(regRecToUnassign->regNum))
{
regRecToUnassign = findAnotherHalfRegRec(regRec);
}
}
else
{
if (newRegType == TYP_DOUBLE)
{
anotherRegRec = getSecondHalfRegRec(regRecToUnassign);
}
}
#endif
if (regRecToUnassign->assignedInterval != nullptr)
{
unassignPhysReg(regRecToUnassign, regRecToUnassign->assignedInterval->recentRefPosition);
}
#ifdef TARGET_ARM
if ((anotherRegRec != nullptr) && (anotherRegRec->assignedInterval != nullptr))
{
unassignPhysReg(anotherRegRec, anotherRegRec->assignedInterval->recentRefPosition);
}
#endif
}
//------------------------------------------------------------------------
// unassignPhysReg: Unassign the given physical register record, and spill the
// assignedInterval at the given spillRefPosition, if any.
//
// Arguments:
// regRec - the RegRecord to be unassigned
// spillRefPosition - The RefPosition at which the assignedInterval is to be spilled
//
// Return Value:
// None.
//
// Assumptions:
// The assignedInterval must not be null.
// If spillRefPosition is null, the assignedInterval must be inactive, or not currently
// assigned to this register (e.g. this is a copyReg for that Interval).
// Otherwise, spillRefPosition must be associated with the assignedInterval.
//
void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPosition)
{
Interval* assignedInterval = regRec->assignedInterval;
assert(assignedInterval != nullptr);
assert(spillRefPosition == nullptr || spillRefPosition->getInterval() == assignedInterval);
regNumber thisRegNum = regRec->regNum;
// Is assignedInterval actually still assigned to this register?
bool intervalIsAssigned = (assignedInterval->physReg == thisRegNum);
regNumber regToUnassign = thisRegNum;
#ifdef TARGET_ARM
RegRecord* anotherRegRec = nullptr;
// Prepare second half RegRecord of a double register for TYP_DOUBLE
if (assignedInterval->registerType == TYP_DOUBLE)
{
assert(isFloatRegType(regRec->registerType));
RegRecord* doubleRegRec;
if (genIsValidDoubleReg(thisRegNum))
{
anotherRegRec = getSecondHalfRegRec(regRec);
doubleRegRec = regRec;
}
else
{
regToUnassign = REG_PREV(thisRegNum);
anotherRegRec = getRegisterRecord(regToUnassign);
doubleRegRec = anotherRegRec;
}
// Both RegRecords should have been assigned to the same interval.
assert(assignedInterval == anotherRegRec->assignedInterval);
if (!intervalIsAssigned && (assignedInterval->physReg == anotherRegRec->regNum))
{
intervalIsAssigned = true;
}
clearNextIntervalRef(regToUnassign, TYP_DOUBLE);
clearSpillCost(regToUnassign, TYP_DOUBLE);
checkAndClearInterval(doubleRegRec, spillRefPosition);
// Both RegRecords should have been unassigned together.
assert(regRec->assignedInterval == nullptr);
assert(anotherRegRec->assignedInterval == nullptr);
}
else
#endif // TARGET_ARM
{
clearNextIntervalRef(thisRegNum, assignedInterval->registerType);
clearSpillCost(thisRegNum, assignedInterval->registerType);
checkAndClearInterval(regRec, spillRefPosition);
}
makeRegAvailable(regToUnassign, assignedInterval->registerType);
RefPosition* nextRefPosition = nullptr;
if (spillRefPosition != nullptr)
{
nextRefPosition = spillRefPosition->nextRefPosition;
}
if (!intervalIsAssigned && assignedInterval->physReg != REG_NA)
{
// This must have been a temporary copy reg, but we can't assert that because there
// may have been intervening RefPositions that were not copyRegs.
// reg->assignedInterval has already been set to nullptr by checkAndClearInterval()
assert(regRec->assignedInterval == nullptr);
return;
}
// regNumber victimAssignedReg = assignedInterval->physReg;
assignedInterval->physReg = REG_NA;
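// Spill only if the interval is still active and has a subsequent reference;
// otherwise there is no later use that would need the value reloaded.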
bool spill = assignedInterval->isActive && nextRefPosition != nullptr;
if (spill)
{
// If this is an active interval, it must have a recentRefPosition,
// otherwise it would not be active
assert(spillRefPosition != nullptr);
#if 0
// TODO-CQ: Enable this and insert an explicit GT_COPY (otherwise there's no way to communicate
// to codegen that we want the copyReg to be the new home location).
// If the last reference was a copyReg, and we're spilling the register
// it was copied from, then make the copyReg the new primary location
// if possible
if (spillRefPosition->copyReg)
{
regNumber copyFromRegNum = victimAssignedReg;
regNumber copyRegNum = genRegNumFromMask(spillRefPosition->registerAssignment);
if (copyFromRegNum == thisRegNum &&
getRegisterRecord(copyRegNum)->assignedInterval == assignedInterval)
{
assert(copyRegNum != thisRegNum);
assignedInterval->physReg = copyRegNum;
assignedInterval->assignedReg = this->getRegisterRecord(copyRegNum);
return;
}
}
#endif // 0
#ifdef DEBUG
// With JitStressRegs == 0x80 (LSRA_EXTEND_LIFETIMES), we may have a RefPosition
// that is not marked lastUse even though the treeNode is a lastUse. In that case
// we must not mark it for spill because the register will have been immediately freed
// after use. While we could conceivably add special handling for this case in codegen,
// it would be messy and undesirably cause the "bleeding" of LSRA stress modes outside
// of LSRA.
if (extendLifetimes() && assignedInterval->isLocalVar && RefTypeIsUse(spillRefPosition->refType) &&
spillRefPosition->treeNode != nullptr &&
spillRefPosition->treeNode->AsLclVar()->IsLastUse(spillRefPosition->multiRegIdx))
{
dumpLsraAllocationEvent(LSRA_EVENT_SPILL_EXTENDED_LIFETIME, assignedInterval);
assignedInterval->isActive = false;
spill = false;
// If the spillRefPosition occurs before the beginning of this block, it will have
// been marked as living in this register on entry to this block, but we now need
// to mark this as living on the stack.
if (spillRefPosition->nodeLocation <= curBBStartLocation)
{
setInVarRegForBB(curBBNum, assignedInterval->varNum, REG_STK);
if (spillRefPosition->nextRefPosition != nullptr)
{
setIntervalAsSpilled(assignedInterval);
}
}
else
{
// Otherwise, we need to mark spillRefPosition as lastUse, or the interval
// will remain active beyond its allocated range during the resolution phase.
spillRefPosition->lastUse = true;
}
}
else
#endif // DEBUG
{
spillInterval(assignedInterval, spillRefPosition DEBUGARG(nextRefPosition));
}
}
// Maintain the association with the interval, if it has more references.
// Or, if we "remembered" an interval assigned to this register, restore it.
if (nextRefPosition != nullptr)
{
assignedInterval->assignedReg = regRec;
}
else if (canRestorePreviousInterval(regRec, assignedInterval))
{
regRec->assignedInterval = regRec->previousInterval;
regRec->previousInterval = nullptr;
if (regRec->assignedInterval->physReg != thisRegNum)
{
clearNextIntervalRef(thisRegNum, regRec->assignedInterval->registerType);
}
else
{
updateNextIntervalRef(thisRegNum, regRec->assignedInterval);
}
#ifdef TARGET_ARM
// Note:
// We cannot use updateAssignedInterval() and updatePreviousInterval() here,
// because regRec may not be an even-numbered float register.
// Update second half RegRecord of a double register for TYP_DOUBLE
if (regRec->assignedInterval->registerType == TYP_DOUBLE)
{
RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(regRec);
anotherHalfRegRec->assignedInterval = regRec->assignedInterval;
anotherHalfRegRec->previousInterval = nullptr;
}
#endif // TARGET_ARM
#ifdef DEBUG
if (spill)
{
dumpLsraAllocationEvent(LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, regRec->assignedInterval,
thisRegNum);
}
else
{
dumpLsraAllocationEvent(LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL, regRec->assignedInterval, thisRegNum);
}
#endif // DEBUG
}
else
{
updateAssignedInterval(regRec, nullptr, assignedInterval->registerType);
updatePreviousInterval(regRec, nullptr, assignedInterval->registerType);
}
}
//------------------------------------------------------------------------
// spillGCRefs: Spill any GC-type intervals that are currently in registers.
//
// Arguments:
// killRefPosition - The RefPosition for the kill
//
// Return Value:
// None.
//
// Notes:
// This is used to ensure that we have no live GC refs in registers at an
// unmanaged call.
//
void LinearScan::spillGCRefs(RefPosition* killRefPosition)
{
// For each physical register that can hold a GC type,
// if it is occupied by an interval of a GC type, spill that interval.
regMaskTP candidateRegs = killRefPosition->registerAssignment;
INDEBUG(bool killedRegs = false);
while (candidateRegs != RBM_NONE)
{
regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
candidateRegs &= ~nextRegBit;
regNumber nextReg = genRegNumFromMask(nextRegBit);
RegRecord* regRecord = getRegisterRecord(nextReg);
Interval* assignedInterval = regRecord->assignedInterval;
if (assignedInterval == nullptr || (assignedInterval->isActive == false))
{
continue;
}
bool needsKill = varTypeIsGC(assignedInterval->registerType);
if (!needsKill)
{
// The importer will assign a GC type to the rhs of an assignment if the lhs type is a GC type,
// even if the rhs is not. See the CEE_STLOC* case in impImportBlockCode(). As a result,
// we can have a 'GT_LCL_VAR' node with a GC type, when the lclVar itself is an integer type.
// The emitter will mark this register as holding a GC type. Therefore we must spill this value.
// This was exposed on Arm32 with EH write-thru.
if ((assignedInterval->recentRefPosition != nullptr) &&
(assignedInterval->recentRefPosition->treeNode != nullptr))
{
needsKill = varTypeIsGC(assignedInterval->recentRefPosition->treeNode);
}
}
if (needsKill)
{
INDEBUG(killedRegs = true);
unassignPhysReg(regRecord, assignedInterval->recentRefPosition);
makeRegAvailable(nextReg, assignedInterval->registerType);
}
}
INDEBUG(dumpLsraAllocationEvent(killedRegs ? LSRA_EVENT_DONE_KILL_GC_REFS : LSRA_EVENT_NO_GC_KILLS, nullptr, REG_NA,
nullptr));
}
//------------------------------------------------------------------------
// processBlockEndAllocation: Update var locations after 'currentBlock' has been allocated
//
// Arguments:
// currentBlock - the BasicBlock we have just finished allocating registers for
//
// Return Value:
// None
//
// Notes:
// Calls processBlockEndLocations() to set the outVarToRegMap, then gets the next block,
// and sets the inVarToRegMap appropriately.
void LinearScan::processBlockEndAllocation(BasicBlock* currentBlock)
{
assert(currentBlock != nullptr);
if (enregisterLocalVars)
{
processBlockEndLocations(currentBlock);
}
markBlockVisited(currentBlock);
// Get the next block to allocate.
// When the last block in the method has successors, there will be a final "RefTypeBB" to
// ensure that we get the varToRegMap set appropriately, but in that case we don't need
// to worry about "nextBlock".
BasicBlock* nextBlock = getNextBlock();
if (nextBlock != nullptr)
{
processBlockStartLocations(nextBlock);
}
}
//------------------------------------------------------------------------
// rotateBlockStartLocation: When in the LSRA_BLOCK_BOUNDARY_ROTATE stress mode, attempt to
// "rotate" the register assignment for a localVar to the next higher
// register that is available.
//
// Arguments:
// interval - the Interval for the variable whose register is getting rotated
// targetReg - its register assignment from the predecessor block being used for live-in
// availableRegs - registers available for use
//
// Return Value:
// The new register to use.
#ifdef DEBUG
regNumber LinearScan::rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs)
{
if (targetReg != REG_STK && getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_ROTATE)
{
// If we're rotating the register locations at block boundaries, try to use
// the next higher register number of the appropriate register type.
regMaskTP candidateRegs = allRegs(interval->registerType) & availableRegs;
regNumber firstReg = REG_NA;
regNumber newReg = REG_NA;
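// Track the lowest available candidate in 'firstReg' so that we can wrap around to it
// if no candidate register is higher than targetReg.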
while (candidateRegs != RBM_NONE)
{
regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
candidateRegs &= ~nextRegBit;
regNumber nextReg = genRegNumFromMask(nextRegBit);
if (nextReg > targetReg)
{
newReg = nextReg;
break;
}
else if (firstReg == REG_NA)
{
firstReg = nextReg;
}
}
if (newReg == REG_NA)
{
assert(firstReg != REG_NA);
newReg = firstReg;
}
targetReg = newReg;
}
return targetReg;
}
#endif // DEBUG
#ifdef TARGET_ARM
//--------------------------------------------------------------------------------------
// isSecondHalfReg: Test if regRec is the second half of a double register
// which is assigned to an interval.
//
// Arguments:
// regRec - a register to be tested
// interval - an interval which is assigned to some register
//
// Assumptions:
// None
//
// Return Value:
// True only if regRec is second half of assignedReg in interval
//
bool LinearScan::isSecondHalfReg(RegRecord* regRec, Interval* interval)
{
RegRecord* assignedReg = interval->assignedReg;
if (assignedReg != nullptr && interval->registerType == TYP_DOUBLE)
{
// interval should have been allocated to a valid double register
assert(genIsValidDoubleReg(assignedReg->regNum));
// Find a second half RegRecord of double register
regNumber firstRegNum = assignedReg->regNum;
regNumber secondRegNum = REG_NEXT(firstRegNum);
assert(genIsValidFloatReg(secondRegNum) && !genIsValidDoubleReg(secondRegNum));
RegRecord* secondRegRec = getRegisterRecord(secondRegNum);
return secondRegRec == regRec;
}
return false;
}
//------------------------------------------------------------------------------------------
// getSecondHalfRegRec: Get the second (odd) half of an ARM32 double register
//
// Arguments:
// regRec - A float RegRecord
//
// Assumptions:
// regRec must be a valid double register (i.e. even)
//
// Return Value:
// The RegRecord for the second half of the double register
//
RegRecord* LinearScan::getSecondHalfRegRec(RegRecord* regRec)
{
regNumber secondHalfRegNum;
RegRecord* secondHalfRegRec;
assert(genIsValidDoubleReg(regRec->regNum));
secondHalfRegNum = REG_NEXT(regRec->regNum);
secondHalfRegRec = getRegisterRecord(secondHalfRegNum);
return secondHalfRegRec;
}
//------------------------------------------------------------------------------------------
// findAnotherHalfRegRec: Find the other half RegRecord which forms the same ARM32 double register
//
// Arguments:
// regRec - A float RegRecord
//
// Assumptions:
// None
//
// Return Value:
// A RegRecord which forms the same double register as regRec
//
RegRecord* LinearScan::findAnotherHalfRegRec(RegRecord* regRec)
{
regNumber anotherHalfRegNum = findAnotherHalfRegNum(regRec->regNum);
return getRegisterRecord(anotherHalfRegNum);
}
//------------------------------------------------------------------------------------------
// findAnotherHalfRegNum: Find the other half register's number which forms the same ARM32 double register
//
// Arguments:
// regNumber - A float regNumber
//
// Assumptions:
// None
//
// Return Value:
// A register number which forms the same double register as regNum.
//
regNumber LinearScan::findAnotherHalfRegNum(regNumber regNum)
{
regNumber anotherHalfRegNum;
assert(genIsValidFloatReg(regNum));
// Find another half register for TYP_DOUBLE interval,
// following same logic in canRestorePreviousInterval().
if (genIsValidDoubleReg(regNum))
{
anotherHalfRegNum = REG_NEXT(regNum);
assert(!genIsValidDoubleReg(anotherHalfRegNum));
}
else
{
anotherHalfRegNum = REG_PREV(regNum);
assert(genIsValidDoubleReg(anotherHalfRegNum));
}
return anotherHalfRegNum;
}
#endif
//--------------------------------------------------------------------------------------
// canRestorePreviousInterval: Test if we can restore previous interval
//
// Arguments:
// regRec - a register which contains previous interval to be restored
// assignedInterval - an interval just unassigned
//
// Assumptions:
// None
//
// Return Value:
// True only if previous interval of regRec can be restored
//
bool LinearScan::canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval)
{
bool retVal =
(regRec->previousInterval != nullptr && regRec->previousInterval != assignedInterval &&
regRec->previousInterval->assignedReg == regRec && regRec->previousInterval->getNextRefPosition() != nullptr);
#ifdef TARGET_ARM
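// For a TYP_DOUBLE previous interval, we can only restore it if the other half
// of the register pair is also unassigned.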
if (retVal && regRec->previousInterval->registerType == TYP_DOUBLE)
{
RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(regRec);
retVal = retVal && anotherHalfRegRec->assignedInterval == nullptr;
}
#endif
return retVal;
}
bool LinearScan::isAssignedToInterval(Interval* interval, RegRecord* regRec)
{
bool isAssigned = (interval->assignedReg == regRec);
#ifdef TARGET_ARM
isAssigned |= isSecondHalfReg(regRec, interval);
#endif
return isAssigned;
}
void LinearScan::unassignIntervalBlockStart(RegRecord* regRecord, VarToRegMap inVarToRegMap)
{
// Is there another interval currently assigned to this register? If so unassign it.
Interval* assignedInterval = regRecord->assignedInterval;
if (assignedInterval != nullptr)
{
if (isAssignedToInterval(assignedInterval, regRecord))
{
// Only localVars, constants or vector upper halves should be assigned to registers at block boundaries.
if (!assignedInterval->isLocalVar)
{
assert(assignedInterval->isConstant || assignedInterval->IsUpperVector());
// Don't need to update the VarToRegMap.
inVarToRegMap = nullptr;
}
regNumber assignedRegNum = assignedInterval->assignedReg->regNum;
// If the interval is active, it will be set to active when we reach its new
// register assignment (which we must not yet have done, or it wouldn't still be
// assigned to this register).
assignedInterval->isActive = false;
unassignPhysReg(assignedInterval->assignedReg, nullptr);
if ((inVarToRegMap != nullptr) && inVarToRegMap[assignedInterval->getVarIndex(compiler)] == assignedRegNum)
{
inVarToRegMap[assignedInterval->getVarIndex(compiler)] = REG_STK;
}
}
else
{
// This interval is no longer assigned to this register.
updateAssignedInterval(regRecord, nullptr, assignedInterval->registerType);
}
}
}
//------------------------------------------------------------------------
// processBlockStartLocations: Update var locations on entry to 'currentBlock' and clear constant
// registers.
//
// Arguments:
// currentBlock - the BasicBlock we are about to allocate registers for
//
// Return Value:
// None
//
// Notes:
// During the allocation pass (allocationPassComplete = false), we use the outVarToRegMap
// of the selected predecessor to determine the lclVar locations for the inVarToRegMap.
// During the resolution (write-back when allocationPassComplete = true) pass, we only
// modify the inVarToRegMap in cases where a lclVar was spilled after the block had been
// completed.
void LinearScan::processBlockStartLocations(BasicBlock* currentBlock)
{
// If we have no register candidates we should only call this method during allocation.
assert(enregisterLocalVars || !allocationPassComplete);
if (!enregisterLocalVars)
{
// Just clear any constant registers and return.
resetAvailableRegs();
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
Interval* assignedInterval = physRegRecord->assignedInterval;
clearNextIntervalRef(reg, physRegRecord->registerType);
clearSpillCost(reg, physRegRecord->registerType);
if (assignedInterval != nullptr)
{
assert(assignedInterval->isConstant);
physRegRecord->assignedInterval = nullptr;
}
}
return;
}
unsigned predBBNum = blockInfo[currentBlock->bbNum].predBBNum;
VarToRegMap predVarToRegMap = getOutVarToRegMap(predBBNum);
VarToRegMap inVarToRegMap = getInVarToRegMap(currentBlock->bbNum);
// If this block enters an exception region, all incoming vars are on the stack.
if (predBBNum == 0)
{
#if DEBUG
if (blockInfo[currentBlock->bbNum].hasEHBoundaryIn || !allocationPassComplete)
{
// This should still be in its initialized empty state.
for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
{
// In the case where we're extending lifetimes for stress, we are intentionally modeling variables
// as live when they really aren't to create extra register pressure & constraints.
// However, this means that non-EH-vars will be live into EH regions. We can and should ignore the
// locations of these. Note that they aren't reported to codegen anyway.
if (!getLsraExtendLifeTimes() || VarSetOps::IsMember(compiler, currentBlock->bbLiveIn, varIndex))
{
assert(inVarToRegMap[varIndex] == REG_STK);
}
}
}
#endif // DEBUG
predVarToRegMap = inVarToRegMap;
}
VarSetOps::AssignNoCopy(compiler, currentLiveVars,
VarSetOps::Intersection(compiler, registerCandidateVars, currentBlock->bbLiveIn));
#ifdef DEBUG
if (getLsraExtendLifeTimes())
{
VarSetOps::AssignNoCopy(compiler, currentLiveVars, registerCandidateVars);
}
// If we are rotating register assignments at block boundaries, we want to make the
// inactive registers available for the rotation.
regMaskTP inactiveRegs = RBM_NONE;
#endif // DEBUG
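// liveRegs accumulates the registers occupied by live-in variables as they are processed below.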
regMaskTP liveRegs = RBM_NONE;
VarSetOps::Iter iter(compiler, currentLiveVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
if (!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate)
{
continue;
}
regNumber targetReg;
Interval* interval = getIntervalForLocalVar(varIndex);
RefPosition* nextRefPosition = interval->getNextRefPosition();
assert((nextRefPosition != nullptr) || (interval->isWriteThru));
bool leaveOnStack = false;
// Special handling for variables live in/out of exception handlers.
if (interval->isWriteThru)
{
// There are 3 cases where we will leave writethru lclVars on the stack:
// 1) There is no predecessor.
// 2) It is conservatively or artificially live - that is, it has no next use,
// so there is no place for codegen to record that the register is no longer occupied.
// 3) This block has a predecessor with an outgoing EH edge. We won't be able to add "join"
// resolution to load the EH var into a register along that edge, so it must be on stack.
if ((predBBNum == 0) || (nextRefPosition == nullptr) || (RefTypeIsDef(nextRefPosition->refType)) ||
blockInfo[currentBlock->bbNum].hasEHPred)
{
leaveOnStack = true;
}
}
if (!allocationPassComplete)
{
targetReg = getVarReg(predVarToRegMap, varIndex);
if (leaveOnStack)
{
targetReg = REG_STK;
}
#ifdef DEBUG
regNumber newTargetReg = rotateBlockStartLocation(interval, targetReg, (~liveRegs | inactiveRegs));
if (newTargetReg != targetReg)
{
targetReg = newTargetReg;
setIntervalAsSplit(interval);
}
#endif // DEBUG
setVarReg(inVarToRegMap, varIndex, targetReg);
}
else // allocationPassComplete (i.e. resolution/write-back pass)
{
targetReg = getVarReg(inVarToRegMap, varIndex);
// There are four cases that we need to consider during the resolution pass:
// 1. This variable had a register allocated initially, and it was not spilled in the RefPosition
// that feeds this block. In this case, both targetReg and predVarToRegMap[varIndex] will be targetReg.
// 2. This variable had not been spilled prior to the end of predBB, but was later spilled, so
// predVarToRegMap[varIndex] will be REG_STK, but targetReg is its former allocated value.
// In this case, we will normally change it to REG_STK. We will update its "spilled" status when we
// encounter it in resolveLocalRef().
// 2a. If the next RefPosition is marked as a copyReg, we need to retain the allocated register. This is
// because the copyReg RefPosition will not have recorded the "home" register, yet downstream
// RefPositions rely on the correct "home" register.
// 3. This variable was spilled before we reached the end of predBB. In this case, both targetReg and
// predVarToRegMap[varIndex] will be REG_STK, and the next RefPosition will have been marked
// as reload during allocation time if necessary (note that by the time we actually reach the next
// RefPosition, we may be using a different predecessor, at which point it is still in a register).
// 4. This variable was spilled during the allocation of this block, so targetReg is REG_STK
// (because we set inVarToRegMap at the time we spilled it), but predVarToRegMap[varIndex]
// is not REG_STK. We retain the REG_STK value in the inVarToRegMap.
if (targetReg != REG_STK)
{
if (getVarReg(predVarToRegMap, varIndex) != REG_STK)
{
// Case #1 above.
assert(getVarReg(predVarToRegMap, varIndex) == targetReg ||
getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_ROTATE);
}
else if (!nextRefPosition->copyReg)
{
// case #2 above.
setVarReg(inVarToRegMap, varIndex, REG_STK);
targetReg = REG_STK;
}
// Else case 2a. - retain targetReg.
}
// Else case #3 or #4, we retain targetReg and nothing further to do or assert.
}
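// If the interval is already in the target register and active, just record the
// register as live and move on to the next variable.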
if (interval->physReg == targetReg)
{
if (interval->isActive)
{
assert(targetReg != REG_STK);
assert(interval->assignedReg != nullptr && interval->assignedReg->regNum == targetReg &&
interval->assignedReg->assignedInterval == interval);
liveRegs |= getRegMask(targetReg, interval->registerType);
continue;
}
}
else if (interval->physReg != REG_NA)
{
// This can happen if we are using the locations from a basic block other than the
// immediately preceding one - where the variable was in a different location.
if ((targetReg != REG_STK) || leaveOnStack)
{
// Unassign it from the register (it may get a new register below).
if (interval->assignedReg != nullptr && interval->assignedReg->assignedInterval == interval)
{
interval->isActive = false;
unassignPhysReg(getRegisterRecord(interval->physReg), nullptr);
}
else
{
// This interval was live in this register the last time we saw a reference to it,
// but has since been displaced.
interval->physReg = REG_NA;
}
}
else if (!allocationPassComplete)
{
// Keep the register assignment - if another var has it, it will get unassigned.
// Otherwise, resolution will fix it up later, and it will be more
// likely to match other assignments this way.
targetReg = interval->physReg;
interval->isActive = true;
liveRegs |= getRegMask(targetReg, interval->registerType);
INDEBUG(inactiveRegs |= genRegMask(targetReg));
setVarReg(inVarToRegMap, varIndex, targetReg);
}
else
{
interval->physReg = REG_NA;
}
}
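// The incoming location is a register: make sure the interval and the target RegRecord
// both reflect the assignment, and record the register as live.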
if (targetReg != REG_STK)
{
RegRecord* targetRegRecord = getRegisterRecord(targetReg);
liveRegs |= getRegMask(targetReg, interval->registerType);
if (!allocationPassComplete)
{
updateNextIntervalRef(targetReg, interval);
updateSpillCost(targetReg, interval);
}
if (!interval->isActive)
{
interval->isActive = true;
interval->physReg = targetReg;
interval->assignedReg = targetRegRecord;
}
if (targetRegRecord->assignedInterval != interval)
{
#ifdef TARGET_ARM
// If this is a TYP_DOUBLE interval, and the assigned interval is either null or is TYP_FLOAT,
// we also need to unassign the other half of the register.
// Note that if the assigned interval is TYP_DOUBLE, it will be unassigned below.
if ((interval->registerType == TYP_DOUBLE) &&
((targetRegRecord->assignedInterval == nullptr) ||
(targetRegRecord->assignedInterval->registerType == TYP_FLOAT)))
{
assert(genIsValidDoubleReg(targetReg));
unassignIntervalBlockStart(getSecondHalfRegRec(targetRegRecord),
allocationPassComplete ? nullptr : inVarToRegMap);
}
// If this is a TYP_FLOAT interval, and the assigned interval was TYP_DOUBLE, we also
// need to update the liveRegs to specify that the other half is not live anymore.
// As mentioned above, for TYP_DOUBLE, the other half will be unassigned further below.
if ((interval->registerType == TYP_FLOAT) &&
((targetRegRecord->assignedInterval != nullptr) &&
(targetRegRecord->assignedInterval->registerType == TYP_DOUBLE)))
{
RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(targetRegRecord);
// Use TYP_FLOAT to get the regmask of just the half reg.
liveRegs &= ~getRegMask(anotherHalfRegRec->regNum, TYP_FLOAT);
}
#endif // TARGET_ARM
unassignIntervalBlockStart(targetRegRecord, allocationPassComplete ? nullptr : inVarToRegMap);
assignPhysReg(targetRegRecord, interval);
}
if (interval->recentRefPosition != nullptr && !interval->recentRefPosition->copyReg &&
interval->recentRefPosition->registerAssignment != genRegMask(targetReg))
{
interval->getNextRefPosition()->outOfOrder = true;
}
}
}
// Unassign any registers that are no longer live, and set register state, if allocating.
if (!allocationPassComplete)
{
resetRegState();
setRegsInUse(liveRegs);
}
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
if ((liveRegs & genRegMask(reg)) == 0)
{
makeRegAvailable(reg, physRegRecord->registerType);
Interval* assignedInterval = physRegRecord->assignedInterval;
if (assignedInterval != nullptr)
{
assert(assignedInterval->isLocalVar || assignedInterval->isConstant ||
assignedInterval->IsUpperVector());
if (!assignedInterval->isConstant && assignedInterval->assignedReg == physRegRecord)
{
assignedInterval->isActive = false;
if (assignedInterval->getNextRefPosition() == nullptr)
{
unassignPhysReg(physRegRecord, nullptr);
}
if (!assignedInterval->IsUpperVector())
{
inVarToRegMap[assignedInterval->getVarIndex(compiler)] = REG_STK;
}
}
else
{
// This interval may still be active, but was in another register in an
// intervening block.
updateAssignedInterval(physRegRecord, nullptr, assignedInterval->registerType);
}
#ifdef TARGET_ARM
// unassignPhysReg, above, may have restored a 'previousInterval', in which case we need to
// get the value of 'physRegRecord->assignedInterval' rather than using 'assignedInterval'.
if (physRegRecord->assignedInterval != nullptr)
{
assignedInterval = physRegRecord->assignedInterval;
}
if (assignedInterval->registerType == TYP_DOUBLE)
{
// Skip next float register, because we already addressed a double register
assert(genIsValidDoubleReg(reg));
reg = REG_NEXT(reg);
makeRegAvailable(reg, physRegRecord->registerType);
}
#endif // TARGET_ARM
}
}
#ifdef TARGET_ARM
else
{
Interval* assignedInterval = physRegRecord->assignedInterval;
if (assignedInterval != nullptr && assignedInterval->registerType == TYP_DOUBLE)
{
// Skip next float register, because we already addressed a double register
assert(genIsValidDoubleReg(reg));
reg = REG_NEXT(reg);
}
}
#endif // TARGET_ARM
}
}
//------------------------------------------------------------------------
// processBlockEndLocations: Record the variables occupying registers after completing the current block.
//
// Arguments:
// currentBlock - the block we have just completed.
//
// Return Value:
// None
//
// Notes:
// This must be called both during the allocation and resolution (write-back) phases.
// This is because we need to have the outVarToRegMap locations in order to set the locations
// at successor blocks during allocation time, but if lclVars are spilled after a block has been
// completed, we need to record the REG_STK location for those variables at resolution time.
void LinearScan::processBlockEndLocations(BasicBlock* currentBlock)
{
assert(currentBlock != nullptr && currentBlock->bbNum == curBBNum);
VarToRegMap outVarToRegMap = getOutVarToRegMap(curBBNum);
VarSetOps::AssignNoCopy(compiler, currentLiveVars,
VarSetOps::Intersection(compiler, registerCandidateVars, currentBlock->bbLiveOut));
#ifdef DEBUG
if (getLsraExtendLifeTimes())
{
VarSetOps::Assign(compiler, currentLiveVars, registerCandidateVars);
}
#endif // DEBUG
VarSetOps::Iter iter(compiler, currentLiveVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
Interval* interval = getIntervalForLocalVar(varIndex);
if (interval->isActive)
{
assert(interval->physReg != REG_NA && interval->physReg != REG_STK);
setVarReg(outVarToRegMap, varIndex, interval->physReg);
}
else
{
outVarToRegMap[varIndex] = REG_STK;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Ensure that we have no partially-spilled large vector locals.
assert(!Compiler::varTypeNeedsPartialCalleeSave(interval->registerType) || !interval->isPartiallySpilled);
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
}
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_END_BB));
}
#ifdef DEBUG
void LinearScan::dumpRefPositions(const char* str)
{
printf("------------\n");
printf("REFPOSITIONS %s: \n", str);
printf("------------\n");
for (RefPosition& refPos : refPositions)
{
refPos.dump(this);
}
}
#endif // DEBUG
//------------------------------------------------------------------------
// LinearScan::makeRegisterInactive: Make the interval currently assigned to
// a register inactive.
//
// Arguments:
// physRegRecord - the RegRecord for the register
//
// Return Value:
// None.
//
// Notes:
// It may be that the RegRecord has already been freed, e.g. due to a kill,
// or it may be that the register was a copyReg, so is not the assigned register
// of the Interval currently occupying the register, in which case this method has no effect.
//
void LinearScan::makeRegisterInactive(RegRecord* physRegRecord)
{
Interval* assignedInterval = physRegRecord->assignedInterval;
// It may have already been freed by a "Kill"
if ((assignedInterval != nullptr) && (assignedInterval->physReg == physRegRecord->regNum))
{
assignedInterval->isActive = false;
if (assignedInterval->isConstant)
{
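// Constants are cheap to rematerialize, so clear the next-reference tracking
// and let this register appear fully available.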
clearNextIntervalRef(physRegRecord->regNum, assignedInterval->registerType);
}
}
}
//------------------------------------------------------------------------
// LinearScan::freeRegister: Make a register available for use
//
// Arguments:
// physRegRecord - the RegRecord for the register to be freed.
//
// Return Value:
// None.
//
// Assumptions:
// None.
// It may be that the RegRecord has already been freed, e.g. due to a kill,
// in which case this method has no effect.
//
// Notes:
// If there is currently an Interval assigned to this register, and it has
// more references (i.e. this is a local last-use, but more uses and/or
// defs remain), it will remain assigned to the physRegRecord. However, since
// it is marked inactive, the register will be available, albeit less desirable
// to allocate.
//
void LinearScan::freeRegister(RegRecord* physRegRecord)
{
Interval* assignedInterval = physRegRecord->assignedInterval;
makeRegAvailable(physRegRecord->regNum, physRegRecord->registerType);
clearSpillCost(physRegRecord->regNum, physRegRecord->registerType);
makeRegisterInactive(physRegRecord);
if (assignedInterval != nullptr)
{
// TODO: Under the following conditions we should be just putting it in regsToMakeInactive
// not regsToFree.
//
// We don't unassign in the following conditions:
// - If this is a constant node, that we may encounter again, OR
// - If its recent RefPosition is not a last-use and its next RefPosition is non-null.
// Otherwise, we unassign if there are no more RefPositions, or the next
// one is a def. Note that the latter condition doesn't actually ensure that
// there aren't subsequent uses that could be reached by a value in the assigned
// register, but is merely a heuristic to avoid tying up the register (or using
// it when it's non-optimal). A better alternative would be to use SSA, so that
// we wouldn't unnecessarily link separate live ranges to the same register.
//
RefPosition* nextRefPosition = assignedInterval->getNextRefPosition();
if (!assignedInterval->isConstant && (nextRefPosition == nullptr || RefTypeIsDef(nextRefPosition->refType)))
{
#ifdef TARGET_ARM
assert((assignedInterval->registerType != TYP_DOUBLE) || genIsValidDoubleReg(physRegRecord->regNum));
#endif // TARGET_ARM
unassignPhysReg(physRegRecord, nullptr);
}
}
}
//------------------------------------------------------------------------
// LinearScan::freeRegisters: Free the registers in 'regsToFree'
//
// Arguments:
// regsToFree - the mask of registers to free
//
void LinearScan::freeRegisters(regMaskTP regsToFree)
{
if (regsToFree == RBM_NONE)
{
return;
}
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FREE_REGS));
makeRegsAvailable(regsToFree);
while (regsToFree != RBM_NONE)
{
regMaskTP nextRegBit = genFindLowestBit(regsToFree);
regsToFree &= ~nextRegBit;
regNumber nextReg = genRegNumFromMask(nextRegBit);
RegRecord* regRecord = getRegisterRecord(nextReg);
#ifdef TARGET_ARM
if (regRecord->assignedInterval != nullptr && (regRecord->assignedInterval->registerType == TYP_DOUBLE))
{
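// This register holds a double-width interval; clear the bit for the odd half
// so that we don't try to free it separately.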
assert(genIsValidDoubleReg(nextReg));
regsToFree &= ~(nextRegBit << 1);
}
#endif
freeRegister(regRecord);
}
}
//------------------------------------------------------------------------
// LinearScan::allocateRegisters: Perform the actual register allocation by iterating over
// all of the previously constructed Intervals
//
void LinearScan::allocateRegisters()
{
JITDUMP("*************** In LinearScan::allocateRegisters()\n");
DBEXEC(VERBOSE, lsraDumpIntervals("before allocateRegisters"));
// at start, nothing is active except for register args
for (Interval& interval : intervals)
{
Interval* currentInterval = &interval;
currentInterval->recentRefPosition = nullptr;
currentInterval->isActive = false;
if (currentInterval->isLocalVar)
{
LclVarDsc* varDsc = currentInterval->getLocalVar(compiler);
if (varDsc->lvIsRegArg && currentInterval->firstRefPosition != nullptr)
{
currentInterval->isActive = true;
}
}
}
if (enregisterLocalVars)
{
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars);
unsigned largeVectorVarIndex = 0;
while (largeVectorVarsIter.NextElem(&largeVectorVarIndex))
{
Interval* lclVarInterval = getIntervalForLocalVar(largeVectorVarIndex);
lclVarInterval->isPartiallySpilled = false;
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
}
resetRegState();
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->recentRefPosition = nullptr;
updateNextFixedRef(physRegRecord, physRegRecord->firstRefPosition);
// Is this an incoming arg register? (Note that we don't, currently, consider reassigning
// an incoming arg register as having spill cost.)
Interval* interval = physRegRecord->assignedInterval;
if (interval != nullptr)
{
#ifdef TARGET_ARM
if ((interval->registerType != TYP_DOUBLE) || genIsValidDoubleReg(reg))
#endif // TARGET_ARM
{
updateNextIntervalRef(reg, interval);
updateSpillCost(reg, interval);
setRegInUse(reg, interval->registerType);
INDEBUG(registersToDump |= getRegMask(reg, interval->registerType));
}
}
else
{
clearNextIntervalRef(reg, physRegRecord->registerType);
clearSpillCost(reg, physRegRecord->registerType);
}
}
#ifdef DEBUG
if (VERBOSE)
{
dumpRefPositions("BEFORE ALLOCATION");
dumpVarRefPositions("BEFORE ALLOCATION");
printf("\n\nAllocating Registers\n"
"--------------------\n");
// Start with a small set of commonly used registers, so that we don't keep having to print a new title.
// Include all the arg regs, as they may already have values assigned to them.
registersToDump = LsraLimitSmallIntSet | LsraLimitSmallFPSet | RBM_ARG_REGS;
dumpRegRecordHeader();
// Now print an empty "RefPosition", since we complete the dump of the regs at the beginning of the loop.
printf(indentFormat, "");
}
#endif // DEBUG
BasicBlock* currentBlock = nullptr;
LsraLocation prevLocation = MinLocation;
regMaskTP regsToFree = RBM_NONE;
regMaskTP delayRegsToFree = RBM_NONE;
regMaskTP regsToMakeInactive = RBM_NONE;
regMaskTP delayRegsToMakeInactive = RBM_NONE;
regMaskTP copyRegsToFree = RBM_NONE;
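// The 'delay' masks hold registers that must remain busy through the current location
// (e.g. for delayRegFree RefPositions); they are freed or made inactive at a later location.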
regsInUseThisLocation = RBM_NONE;
regsInUseNextLocation = RBM_NONE;
// This is the most recent RefPosition for which a register was allocated
// - currently only used for DEBUG but maintained in non-debug, for clarity of code
// (and will be optimized away because in non-debug spillAlways() unconditionally returns false)
RefPosition* lastAllocatedRefPosition = nullptr;
bool handledBlockEnd = false;
for (RefPosition& refPositionIterator : refPositions)
{
RefPosition* currentRefPosition = &refPositionIterator;
RefPosition* nextRefPosition = currentRefPosition->nextRefPosition;
// TODO: Can we combine this with the freeing of registers below? It might
// mess with the dump, since this was previously being done before the call below
// to dumpRegRecords.
regMaskTP tempRegsToMakeInactive = (regsToMakeInactive | delayRegsToMakeInactive);
while (tempRegsToMakeInactive != RBM_NONE)
{
regMaskTP nextRegBit = genFindLowestBit(tempRegsToMakeInactive);
tempRegsToMakeInactive &= ~nextRegBit;
regNumber nextReg = genRegNumFromMask(nextRegBit);
RegRecord* regRecord = getRegisterRecord(nextReg);
clearSpillCost(regRecord->regNum, regRecord->registerType);
makeRegisterInactive(regRecord);
}
if (currentRefPosition->nodeLocation > prevLocation)
{
makeRegsAvailable(regsToMakeInactive);
// TODO: Clean this up. We need to make the delayRegs inactive as well, but don't want
// to mark them as free yet.
regsToMakeInactive |= delayRegsToMakeInactive;
regsToMakeInactive = delayRegsToMakeInactive;
delayRegsToMakeInactive = RBM_NONE;
}
#ifdef DEBUG
// Set the activeRefPosition to null until we're done with any boundary handling.
activeRefPosition = nullptr;
if (VERBOSE)
{
// We're really dumping the RegRecords "after" the previous RefPosition, but it's more convenient
// to do this here, since there are a number of "continue"s in this loop.
dumpRegRecords();
}
#endif // DEBUG
// This is the previousRefPosition of the current Referent, if any
RefPosition* previousRefPosition = nullptr;
Interval* currentInterval = nullptr;
Referenceable* currentReferent = nullptr;
RefType refType = currentRefPosition->refType;
currentReferent = currentRefPosition->referent;
if (spillAlways() && lastAllocatedRefPosition != nullptr && !lastAllocatedRefPosition->IsPhysRegRef() &&
!lastAllocatedRefPosition->getInterval()->isInternal &&
(RefTypeIsDef(lastAllocatedRefPosition->refType) || lastAllocatedRefPosition->getInterval()->isLocalVar))
{
assert(lastAllocatedRefPosition->registerAssignment != RBM_NONE);
RegRecord* regRecord = lastAllocatedRefPosition->getInterval()->assignedReg;
unassignPhysReg(regRecord, lastAllocatedRefPosition);
// Now set lastAllocatedRefPosition to null, so that we don't try to spill it again
lastAllocatedRefPosition = nullptr;
}
// We wait to free any registers until we've completed all the
// uses for the current node.
// This avoids reusing registers too soon.
// We free before the last true def (after all the uses & internal
// registers), and then again at the beginning of the next node.
// This is made easier by assigning two LsraLocations per node - one
// for all the uses, internal registers & all but the last def, and
// another for the final def (if any).
LsraLocation currentLocation = currentRefPosition->nodeLocation;
// Free at a new location.
if (currentLocation > prevLocation)
{
// CopyRegs are simply made available - we don't want to make the associated interval inactive.
makeRegsAvailable(copyRegsToFree);
copyRegsToFree = RBM_NONE;
regsInUseThisLocation = regsInUseNextLocation;
regsInUseNextLocation = RBM_NONE;
if ((regsToFree | delayRegsToFree) != RBM_NONE)
{
freeRegisters(regsToFree);
if ((currentLocation > (prevLocation + 1)) && (delayRegsToFree != RBM_NONE))
{
// We should never see a delayReg that is delayed until a Location that has no RefPosition
// (that would be the RefPosition that it was supposed to interfere with).
assert(!"Found a delayRegFree associated with Location with no reference");
// However, to be cautious for the Release build case, we will free them.
freeRegisters(delayRegsToFree);
delayRegsToFree = RBM_NONE;
regsInUseThisLocation = RBM_NONE;
}
regsToFree = delayRegsToFree;
delayRegsToFree = RBM_NONE;
#ifdef DEBUG
// Validate the current state just after we've freed the registers. This ensures that any pending
// freed registers will have had their state updated to reflect the intervals they were holding.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
regMaskTP regMask = genRegMask(reg);
// If this isn't available or if it's still waiting to be freed (i.e. it was in
// delayRegsToFree and so now it's in regsToFree), then skip it.
if ((regMask & (availableIntRegs | availableFloatRegs) & ~regsToFree) == RBM_NONE)
{
continue;
}
RegRecord* physRegRecord = getRegisterRecord(reg);
Interval* assignedInterval = physRegRecord->assignedInterval;
if (assignedInterval != nullptr)
{
bool isAssignedReg = (assignedInterval->physReg == reg);
RefPosition* recentRefPosition = assignedInterval->recentRefPosition;
// If we have a copyReg or a moveReg, we might have assigned this register to an Interval,
// but that isn't considered its assignedReg.
if (recentRefPosition != nullptr)
{
if (recentRefPosition->refType == RefTypeExpUse)
{
// We don't update anything on these, as they're just placeholders to extend the
// lifetime.
continue;
}
// For copyReg or moveReg, we don't have anything further to assert.
if (recentRefPosition->copyReg || recentRefPosition->moveReg)
{
continue;
}
assert(assignedInterval->isConstant == isRegConstant(reg, assignedInterval->registerType));
if (assignedInterval->isActive)
{
// If this is not the register most recently allocated, it must be from a copyReg,
// it was placed there by the inVarToRegMap, or it might be one of the upper vector
// save/restore refPositions.
// In any of these cases it must be a lclVar (or an upper-vector interval for one).
if (!isAssignedToInterval(assignedInterval, physRegRecord))
{
// We'd like to assert that this was either set by the inVarToRegMap, or by
// a copyReg, but we can't traverse backward to check for a copyReg, because
// we only have recentRefPosition, and there may be a previous RefPosition
// at the same Location with a copyReg.
bool sanityCheck = assignedInterval->isLocalVar;
// For upper vector interval, make sure it was one of the save/restore only.
if (assignedInterval->IsUpperVector())
{
sanityCheck |= (recentRefPosition->refType == RefTypeUpperVectorSave) ||
(recentRefPosition->refType == RefTypeUpperVectorRestore);
}
assert(sanityCheck);
}
if (isAssignedReg)
{
assert(nextIntervalRef[reg] == assignedInterval->getNextRefLocation());
assert(!isRegAvailable(reg, assignedInterval->registerType));
assert((recentRefPosition == nullptr) ||
(spillCost[reg] == getSpillWeight(physRegRecord)));
}
else
{
assert((nextIntervalRef[reg] == MaxLocation) ||
isRegBusy(reg, assignedInterval->registerType));
}
}
else
{
if ((assignedInterval->physReg == reg) && !assignedInterval->isConstant)
{
assert(nextIntervalRef[reg] == assignedInterval->getNextRefLocation());
}
else
{
assert(nextIntervalRef[reg] == MaxLocation);
assert(isRegAvailable(reg, assignedInterval->registerType));
assert(spillCost[reg] == 0);
}
}
}
}
else
{
assert(isRegAvailable(reg, physRegRecord->registerType));
assert(!isRegConstant(reg, physRegRecord->registerType));
assert(nextIntervalRef[reg] == MaxLocation);
assert(spillCost[reg] == 0);
}
LsraLocation thisNextFixedRef = physRegRecord->getNextRefLocation();
assert(nextFixedRef[reg] == thisNextFixedRef);
#ifdef TARGET_ARM
// If this is occupied by a double interval, skip the corresponding float reg.
if ((assignedInterval != nullptr) && (assignedInterval->registerType == TYP_DOUBLE))
{
reg = REG_NEXT(reg);
}
#endif
}
#endif // DEBUG
}
}
prevLocation = currentLocation;
// get previous refposition, then current refpos is the new previous
if (currentReferent != nullptr)
{
previousRefPosition = currentReferent->recentRefPosition;
currentReferent->recentRefPosition = currentRefPosition;
}
else
{
assert((refType == RefTypeBB) || (refType == RefTypeKillGCRefs));
}
#ifdef DEBUG
activeRefPosition = currentRefPosition;
// For the purposes of register resolution, we handle the DummyDefs before
// the block boundary - so the RefTypeBB is after all the DummyDefs.
// However, for the purposes of allocation, we want to handle the block
// boundary first, so that we can free any registers occupied by lclVars
// that aren't live in the next block and make them available for the
// DummyDefs.
// If we've already handled the BlockEnd, but now we're seeing the RefTypeBB,
// dump it now.
if ((refType == RefTypeBB) && handledBlockEnd)
{
dumpNewBlock(currentBlock, currentRefPosition->nodeLocation);
}
#endif // DEBUG
if (!handledBlockEnd && (refType == RefTypeBB || refType == RefTypeDummyDef))
{
// Free any delayed regs (now in regsToFree) before processing the block boundary
freeRegisters(regsToFree);
regsToFree = RBM_NONE;
regsInUseThisLocation = RBM_NONE;
regsInUseNextLocation = RBM_NONE;
handledBlockEnd = true;
curBBStartLocation = currentRefPosition->nodeLocation;
if (currentBlock == nullptr)
{
currentBlock = startBlockSequence();
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_START_BB, nullptr, REG_NA, compiler->fgFirstBB));
}
else
{
processBlockEndAllocation(currentBlock);
currentBlock = moveToNextBlock();
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_START_BB, nullptr, REG_NA, currentBlock));
}
}
if (refType == RefTypeBB)
{
handledBlockEnd = false;
continue;
}
if (refType == RefTypeKillGCRefs)
{
spillGCRefs(currentRefPosition);
continue;
}
if (currentRefPosition->isPhysRegRef)
{
RegRecord* regRecord = currentRefPosition->getReg();
Interval* assignedInterval = regRecord->assignedInterval;
updateNextFixedRef(regRecord, currentRefPosition->nextRefPosition);
// If this is a FixedReg, disassociate any inactive constant interval from this register.
// Otherwise, do nothing.
if (refType == RefTypeFixedReg)
{
if (assignedInterval != nullptr && !assignedInterval->isActive && assignedInterval->isConstant)
{
clearConstantReg(regRecord->regNum, assignedInterval->registerType);
regRecord->assignedInterval = nullptr;
spillCost[regRecord->regNum] = 0;
#ifdef TARGET_ARM
// Update overlapping floating point register for TYP_DOUBLE
if (assignedInterval->registerType == TYP_DOUBLE)
{
RegRecord* otherRegRecord = findAnotherHalfRegRec(regRecord);
assert(otherRegRecord->assignedInterval == assignedInterval);
otherRegRecord->assignedInterval = nullptr;
spillCost[otherRegRecord->regNum] = 0;
}
#endif // TARGET_ARM
}
regsInUseThisLocation |= currentRefPosition->registerAssignment;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FIXED_REG, nullptr, currentRefPosition->assignedReg()));
continue;
}
if (refType == RefTypeKill)
{
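// A RefTypeKill frees this register unconditionally: unassign (spilling if needed) any
// interval currently assigned to it and make the register available again.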
if (assignedInterval != nullptr)
{
unassignPhysReg(regRecord, assignedInterval->recentRefPosition);
clearConstantReg(regRecord->regNum, assignedInterval->registerType);
makeRegAvailable(regRecord->regNum, assignedInterval->registerType);
}
clearRegBusyUntilKill(regRecord->regNum);
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum));
continue;
}
}
// If this is an exposed use, do nothing - this is merely a placeholder to attempt to
// ensure that a register is allocated for the full lifetime. The resolution logic
// will take care of moving to the appropriate register if needed.
if (refType == RefTypeExpUse)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_EXP_USE));
currentInterval = currentRefPosition->getInterval();
if (currentInterval->physReg != REG_NA)
{
updateNextIntervalRef(currentInterval->physReg, currentInterval);
}
continue;
}
regNumber assignedRegister = REG_NA;
assert(currentRefPosition->isIntervalRef());
currentInterval = currentRefPosition->getInterval();
assert(currentInterval != nullptr);
assignedRegister = currentInterval->physReg;
// Identify the special cases where we decide up-front not to allocate
bool allocate = true;
bool didDump = false;
if (refType == RefTypeParamDef || refType == RefTypeZeroInit)
{
if (nextRefPosition == nullptr)
{
// If it has no actual references, mark it as "lastUse"; since they're not actually part
// of any flow they won't have been marked during dataflow. Otherwise, if we allocate a
// register we won't unassign it.
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_ZERO_REF, currentInterval));
currentRefPosition->lastUse = true;
}
LclVarDsc* varDsc = currentInterval->getLocalVar(compiler);
assert(varDsc != nullptr);
assert(!blockInfo[compiler->fgFirstBB->bbNum].hasEHBoundaryIn || currentInterval->isWriteThru);
if (blockInfo[compiler->fgFirstBB->bbNum].hasEHBoundaryIn ||
blockInfo[compiler->fgFirstBB->bbNum].hasEHPred)
{
allocate = false;
}
else if (refType == RefTypeParamDef && (varDsc->lvRefCntWtd() <= BB_UNITY_WEIGHT) &&
(!currentRefPosition->lastUse || (currentInterval->physReg == REG_STK)))
{
// If this is a low ref-count parameter, and either it is used (def is not the last use) or it's
// passed on the stack, don't allocate a register.
// Note that if this is an unused register parameter we don't want to set allocate to false because that
// will cause us to allocate stack space to spill it.
allocate = false;
}
else if ((currentInterval->physReg == REG_STK) && nextRefPosition->treeNode->OperIs(GT_BITCAST))
{
// In the case of ABI mismatches, avoid allocating a register only to have to immediately move
// it to a different register file.
allocate = false;
}
else if ((currentInterval->isWriteThru) && (refType == RefTypeZeroInit))
{
// For a RefTypeZeroInit that is a write-thru, there is no need to allocate a register
// right away; it can be assigned when the actual definition occurs.
// In future, see if avoiding allocation for RefTypeZeroInit gives any benefit in general.
allocate = false;
}
if (!allocate)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, currentInterval));
didDump = true;
setIntervalAsSpilled(currentInterval);
if (assignedRegister != REG_NA)
{
clearNextIntervalRef(assignedRegister, currentInterval->registerType);
clearSpillCost(assignedRegister, currentInterval->registerType);
makeRegAvailable(assignedRegister, currentInterval->registerType);
}
}
}
#ifdef FEATURE_SIMD
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
else if (currentInterval->isUpperVector)
{
// This is a save or restore of the upper half of a large vector lclVar.
Interval* lclVarInterval = currentInterval->relatedInterval;
assert(lclVarInterval->isLocalVar);
if (refType == RefTypeUpperVectorSave)
{
if ((lclVarInterval->physReg == REG_NA) ||
(lclVarInterval->isPartiallySpilled && (currentInterval->physReg == REG_STK)))
{
allocate = false;
}
else
{
lclVarInterval->isPartiallySpilled = true;
}
}
else if (refType == RefTypeUpperVectorRestore)
{
assert(currentInterval->isUpperVector);
if (lclVarInterval->isPartiallySpilled)
{
lclVarInterval->isPartiallySpilled = false;
}
else
{
allocate = false;
}
}
}
else if (refType == RefTypeUpperVectorSave)
{
assert(!currentInterval->isLocalVar);
// Note that this case looks a lot like the case below, but in this case we need to spill
// at the previous RefPosition.
// We may want to consider allocating two callee-save registers for this case, but it happens rarely
// enough that it may not warrant the additional complexity.
if (assignedRegister != REG_NA)
{
unassignPhysReg(getRegisterRecord(assignedRegister), currentInterval->firstRefPosition);
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
}
currentRefPosition->registerAssignment = RBM_NONE;
continue;
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#endif // FEATURE_SIMD
if (allocate == false)
{
if (assignedRegister != REG_NA)
{
unassignPhysReg(getRegisterRecord(assignedRegister), currentRefPosition);
}
else if (!didDump)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
didDump = true;
}
currentRefPosition->registerAssignment = RBM_NONE;
continue;
}
if (currentInterval->isSpecialPutArg)
{
assert(!currentInterval->isLocalVar);
Interval* srcInterval = currentInterval->relatedInterval;
assert(srcInterval != nullptr && srcInterval->isLocalVar);
if (refType == RefTypeDef)
{
assert(srcInterval->recentRefPosition->nodeLocation == currentLocation - 1);
RegRecord* physRegRecord = srcInterval->assignedReg;
// For a putarg_reg to be special, its next use location has to be the same
// as fixed reg's next kill location. Otherwise, if source lcl var's next use
// is after the kill of fixed reg but before putarg_reg's next use, fixed reg's
// kill would lead to spill of source but not the putarg_reg if it were treated
// as special.
if (srcInterval->isActive &&
genRegMask(srcInterval->physReg) == currentRefPosition->registerAssignment &&
currentInterval->getNextRefLocation() == nextFixedRef[srcInterval->physReg])
{
assert(physRegRecord->regNum == srcInterval->physReg);
// A special putarg_reg acts as a pass-thru, since both the source lcl var
// and the putarg_reg have the same register allocated. The physical reg
// record continues to point to the source lcl var's interval rather than
// to the putarg_reg's interval. So if the register allocated to the source
// lcl var were spilled (to reallocate it to another tree node) before its
// use at the call node, the spill would apply to the lcl var rather than
// the putarg_reg, since the physical reg record points to the lcl var's
// interval. As a result, the arg reg would get trashed, leading to bad
// codegen. The assumption here is that the source lcl var of a special
// putarg_reg doesn't get spilled and re-allocated prior to its use at the
// call node. This is ensured by marking the physical reg record as busy
// until the next kill.
setRegBusyUntilKill(srcInterval->physReg, srcInterval->registerType);
}
else
{
currentInterval->isSpecialPutArg = false;
}
}
// If this is still a SpecialPutArg, continue;
if (currentInterval->isSpecialPutArg)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, currentInterval,
currentRefPosition->assignedReg()));
continue;
}
}
if (assignedRegister == REG_NA && RefTypeIsUse(refType))
{
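// This use has no register assigned (the value is on the stack), so it will need a reload.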
currentRefPosition->reload = true;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_RELOAD, currentInterval, assignedRegister));
}
regMaskTP assignedRegBit = RBM_NONE;
bool isInRegister = false;
if (assignedRegister != REG_NA)
{
isInRegister = true;
assignedRegBit = genRegMask(assignedRegister);
if (!currentInterval->isActive)
{
// If this is a use, it must have started the block on the stack, but the register
// was available for use so we kept the association.
if (RefTypeIsUse(refType))
{
assert(enregisterLocalVars);
assert(inVarToRegMaps[curBBNum][currentInterval->getVarIndex(compiler)] == REG_STK &&
previousRefPosition->nodeLocation <= curBBStartLocation);
isInRegister = false;
}
else
{
currentInterval->isActive = true;
setRegInUse(assignedRegister, currentInterval->registerType);
updateSpillCost(assignedRegister, currentInterval);
}
updateNextIntervalRef(assignedRegister, currentInterval);
}
assert(currentInterval->assignedReg != nullptr &&
currentInterval->assignedReg->regNum == assignedRegister &&
currentInterval->assignedReg->assignedInterval == currentInterval);
}
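    // If there is a previous RefPosition, check consistency with it; otherwise, for a preassigned
    // register (e.g. a parameter), decide whether it is worth keeping.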
if (previousRefPosition != nullptr)
{
assert(previousRefPosition->nextRefPosition == currentRefPosition);
assert(assignedRegister == REG_NA || assignedRegBit == previousRefPosition->registerAssignment ||
currentRefPosition->outOfOrder || previousRefPosition->copyReg ||
previousRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef);
}
else if (assignedRegister != REG_NA)
{
// Handle the case where this is a preassigned register (i.e. parameter).
// We don't want to actually use the preassigned register if it's not
// going to cover the lifetime - but we had to preallocate it to ensure
// that it remained live.
// TODO-CQ: At some point we may want to refine the analysis here, in case
// it might be beneficial to keep it in this reg for PART of the lifetime
if (currentInterval->isLocalVar)
{
regMaskTP preferences = currentInterval->registerPreferences;
bool keepAssignment = true;
bool matchesPreferences = (preferences & genRegMask(assignedRegister)) != RBM_NONE;
// Will the assigned register cover the lifetime? If not, does it at least
// meet the preferences for the next RefPosition?
LsraLocation nextPhysRegLocation = nextFixedRef[assignedRegister];
if (nextPhysRegLocation <= currentInterval->lastRefPosition->nodeLocation)
{
// Check to see if the existing assignment matches the preferences (e.g. callee save registers)
// and ensure that the next use of this localVar does not occur after the nextPhysRegRefPos
// There must be a next RefPosition, because we know that the Interval extends beyond the
// nextPhysRegRefPos.
assert(nextRefPosition != nullptr);
if (!matchesPreferences || nextPhysRegLocation < nextRefPosition->nodeLocation)
{
keepAssignment = false;
}
else if ((nextRefPosition->registerAssignment != assignedRegBit) &&
(nextPhysRegLocation <= nextRefPosition->getRefEndLocation()))
{
keepAssignment = false;
}
}
else if (refType == RefTypeParamDef && !matchesPreferences)
{
// Don't use the register, even if available, if it doesn't match the preferences.
// Note that this case is only for ParamDefs, for which we haven't yet taken preferences
// into account (we've just automatically got the initial location). In other cases,
// we would already have put it in a preferenced register, if it was available.
// TODO-CQ: Consider expanding this to check availability - that would duplicate
// code here, but otherwise we may wind up in this register anyway.
keepAssignment = false;
}
if (keepAssignment == false)
{
RegRecord* physRegRecord = getRegisterRecord(currentInterval->physReg);
currentRefPosition->registerAssignment = allRegs(currentInterval->registerType);
currentRefPosition->isFixedRegRef = false;
unassignPhysRegNoSpill(physRegRecord);
                // If the preferences are currently set to just this register, reset them to allRegs
                // of the appropriate type (just as we reset the registerAssignment for this
                // RefPosition above).
                // Otherwise, simply remove this register from the preferences, if it's there.
if (currentInterval->registerPreferences == assignedRegBit)
{
currentInterval->registerPreferences = currentRefPosition->registerAssignment;
}
else
{
currentInterval->registerPreferences &= ~assignedRegBit;
}
assignedRegister = REG_NA;
assignedRegBit = RBM_NONE;
}
}
}
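    // If the interval already has an assigned register, decide whether we can keep it, must give it
    // up due to a conflicting fixed reference, or need a copy to a different register.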
if (assignedRegister != REG_NA)
{
RegRecord* physRegRecord = getRegisterRecord(assignedRegister);
assert((assignedRegBit == currentRefPosition->registerAssignment) ||
(physRegRecord->assignedInterval == currentInterval) ||
!isRegInUse(assignedRegister, currentInterval->registerType));
if (conflictingFixedRegReference(assignedRegister, currentRefPosition))
{
// We may have already reassigned the register to the conflicting reference.
// If not, we need to unassign this interval.
if (physRegRecord->assignedInterval == currentInterval)
{
unassignPhysRegNoSpill(physRegRecord);
physRegRecord->assignedInterval = nullptr;
clearConstantReg(assignedRegister, currentInterval->registerType);
}
currentRefPosition->moveReg = true;
assignedRegister = REG_NA;
currentRefPosition->registerAssignment &= ~assignedRegBit;
setIntervalAsSplit(currentInterval);
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_MOVE_REG, currentInterval, assignedRegister));
}
else if ((genRegMask(assignedRegister) & currentRefPosition->registerAssignment) != 0)
{
currentRefPosition->registerAssignment = assignedRegBit;
if (!currentInterval->isActive)
{
// If we've got an exposed use at the top of a block, the
// interval might not have been active. Otherwise if it's a use,
// the interval must be active.
if (refType == RefTypeDummyDef)
{
currentInterval->isActive = true;
assert(getRegisterRecord(assignedRegister)->assignedInterval == currentInterval);
}
else
{
currentRefPosition->reload = true;
}
}
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, currentInterval, assignedRegister));
}
else
{
// It's already in a register, but not one we need.
if (!RefTypeIsDef(currentRefPosition->refType))
{
regNumber copyReg = assignCopyReg(currentRefPosition);
lastAllocatedRefPosition = currentRefPosition;
bool unassign = false;
if (currentInterval->isWriteThru)
{
if (currentRefPosition->refType == RefTypeDef)
{
currentRefPosition->writeThru = true;
}
if (!currentRefPosition->lastUse)
{
if (currentRefPosition->spillAfter)
{
unassign = true;
}
}
}
regMaskTP copyRegMask = getRegMask(copyReg, currentInterval->registerType);
regMaskTP assignedRegMask = getRegMask(assignedRegister, currentInterval->registerType);
regsInUseThisLocation |= copyRegMask | assignedRegMask;
if (currentRefPosition->lastUse)
{
if (currentRefPosition->delayRegFree)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE_DELAYED, currentInterval,
assignedRegister));
delayRegsToFree |= copyRegMask | assignedRegMask;
regsInUseNextLocation |= copyRegMask | assignedRegMask;
}
else
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE, currentInterval, assignedRegister));
regsToFree |= copyRegMask | assignedRegMask;
}
}
else
{
copyRegsToFree |= copyRegMask;
if (currentRefPosition->delayRegFree)
{
regsInUseNextLocation |= copyRegMask | assignedRegMask;
}
}
// If this is a tree temp (non-localVar) interval, we will need an explicit move.
// Note: In theory a moveReg should cause the Interval to now have the new reg as its
// assigned register. However, that's not currently how this works.
// If we ever actually move lclVar intervals instead of copying, this will need to change.
if (!currentInterval->isLocalVar)
{
currentRefPosition->moveReg = true;
currentRefPosition->copyReg = false;
}
clearNextIntervalRef(copyReg, currentInterval->registerType);
clearSpillCost(copyReg, currentInterval->registerType);
updateNextIntervalRef(assignedRegister, currentInterval);
updateSpillCost(assignedRegister, currentInterval);
continue;
}
else
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NEEDS_NEW_REG, nullptr, assignedRegister));
regsToFree |= getRegMask(assignedRegister, currentInterval->registerType);
// We want a new register, but we don't want this to be considered a spill.
assignedRegister = REG_NA;
if (physRegRecord->assignedInterval == currentInterval)
{
unassignPhysRegNoSpill(physRegRecord);
}
}
}
}
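    // If we still don't have a register, allocate one now, unless this RefPosition is reg-optional
    // and allocation is not considered worthwhile.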
if (assignedRegister == REG_NA)
{
if (currentRefPosition->RegOptional())
{
// We can avoid allocating a register if it is a last use requiring a reload.
if (currentRefPosition->lastUse && currentRefPosition->reload)
{
allocate = false;
}
else if (currentInterval->isWriteThru)
{
// Don't allocate if the next reference is in a cold block.
if (nextRefPosition == nullptr || (nextRefPosition->nodeLocation >= firstColdLoc))
{
allocate = false;
}
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE && defined(TARGET_XARCH)
// We can also avoid allocating a register (in fact we don't want to) if we have
// an UpperVectorRestore on xarch where the value is on the stack.
if ((currentRefPosition->refType == RefTypeUpperVectorRestore) && (currentInterval->physReg == REG_NA))
{
assert(currentRefPosition->regOptional);
allocate = false;
}
#endif
#ifdef DEBUG
// Under stress mode, don't allocate registers to RegOptional RefPositions.
if (allocate && regOptionalNoAlloc())
{
allocate = false;
}
#endif
}
RegisterScore registerScore = NONE;
if (allocate)
{
// Allocate a register, if we must, or if it is profitable to do so.
// If we have a fixed reg requirement, and the interval is inactive in another register,
// unassign that register.
if (currentRefPosition->isFixedRegRef && !currentInterval->isActive &&
(currentInterval->assignedReg != nullptr) &&
(currentInterval->assignedReg->assignedInterval == currentInterval) &&
(genRegMask(currentInterval->assignedReg->regNum) != currentRefPosition->registerAssignment))
{
unassignPhysReg(currentInterval->assignedReg, nullptr);
}
            assignedRegister = allocateReg(currentInterval, currentRefPosition DEBUG_ARG(&registerScore));
}
// If no register was found, this RefPosition must not require a register.
if (assignedRegister == REG_NA)
{
assert(currentRefPosition->RegOptional());
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
currentRefPosition->registerAssignment = RBM_NONE;
currentRefPosition->reload = false;
currentInterval->isActive = false;
setIntervalAsSpilled(currentInterval);
}
#ifdef DEBUG
else
{
if (VERBOSE)
{
if (currentInterval->isConstant && (currentRefPosition->treeNode != nullptr) &&
currentRefPosition->treeNode->IsReuseRegVal())
{
dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, currentInterval, assignedRegister, currentBlock,
registerScore);
}
else
{
dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_REG, currentInterval, assignedRegister, currentBlock,
registerScore);
}
}
}
#endif // DEBUG
if (refType == RefTypeDummyDef && assignedRegister != REG_NA)
{
setInVarRegForBB(curBBNum, currentInterval->varNum, assignedRegister);
}
// If we allocated a register, and this is a use of a spilled value,
// it should have been marked for reload above.
if (assignedRegister != REG_NA && RefTypeIsUse(refType) && !isInRegister)
{
assert(currentRefPosition->reload);
}
}
// If we allocated a register, record it
if (assignedRegister != REG_NA)
{
assignedRegBit = genRegMask(assignedRegister);
regMaskTP regMask = getRegMask(assignedRegister, currentInterval->registerType);
regsInUseThisLocation |= regMask;
if (currentRefPosition->delayRegFree)
{
regsInUseNextLocation |= regMask;
}
currentRefPosition->registerAssignment = assignedRegBit;
currentInterval->physReg = assignedRegister;
regsToFree &= ~regMask; // we'll set it again later if it's dead
// If this interval is dead, free the register.
// The interval could be dead if this is a user variable, or if the
// node is being evaluated for side effects, or a call whose result
// is not used, etc.
// If this is an UpperVector we'll neither free it nor preference it
// (it will be freed when it is used).
bool unassign = false;
if (!currentInterval->IsUpperVector())
{
if (currentInterval->isWriteThru)
{
if (currentRefPosition->refType == RefTypeDef)
{
currentRefPosition->writeThru = true;
}
if (!currentRefPosition->lastUse)
{
if (currentRefPosition->spillAfter)
{
unassign = true;
}
}
}
if (currentRefPosition->lastUse || currentRefPosition->nextRefPosition == nullptr)
{
assert(currentRefPosition->isIntervalRef());
// If this isn't a final use, we'll mark the register as available, but keep the association.
if ((refType != RefTypeExpUse) && (currentRefPosition->nextRefPosition == nullptr))
{
unassign = true;
}
else
{
if (currentRefPosition->delayRegFree)
{
delayRegsToMakeInactive |= regMask;
}
else
{
regsToMakeInactive |= regMask;
}
                    // TODO-Cleanup: this makes things consistent with previous behavior, and will enable
                    // preferences to be propagated, but it seems less than ideal.
currentInterval->isActive = false;
}
// Update the register preferences for the relatedInterval, if this is 'preferencedToDef'.
// Don't propagate to subsequent relatedIntervals; that will happen as they are allocated, and we
// don't know yet whether the register will be retained.
if (currentInterval->relatedInterval != nullptr)
{
currentInterval->relatedInterval->updateRegisterPreferences(assignedRegBit);
}
}
if (unassign)
{
if (currentRefPosition->delayRegFree)
{
delayRegsToFree |= regMask;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE_DELAYED));
}
else
{
regsToFree |= regMask;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE));
}
}
}
if (!unassign)
{
updateNextIntervalRef(assignedRegister, currentInterval);
updateSpillCost(assignedRegister, currentInterval);
}
}
lastAllocatedRefPosition = currentRefPosition;
}
#ifdef JIT32_GCENCODER
// For the JIT32_GCENCODER, when lvaKeepAliveAndReportThis is true, we must either keep the "this" pointer
// in the same register for the entire method, or keep it on the stack. Rather than imposing this constraint
// as we allocate, we will force all refs to the stack if it is split or spilled.
if (enregisterLocalVars && compiler->lvaKeepAliveAndReportThis())
{
LclVarDsc* thisVarDsc = compiler->lvaGetDesc(compiler->info.compThisArg);
if (thisVarDsc->lvLRACandidate)
{
Interval* interval = getIntervalForLocalVar(thisVarDsc->lvVarIndex);
if (interval->isSplit)
{
// We'll have to spill this.
setIntervalAsSpilled(interval);
}
if (interval->isSpilled)
{
unsigned prevBBNum = 0;
for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
{
// For the resolution phase, we need to ensure that any block with exposed uses has the
// incoming reg for 'this' as REG_STK.
if (RefTypeIsUse(ref->refType) && (ref->bbNum != prevBBNum))
{
VarToRegMap inVarToRegMap = getInVarToRegMap(ref->bbNum);
setVarReg(inVarToRegMap, thisVarDsc->lvVarIndex, REG_STK);
}
if (ref->RegOptional())
{
ref->registerAssignment = RBM_NONE;
ref->reload = false;
ref->spillAfter = false;
}
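                    // Force any enregistered defs to spill after definition, and any enregistered uses
                    // to reload and then spill again, so the value always lives on the stack.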
switch (ref->refType)
{
case RefTypeDef:
if (ref->registerAssignment != RBM_NONE)
{
ref->spillAfter = true;
}
break;
case RefTypeUse:
if (ref->registerAssignment != RBM_NONE)
{
ref->reload = true;
ref->spillAfter = true;
ref->copyReg = false;
ref->moveReg = false;
}
break;
default:
break;
}
prevBBNum = ref->bbNum;
}
}
}
}
#endif // JIT32_GCENCODER
// Free registers to clear associated intervals for resolution phase
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (getLsraExtendLifeTimes())
{
// If we have extended lifetimes, we need to make sure all the registers are freed.
for (size_t regNumIndex = 0; regNumIndex <= REG_FP_LAST; regNumIndex++)
{
RegRecord& regRecord = physRegs[regNumIndex];
Interval* interval = regRecord.assignedInterval;
if (interval != nullptr)
{
interval->isActive = false;
                unassignPhysReg(&regRecord, nullptr);
}
}
}
else
#endif // DEBUG
{
freeRegisters(regsToFree | delayRegsToFree);
}
#ifdef DEBUG
if (VERBOSE)
{
// Dump the RegRecords after the last RefPosition is handled.
dumpRegRecords();
printf("\n");
dumpRefPositions("AFTER ALLOCATION");
dumpVarRefPositions("AFTER ALLOCATION");
// Dump the intervals that remain active
printf("Active intervals at end of allocation:\n");
// We COULD just reuse the intervalIter from above, but ArrayListIterator doesn't
// provide a Reset function (!) - we'll probably replace this so don't bother
// adding it
for (Interval& interval : intervals)
{
if (interval.isActive)
{
printf("Active ");
interval.dump();
}
}
printf("\n");
}
#endif // DEBUG
}
//-----------------------------------------------------------------------------
// updateAssignedInterval: Update assigned interval of register.
//
// Arguments:
// reg - register to be updated
// interval - interval to be assigned
// regType - register type
//
// Return Value:
// None
//
// Note:
//    For ARM32, the two float registers that make up a double register are updated
//    together when "regType" is TYP_DOUBLE.
//
void LinearScan::updateAssignedInterval(RegRecord* reg, Interval* interval, RegisterType regType)
{
#ifdef TARGET_ARM
// Update overlapping floating point register for TYP_DOUBLE.
Interval* oldAssignedInterval = reg->assignedInterval;
regNumber doubleReg = REG_NA;
if (regType == TYP_DOUBLE)
{
RegRecord* anotherHalfReg = findAnotherHalfRegRec(reg);
doubleReg = genIsValidDoubleReg(reg->regNum) ? reg->regNum : anotherHalfReg->regNum;
anotherHalfReg->assignedInterval = interval;
}
else if ((oldAssignedInterval != nullptr) && (oldAssignedInterval->registerType == TYP_DOUBLE))
{
RegRecord* anotherHalfReg = findAnotherHalfRegRec(reg);
doubleReg = genIsValidDoubleReg(reg->regNum) ? reg->regNum : anotherHalfReg->regNum;
anotherHalfReg->assignedInterval = nullptr;
}
if (doubleReg != REG_NA)
{
clearNextIntervalRef(doubleReg, TYP_DOUBLE);
clearSpillCost(doubleReg, TYP_DOUBLE);
clearConstantReg(doubleReg, TYP_DOUBLE);
}
#endif
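    // Update the assignment, along with the in-use, constant, next-ref and spill-cost state for the register.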
reg->assignedInterval = interval;
if (interval != nullptr)
{
setRegInUse(reg->regNum, interval->registerType);
if (interval->isConstant)
{
setConstantReg(reg->regNum, interval->registerType);
}
else
{
clearConstantReg(reg->regNum, interval->registerType);
}
updateNextIntervalRef(reg->regNum, interval);
updateSpillCost(reg->regNum, interval);
}
else
{
clearNextIntervalRef(reg->regNum, reg->registerType);
clearSpillCost(reg->regNum, reg->registerType);
}
}
//-----------------------------------------------------------------------------
// updatePreviousInterval: Update previous interval of register.
//
// Arguments:
// reg - register to be updated
// interval - interval to be assigned
// regType - register type
//
// Return Value:
// None
//
// Assumptions:
//    For ARM32, when "regType" is TYP_DOUBLE, "reg" should be an even-numbered
//    float register, i.e. the lower half of a double register.
//
// Note:
//    For ARM32, the two float registers that make up a double register are updated
//    together when "regType" is TYP_DOUBLE.
//
void LinearScan::updatePreviousInterval(RegRecord* reg, Interval* interval, RegisterType regType)
{
reg->previousInterval = interval;
#ifdef TARGET_ARM
// Update overlapping floating point register for TYP_DOUBLE
if (regType == TYP_DOUBLE)
{
RegRecord* anotherHalfReg = findAnotherHalfRegRec(reg);
anotherHalfReg->previousInterval = interval;
}
#endif
}
//-----------------------------------------------------------------------------
// writeLocalReg: Write the register assignment for a GT_LCL_VAR node.
//
// Arguments:
// lclNode - The GT_LCL_VAR node
// varNum - The variable number for the register
// reg - The assigned register
//
// Return Value:
// None
//
// Note:
// For a multireg node, 'varNum' will be the field local for the given register.
//
void LinearScan::writeLocalReg(GenTreeLclVar* lclNode, unsigned varNum, regNumber reg)
{
assert((lclNode->GetLclNum() == varNum) == !lclNode->IsMultiReg());
if (lclNode->GetLclNum() == varNum)
{
lclNode->SetRegNum(reg);
}
else
{
assert(compiler->lvaEnregMultiRegVars);
LclVarDsc* parentVarDsc = compiler->lvaGetDesc(lclNode);
assert(parentVarDsc->lvPromoted);
unsigned regIndex = varNum - parentVarDsc->lvFieldLclStart;
assert(regIndex < MAX_MULTIREG_COUNT);
lclNode->SetRegNumByIdx(reg, regIndex);
}
}
//-----------------------------------------------------------------------------
// LinearScan::resolveLocalRef
// Description:
// Update the graph for a local reference.
// Also, track the register (if any) that is currently occupied.
// Arguments:
// treeNode: The lclVar that's being resolved
// currentRefPosition: the RefPosition associated with the treeNode
//
// Details:
// This method is called for each local reference, during the resolveRegisters
// phase of LSRA. It is responsible for keeping the following in sync:
// - varDsc->GetRegNum() (and GetOtherReg()) contain the unique register location.
// If it is not in the same register through its lifetime, it is set to REG_STK.
// - interval->physReg is set to the assigned register
// (i.e. at the code location which is currently being handled by resolveRegisters())
// - interval->isActive is true iff the interval is live and occupying a register
// - interval->isSpilled should have already been set to true if the interval is EVER spilled
// - interval->isSplit is set to true if the interval does not occupy the same
// register throughout the method
// - RegRecord->assignedInterval points to the interval which currently occupies
// the register
// - For each lclVar node:
// - GetRegNum()/gtRegPair is set to the currently allocated register(s).
// - GTF_SPILLED is set on a use if it must be reloaded prior to use.
// - GTF_SPILL is set if it must be spilled after use.
//
// A copyReg is an ugly case where the variable must be in a specific (fixed) register,
// but it currently resides elsewhere. The register allocator must track the use of the
// fixed register, but it marks the lclVar node with the register it currently lives in
// and the code generator does the necessary move.
//
// Before beginning, the varDsc for each parameter must be set to its initial location.
//
// NICE: Consider tracking whether an Interval is always in the same location (register/stack)
// in which case it will require no resolution.
//
void LinearScan::resolveLocalRef(BasicBlock* block, GenTreeLclVar* treeNode, RefPosition* currentRefPosition)
{
assert((block == nullptr) == (treeNode == nullptr));
assert(enregisterLocalVars);
// Is this a tracked local? Or just a register allocated for loading
// a non-tracked one?
Interval* interval = currentRefPosition->getInterval();
assert(interval->isLocalVar);
interval->recentRefPosition = currentRefPosition;
LclVarDsc* varDsc = interval->getLocalVar(compiler);
// NOTE: we set the LastUse flag here unless we are extending lifetimes, in which case we write
// this bit in checkLastUses. This is a bit of a hack, but is necessary because codegen requires
// accurate last use info that is not reflected in the lastUse bit on ref positions when we are extending
// lifetimes. See also the comments in checkLastUses.
if ((treeNode != nullptr) && !extendLifetimes())
{
if (currentRefPosition->lastUse)
{
treeNode->SetLastUse(currentRefPosition->getMultiRegIdx());
}
else
{
treeNode->ClearLastUse(currentRefPosition->getMultiRegIdx());
}
if ((currentRefPosition->registerAssignment != RBM_NONE) && (interval->physReg == REG_NA) &&
currentRefPosition->RegOptional() && currentRefPosition->lastUse &&
(currentRefPosition->refType == RefTypeUse))
{
// This can happen if the incoming location for the block was changed from a register to the stack
// during resolution. In this case we're better off making it contained.
assert(inVarToRegMaps[curBBNum][varDsc->lvVarIndex] == REG_STK);
currentRefPosition->registerAssignment = RBM_NONE;
writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA);
}
}
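    // If no register was assigned, this must be a reg-optional reference to a spilled interval;
    // it remains on the stack.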
if (currentRefPosition->registerAssignment == RBM_NONE)
{
assert(currentRefPosition->RegOptional());
assert(interval->isSpilled);
varDsc->SetRegNum(REG_STK);
if (interval->assignedReg != nullptr && interval->assignedReg->assignedInterval == interval)
{
updateAssignedInterval(interval->assignedReg, nullptr, interval->registerType);
}
interval->assignedReg = nullptr;
interval->physReg = REG_NA;
interval->isActive = false;
        // Set this as contained if it is not a multi-reg (we could potentially mark it as contained
        // if all uses are from spill, but that adds complexity).
if ((currentRefPosition->refType == RefTypeUse) && !treeNode->IsMultiReg())
{
assert(treeNode != nullptr);
treeNode->SetContained();
}
return;
}
// In most cases, assigned and home registers will be the same
// The exception is the copyReg case, where we've assigned a register
// for a specific purpose, but will be keeping the register assignment
regNumber assignedReg = currentRefPosition->assignedReg();
regNumber homeReg = assignedReg;
// Undo any previous association with a physical register, UNLESS this
// is a copyReg
if (!currentRefPosition->copyReg)
{
regNumber oldAssignedReg = interval->physReg;
if (oldAssignedReg != REG_NA && assignedReg != oldAssignedReg)
{
RegRecord* oldRegRecord = getRegisterRecord(oldAssignedReg);
if (oldRegRecord->assignedInterval == interval)
{
updateAssignedInterval(oldRegRecord, nullptr, interval->registerType);
}
}
}
if (currentRefPosition->refType == RefTypeUse && !currentRefPosition->reload)
{
// Was this spilled after our predecessor was scheduled?
if (interval->physReg == REG_NA)
{
assert(inVarToRegMaps[curBBNum][varDsc->lvVarIndex] == REG_STK);
currentRefPosition->reload = true;
}
}
bool reload = currentRefPosition->reload;
bool spillAfter = currentRefPosition->spillAfter;
bool writeThru = currentRefPosition->writeThru;
// In the reload case we either:
// - Set the register to REG_STK if it will be referenced only from the home location, or
// - Set the register to the assigned register and set GTF_SPILLED if it must be loaded into a register.
if (reload)
{
assert(currentRefPosition->refType != RefTypeDef);
assert(interval->isSpilled);
varDsc->SetRegNum(REG_STK);
if (!spillAfter)
{
interval->physReg = assignedReg;
}
// If there is no treeNode, this must be a RefTypeExpUse, in
// which case we did the reload already
if (treeNode != nullptr)
{
treeNode->gtFlags |= GTF_SPILLED;
if (treeNode->IsMultiReg())
{
treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx());
}
if (spillAfter)
{
if (currentRefPosition->RegOptional())
{
// This is a use of lclVar that is flagged as reg-optional
// by lower/codegen and marked for both reload and spillAfter.
// In this case we can avoid unnecessary reload and spill
// by setting reg on lclVar to REG_STK and reg on tree node
// to REG_NA. Codegen will generate the code by considering
// it as a contained memory operand.
//
                    // Note that varDsc->GetRegNum() is already set to REG_STK above.
interval->physReg = REG_NA;
writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA);
treeNode->gtFlags &= ~GTF_SPILLED;
treeNode->SetContained();
// We don't support RegOptional for multi-reg localvars.
assert(!treeNode->IsMultiReg());
}
else
{
treeNode->gtFlags |= GTF_SPILL;
if (treeNode->IsMultiReg())
{
treeNode->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx());
}
}
}
}
else
{
assert(currentRefPosition->refType == RefTypeExpUse);
}
}
else if (spillAfter && !RefTypeIsUse(currentRefPosition->refType) && (treeNode != nullptr) &&
(!treeNode->IsMultiReg() || treeNode->gtGetOp1()->IsMultiRegNode()))
{
// In the case of a pure def, don't bother spilling - just assign it to the
// stack. However, we need to remember that it was spilled.
// We can't do this in the case of a multi-reg node with a non-multireg source as
// we need the register to extract into.
assert(interval->isSpilled);
varDsc->SetRegNum(REG_STK);
interval->physReg = REG_NA;
writeLocalReg(treeNode->AsLclVar(), interval->varNum, REG_NA);
}
else // Not reload and Not pure-def that's spillAfter
{
if (currentRefPosition->copyReg || currentRefPosition->moveReg)
{
// For a copyReg or moveReg, we have two cases:
// - In the first case, we have a fixedReg - i.e. a register which the code
// generator is constrained to use.
// The code generator will generate the appropriate move to meet the requirement.
// - In the second case, we were forced to use a different register because of
// interference (or JitStressRegs).
// In this case, we generate a GT_COPY.
// In either case, we annotate the treeNode with the register in which the value
// currently lives. For moveReg, the homeReg is the new register (as assigned above).
// But for copyReg, the homeReg remains unchanged.
assert(treeNode != nullptr);
writeLocalReg(treeNode->AsLclVar(), interval->varNum, interval->physReg);
if (currentRefPosition->copyReg)
{
homeReg = interval->physReg;
}
else
{
assert(interval->isSplit);
interval->physReg = assignedReg;
}
if (!currentRefPosition->isFixedRegRef || currentRefPosition->moveReg)
{
// This is the second case, where we need to generate a copy
insertCopyOrReload(block, treeNode, currentRefPosition->getMultiRegIdx(), currentRefPosition);
}
}
else
{
interval->physReg = assignedReg;
if (!interval->isSpilled && !interval->isSplit)
{
if (varDsc->GetRegNum() != REG_STK)
{
// If the register assignments don't match, then this interval is split.
if (varDsc->GetRegNum() != assignedReg)
{
setIntervalAsSplit(interval);
varDsc->SetRegNum(REG_STK);
}
}
else
{
varDsc->SetRegNum(assignedReg);
}
}
}
if (spillAfter)
{
if (treeNode != nullptr)
{
treeNode->gtFlags |= GTF_SPILL;
if (treeNode->IsMultiReg())
{
treeNode->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx());
}
}
assert(interval->isSpilled);
interval->physReg = REG_NA;
varDsc->SetRegNum(REG_STK);
}
if (writeThru && (treeNode != nullptr))
{
// This is a def of a write-thru EH var (only defs are marked 'writeThru').
treeNode->gtFlags |= GTF_SPILL;
// We also mark writeThru defs that are not last-use with GTF_SPILLED to indicate that they are conceptually
// spilled and immediately "reloaded", i.e. the register remains live.
// Note that we can have a "last use" write that has no exposed uses in the standard
// (non-eh) control flow, but that may be used on an exception path. Hence the need
// to retain these defs, and to ensure that they write.
if (!currentRefPosition->lastUse)
{
treeNode->gtFlags |= GTF_SPILLED;
if (treeNode->IsMultiReg())
{
treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx());
}
}
}
if (currentRefPosition->singleDefSpill && (treeNode != nullptr))
{
// This is the first (and only) def of a single-def var (only defs are marked 'singleDefSpill').
// Mark it as GTF_SPILL, so it is spilled immediately to the stack at definition and
// GTF_SPILLED, so the variable stays live in the register.
//
// TODO: This approach would still create the resolution moves but during codegen, will check for
// `lvSpillAtSingleDef` to decide whether to generate spill or not. In future, see if there is some
// better way to avoid resolution moves, perhaps by updating the varDsc->SetRegNum(REG_STK) in this
// method?
treeNode->gtFlags |= GTF_SPILL;
treeNode->gtFlags |= GTF_SPILLED;
if (treeNode->IsMultiReg())
{
treeNode->SetRegSpillFlagByIdx(GTF_SPILLED, currentRefPosition->getMultiRegIdx());
}
varDsc->lvSpillAtSingleDef = true;
}
}
// Update the physRegRecord for the register, so that we know what vars are in
// regs at the block boundaries
RegRecord* physRegRecord = getRegisterRecord(homeReg);
if (spillAfter || currentRefPosition->lastUse)
{
interval->isActive = false;
interval->assignedReg = nullptr;
interval->physReg = REG_NA;
updateAssignedInterval(physRegRecord, nullptr, interval->registerType);
}
else
{
interval->isActive = true;
interval->assignedReg = physRegRecord;
updateAssignedInterval(physRegRecord, interval, interval->registerType);
}
}
void LinearScan::writeRegisters(RefPosition* currentRefPosition, GenTree* tree)
{
lsraAssignRegToTree(tree, currentRefPosition->assignedReg(), currentRefPosition->getMultiRegIdx());
}
//------------------------------------------------------------------------
// insertCopyOrReload: Insert a copy in the case where a tree node value must be moved
// to a different register at the point of use (GT_COPY), or it is reloaded to a different register
// than the one it was spilled from (GT_RELOAD).
//
// Arguments:
// block - basic block in which GT_COPY/GT_RELOAD is inserted.
// tree - This is the node to copy or reload.
// Insert copy or reload node between this node and its parent.
// multiRegIdx - register position of tree node for which copy or reload is needed.
// refPosition - The RefPosition at which copy or reload will take place.
//
// Notes:
// The GT_COPY or GT_RELOAD will be inserted in the proper spot in execution order where the reload is to occur.
//
// For example, for this tree (numbers are execution order, lower is earlier and higher is later):
//
// +---------+----------+
// | GT_ADD (3) |
// +---------+----------+
// |
// / '\'
// / '\'
// / '\'
// +-------------------+ +----------------------+
// | x (1) | "tree" | y (2) |
// +-------------------+ +----------------------+
//
// generate this tree:
//
// +---------+----------+
// | GT_ADD (4) |
// +---------+----------+
// |
// / '\'
// / '\'
// / '\'
// +-------------------+ +----------------------+
// | GT_RELOAD (3) | | y (2) |
// +-------------------+ +----------------------+
// |
// +-------------------+
// | x (1) | "tree"
// +-------------------+
//
// Note in particular that the GT_RELOAD node gets inserted in execution order immediately before the parent of "tree",
// which seems a bit weird since normally a node's parent (in this case, the parent of "x", GT_RELOAD in the "after"
// picture) immediately follows all of its children (that is, normally the execution ordering is postorder).
// The ordering must be this weird "out of normal order" way because the "x" node is being spilled, probably
// because the expression in the tree represented above by "y" has high register requirements. We don't want
// to reload immediately, of course. So we put GT_RELOAD where the reload should actually happen.
//
// Note that GT_RELOAD is required when we reload to a different register than the one we spilled to. It can also be
// used if we reload to the same register. Normally, though, in that case we just mark the node with GTF_SPILLED,
// and the unspilling code automatically reuses the same register, and does the reload when it notices that flag
// when considering a node's operands.
//
void LinearScan::insertCopyOrReload(BasicBlock* block, GenTree* tree, unsigned multiRegIdx, RefPosition* refPosition)
{
LIR::Range& blockRange = LIR::AsRange(block);
LIR::Use treeUse;
bool foundUse = blockRange.TryGetUse(tree, &treeUse);
assert(foundUse);
GenTree* parent = treeUse.User();
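    // Decide whether to insert a GT_RELOAD (the value must be reloaded from its spill location)
    // or a GT_COPY (the value is moved to a different register).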
genTreeOps oper;
if (refPosition->reload)
{
oper = GT_RELOAD;
}
else
{
oper = GT_COPY;
INTRACK_STATS(updateLsraStat(STAT_COPY_REG, block->bbNum));
}
// If the parent is a reload/copy node, then tree must be a multi-reg node
// that has already had one of its registers spilled.
// It is possible that one of its RefTypeDef positions got spilled and the next
// use of it requires it to be in a different register.
//
// In this case set the i'th position reg of reload/copy node to the reg allocated
// for copy/reload refPosition. Essentially a copy/reload node will have a reg
// for each multi-reg position of its child. If there is a valid reg in i'th
// position of GT_COPY or GT_RELOAD node then the corresponding result of its
// child needs to be copied or reloaded to that reg.
if (parent->IsCopyOrReload())
{
noway_assert(parent->OperGet() == oper);
noway_assert(tree->IsMultiRegNode());
GenTreeCopyOrReload* copyOrReload = parent->AsCopyOrReload();
noway_assert(copyOrReload->GetRegNumByIdx(multiRegIdx) == REG_NA);
copyOrReload->SetRegNumByIdx(refPosition->assignedReg(), multiRegIdx);
}
else
{
var_types regType = tree->TypeGet();
if ((regType == TYP_STRUCT) && !tree->IsMultiRegNode())
{
assert(compiler->compEnregStructLocals());
assert(tree->IsLocal());
const GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
            // We create struct copies with a primitive type, so we don't bother the copy node with parsing
            // the struct handle. Note that for a multiReg node we keep each regType in the tree and don't
            // need this.
regType = varDsc->GetRegisterType(lcl);
assert(regType != TYP_UNDEF);
}
// Create the new node, with "tree" as its only child.
GenTreeCopyOrReload* newNode = new (compiler, oper) GenTreeCopyOrReload(oper, regType, tree);
assert(refPosition->registerAssignment != RBM_NONE);
SetLsraAdded(newNode);
newNode->SetRegNumByIdx(refPosition->assignedReg(), multiRegIdx);
if (refPosition->copyReg)
{
// This is a TEMPORARY copy
assert(isCandidateLocalRef(tree) || tree->IsMultiRegLclVar());
newNode->SetLastUse(multiRegIdx);
}
// Insert the copy/reload after the spilled node and replace the use of the original node with a use
// of the copy/reload.
blockRange.InsertAfter(tree, newNode);
treeUse.ReplaceWith(newNode);
}
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
//------------------------------------------------------------------------
// insertUpperVectorSave: Insert code to save the upper half of a vector that lives
// in a callee-save register at the point of a kill (the upper half is
// not preserved).
//
// Arguments:
// tree - This is the node before which we will insert the Save.
// It will be a call or some node that turns into a call.
// refPosition - The RefTypeUpperVectorSave RefPosition.
//    upperVectorInterval - The Interval for the upper half of the large vector lclVar.
// block - the BasicBlock containing the call.
//
void LinearScan::insertUpperVectorSave(GenTree* tree,
RefPosition* refPosition,
Interval* upperVectorInterval,
BasicBlock* block)
{
JITDUMP("Inserting UpperVectorSave for RP #%d before %d.%s:\n", refPosition->rpNum, tree->gtTreeID,
GenTree::OpName(tree->gtOper));
Interval* lclVarInterval = upperVectorInterval->relatedInterval;
assert(lclVarInterval->isLocalVar == true);
assert(refPosition->getInterval() == upperVectorInterval);
regNumber lclVarReg = lclVarInterval->physReg;
if (lclVarReg == REG_NA)
{
return;
}
#ifdef DEBUG
if (tree->IsCall())
{
        // Make sure that we do not insert a vector save before calls that do not return.
assert(!tree->AsCall()->IsNoReturn());
}
#endif
LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarInterval->varNum);
assert(Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()));
// On Arm64, we must always have a register to save the upper half,
// while on x86 we can spill directly to memory.
regNumber spillReg = refPosition->assignedReg();
#ifdef TARGET_ARM64
bool spillToMem = refPosition->spillAfter;
assert(spillReg != REG_NA);
#else
bool spillToMem = (spillReg == REG_NA);
assert(!refPosition->spillAfter);
#endif
LIR::Range& blockRange = LIR::AsRange(block);
// Insert the save before the call.
GenTree* saveLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, varDsc->lvType);
saveLcl->SetRegNum(lclVarReg);
SetLsraAdded(saveLcl);
GenTreeSIMD* simdNode = compiler->gtNewSIMDNode(LargeVectorSaveType, saveLcl, SIMDIntrinsicUpperSave,
varDsc->GetSimdBaseJitType(), genTypeSize(varDsc));
if (simdNode->GetSimdBaseJitType() == CORINFO_TYPE_UNDEF)
{
// There are a few scenarios where we can get a LCL_VAR which
// doesn't know the underlying baseType. In that scenario, we
// will just lie and say it is a float. Codegen doesn't actually
        // care what the type is, but this avoids an assert that would
        // otherwise fire in the more general checks that happen.
simdNode->SetSimdBaseJitType(CORINFO_TYPE_FLOAT);
}
SetLsraAdded(simdNode);
simdNode->SetRegNum(spillReg);
if (spillToMem)
{
simdNode->gtFlags |= GTF_SPILL;
upperVectorInterval->physReg = REG_NA;
}
else
{
assert((genRegMask(spillReg) & RBM_FLT_CALLEE_SAVED) != RBM_NONE);
upperVectorInterval->physReg = spillReg;
}
blockRange.InsertBefore(tree, LIR::SeqTree(compiler, simdNode));
DISPTREE(simdNode);
JITDUMP("\n");
}
//------------------------------------------------------------------------
// insertUpperVectorRestore: Insert code to restore the upper half of a vector that has been partially spilled.
//
// Arguments:
// tree - This is the node for which we will insert the Restore.
// If non-null, it will be a use of the large vector lclVar.
// If null, the Restore will be added to the end of the block.
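//    refPosition         - The RefTypeUpperVectorRestore RefPosition.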
// upperVectorInterval - The Interval for the upper vector for the lclVar.
// block - the BasicBlock into which we will be inserting the code.
//
// Notes:
// In the case where 'tree' is non-null, we will insert the restore just prior to
// its use, in order to ensure the proper ordering.
//
void LinearScan::insertUpperVectorRestore(GenTree* tree,
RefPosition* refPosition,
Interval* upperVectorInterval,
BasicBlock* block)
{
JITDUMP("Adding UpperVectorRestore for RP #%d ", refPosition->rpNum);
Interval* lclVarInterval = upperVectorInterval->relatedInterval;
assert(lclVarInterval->isLocalVar == true);
regNumber lclVarReg = lclVarInterval->physReg;
// We should not call this method if the lclVar is not in a register (we should have simply marked the entire
// lclVar as spilled).
assert(lclVarReg != REG_NA);
LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarInterval->varNum);
assert(Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()));
GenTree* restoreLcl = nullptr;
restoreLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, varDsc->lvType);
restoreLcl->SetRegNum(lclVarReg);
SetLsraAdded(restoreLcl);
GenTreeSIMD* simdNode = compiler->gtNewSIMDNode(varDsc->TypeGet(), restoreLcl, SIMDIntrinsicUpperRestore,
varDsc->GetSimdBaseJitType(), genTypeSize(varDsc->lvType));
if (simdNode->GetSimdBaseJitType() == CORINFO_TYPE_UNDEF)
{
// There are a few scenarios where we can get a LCL_VAR which
// doesn't know the underlying baseType. In that scenario, we
// will just lie and say it is a float. Codegen doesn't actually
        // care what the type is, but this avoids an assert that would
        // otherwise fire in the more general checks that happen.
simdNode->SetSimdBaseJitType(CORINFO_TYPE_FLOAT);
}
regNumber restoreReg = upperVectorInterval->physReg;
SetLsraAdded(simdNode);
if (restoreReg == REG_NA)
{
// We need a stack location for this.
assert(lclVarInterval->isSpilled);
#ifdef TARGET_AMD64
assert(refPosition->assignedReg() == REG_NA);
simdNode->gtFlags |= GTF_NOREG_AT_USE;
#else
simdNode->gtFlags |= GTF_SPILLED;
assert(refPosition->assignedReg() != REG_NA);
restoreReg = refPosition->assignedReg();
#endif
}
simdNode->SetRegNum(restoreReg);
LIR::Range& blockRange = LIR::AsRange(block);
JITDUMP("Adding UpperVectorRestore ");
if (tree != nullptr)
{
JITDUMP("before %d.%s:\n", tree->gtTreeID, GenTree::OpName(tree->gtOper));
LIR::Use treeUse;
bool foundUse = blockRange.TryGetUse(tree, &treeUse);
assert(foundUse);
// We need to insert the restore prior to the use, not (necessarily) immediately after the lclVar.
blockRange.InsertBefore(treeUse.User(), LIR::SeqTree(compiler, simdNode));
}
else
{
JITDUMP("at end of " FMT_BB ":\n", block->bbNum);
if (block->KindIs(BBJ_COND, BBJ_SWITCH))
{
noway_assert(!blockRange.IsEmpty());
GenTree* branch = blockRange.LastNode();
assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE ||
branch->OperGet() == GT_SWITCH);
blockRange.InsertBefore(branch, LIR::SeqTree(compiler, simdNode));
}
else
{
assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS));
blockRange.InsertAtEnd(LIR::SeqTree(compiler, simdNode));
}
}
DISPTREE(simdNode);
JITDUMP("\n");
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
//------------------------------------------------------------------------
// initMaxSpill: Initializes the LinearScan members used to track the max number
// of concurrent spills. This is needed so that we can set the
// fields in Compiler, so that the code generator, in turn can
// allocate the right number of spill locations.
//
// Arguments:
// None.
//
// Return Value:
// None.
//
// Assumptions:
// This is called before any calls to updateMaxSpill().
void LinearScan::initMaxSpill()
{
needDoubleTmpForFPCall = false;
needFloatTmpForFPCall = false;
for (int i = 0; i < TYP_COUNT; i++)
{
maxSpill[i] = 0;
currentSpill[i] = 0;
}
}
//------------------------------------------------------------------------
// recordMaxSpill: Sets the fields in Compiler for the max number of concurrent spills.
// (See the comment on initMaxSpill.)
//
// Arguments:
// None.
//
// Return Value:
// None.
//
// Assumptions:
// This is called after updateMaxSpill() has been called for all "real"
// RefPositions.
void LinearScan::recordMaxSpill()
{
// Note: due to the temp normalization process (see tmpNormalizeType)
// only a few types should actually be seen here.
JITDUMP("Recording the maximum number of concurrent spills:\n");
#ifdef TARGET_X86
var_types returnType = RegSet::tmpNormalizeType(compiler->info.compRetType);
if (needDoubleTmpForFPCall || (returnType == TYP_DOUBLE))
{
JITDUMP("Adding a spill temp for moving a double call/return value between xmm reg and x87 stack.\n");
maxSpill[TYP_DOUBLE] += 1;
}
if (needFloatTmpForFPCall || (returnType == TYP_FLOAT))
{
JITDUMP("Adding a spill temp for moving a float call/return value between xmm reg and x87 stack.\n");
maxSpill[TYP_FLOAT] += 1;
}
#endif // TARGET_X86
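    // Pre-allocate the spill temps for each (normalized) type that needs them.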
compiler->codeGen->regSet.tmpBeginPreAllocateTemps();
for (int i = 0; i < TYP_COUNT; i++)
{
if (var_types(i) != RegSet::tmpNormalizeType(var_types(i)))
{
// Only normalized types should have anything in the maxSpill array.
// We assume here that if type 'i' does not normalize to itself, then
// nothing else normalizes to 'i', either.
assert(maxSpill[i] == 0);
}
if (maxSpill[i] != 0)
{
JITDUMP(" %s: %d\n", varTypeName(var_types(i)), maxSpill[i]);
compiler->codeGen->regSet.tmpPreAllocateTemps(var_types(i), maxSpill[i]);
}
}
JITDUMP("\n");
}
//------------------------------------------------------------------------
// updateMaxSpill: Update the maximum number of concurrent spills
//
// Arguments:
// refPosition - the current RefPosition being handled
//
// Return Value:
// None.
//
// Assumptions:
// The RefPosition has an associated interval (getInterval() will
// otherwise assert).
//
// Notes:
// This is called for each "real" RefPosition during the writeback
// phase of LSRA. It keeps track of how many concurrently-live
// spills there are, and the largest number seen so far.
void LinearScan::updateMaxSpill(RefPosition* refPosition)
{
RefType refType = refPosition->refType;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if ((refType == RefTypeUpperVectorSave) || (refType == RefTypeUpperVectorRestore))
{
Interval* interval = refPosition->getInterval();
        // If this is not an 'upperVector', it must be a tree temp that has already been
        // (fully) spilled.
if (!interval->isUpperVector)
{
assert(interval->firstRefPosition->spillAfter);
}
else
{
// The UpperVector RefPositions spill to the localVar's home location.
Interval* lclVarInterval = interval->relatedInterval;
assert(lclVarInterval->isSpilled || (!refPosition->spillAfter && !refPosition->reload));
}
return;
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
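    // Only RefPositions that spill, reload, or are reg-optional and left without a register
    // affect the spill temp counts.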
if (refPosition->spillAfter || refPosition->reload ||
(refPosition->RegOptional() && refPosition->assignedReg() == REG_NA))
{
Interval* interval = refPosition->getInterval();
if (!interval->isLocalVar)
{
GenTree* treeNode = refPosition->treeNode;
if (treeNode == nullptr)
{
assert(RefTypeIsUse(refType));
treeNode = interval->firstRefPosition->treeNode;
}
assert(treeNode != nullptr);
// The tmp allocation logic 'normalizes' types to a small number of
// types that need distinct stack locations from each other.
// Those types are currently gc refs, byrefs, <= 4 byte non-GC items,
// 8-byte non-GC items, and 16-byte or 32-byte SIMD vectors.
// LSRA is agnostic to those choices but needs
// to know what they are here.
var_types type;
if (!treeNode->IsMultiRegNode())
{
type = getDefType(treeNode);
}
else
{
type = treeNode->GetRegTypeByIndex(refPosition->getMultiRegIdx());
}
type = RegSet::tmpNormalizeType(type);
if (refPosition->spillAfter && !refPosition->reload)
{
currentSpill[type]++;
if (currentSpill[type] > maxSpill[type])
{
maxSpill[type] = currentSpill[type];
}
}
else if (refPosition->reload)
{
assert(currentSpill[type] > 0);
currentSpill[type]--;
}
else if (refPosition->RegOptional() && refPosition->assignedReg() == REG_NA)
{
                // This is a spill temp that is not getting reloaded into a register because it is
                // marked as allocate-if-profitable and is being used from its memory location.
                // To properly account for the max spill of this type, we decrement the spill count.
assert(RefTypeIsUse(refType));
assert(currentSpill[type] > 0);
currentSpill[type]--;
}
JITDUMP(" Max spill for %s is %d\n", varTypeName(type), maxSpill[type]);
}
}
}
// This is the final phase of register allocation. It writes the register assignments to
// the tree, and performs resolution across joins and backedges.
//
void LinearScan::resolveRegisters()
{
// Iterate over the tree and the RefPositions in lockstep
// - annotate the tree with register assignments by setting GetRegNum() or gtRegPair (for longs)
// on the tree node
// - track globally-live var locations
// - add resolution points at split/merge/critical points as needed
// Need to use the same traversal order as the one that assigns the location numbers.
// Dummy RefPositions have been added at any split, join or critical edge, at the
// point where resolution may be required. These are located:
// - for a split, at the top of the non-adjacent block
// - for a join, at the bottom of the non-adjacent joining block
// - for a critical edge, at the top of the target block of each critical
// edge.
// Note that a target block may have multiple incoming critical or split edges
//
// These RefPositions record the expected location of the Interval at that point.
// At each branch, we identify the location of each liveOut interval, and check
// against the RefPositions at the target.
BasicBlock* block;
LsraLocation currentLocation = MinLocation;
// Clear register assignments - these will be reestablished as lclVar defs (including RefTypeParamDefs)
// are encountered.
if (enregisterLocalVars)
{
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
Interval* assignedInterval = physRegRecord->assignedInterval;
if (assignedInterval != nullptr)
{
assignedInterval->assignedReg = nullptr;
assignedInterval->physReg = REG_NA;
}
physRegRecord->assignedInterval = nullptr;
physRegRecord->recentRefPosition = nullptr;
}
// Clear "recentRefPosition" for lclVar intervals
for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
{
if (localVarIntervals[varIndex] != nullptr)
{
localVarIntervals[varIndex]->recentRefPosition = nullptr;
localVarIntervals[varIndex]->isActive = false;
}
else
{
assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate);
}
}
}
// handle incoming arguments and special temps
RefPositionIterator refPosIterator = refPositions.begin();
RefPosition* currentRefPosition = &refPosIterator;
if (enregisterLocalVars)
{
VarToRegMap entryVarToRegMap = inVarToRegMaps[compiler->fgFirstBB->bbNum];
for (; refPosIterator != refPositions.end() &&
(currentRefPosition->refType == RefTypeParamDef || currentRefPosition->refType == RefTypeZeroInit);
++refPosIterator, currentRefPosition = &refPosIterator)
{
Interval* interval = currentRefPosition->getInterval();
assert(interval != nullptr && interval->isLocalVar);
resolveLocalRef(nullptr, nullptr, currentRefPosition);
regNumber reg = REG_STK;
int varIndex = interval->getVarIndex(compiler);
if (!currentRefPosition->spillAfter && currentRefPosition->registerAssignment != RBM_NONE)
{
reg = currentRefPosition->assignedReg();
}
else
{
reg = REG_STK;
interval->isActive = false;
}
setVarReg(entryVarToRegMap, varIndex, reg);
}
}
else
{
assert(refPosIterator == refPositions.end() ||
(refPosIterator->refType != RefTypeParamDef && refPosIterator->refType != RefTypeZeroInit));
}
// write back assignments
for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
{
assert(curBBNum == block->bbNum);
if (enregisterLocalVars)
{
// Record the var locations at the start of this block.
// (If it's fgFirstBB, we've already done that above, see entryVarToRegMap)
curBBStartLocation = currentRefPosition->nodeLocation;
if (block != compiler->fgFirstBB)
{
processBlockStartLocations(block);
}
// Handle the DummyDefs, updating the incoming var location.
for (; refPosIterator != refPositions.end() && currentRefPosition->refType == RefTypeDummyDef;
++refPosIterator, currentRefPosition = &refPosIterator)
{
assert(currentRefPosition->isIntervalRef());
// Don't mark dummy defs as reload
currentRefPosition->reload = false;
resolveLocalRef(nullptr, nullptr, currentRefPosition);
regNumber reg;
if (currentRefPosition->registerAssignment != RBM_NONE)
{
reg = currentRefPosition->assignedReg();
}
else
{
reg = REG_STK;
currentRefPosition->getInterval()->isActive = false;
}
setInVarRegForBB(curBBNum, currentRefPosition->getInterval()->varNum, reg);
}
}
// The next RefPosition should be for the block. Move past it.
assert(refPosIterator != refPositions.end());
assert(currentRefPosition->refType == RefTypeBB);
++refPosIterator;
currentRefPosition = &refPosIterator;
// Handle the RefPositions for the block
for (; refPosIterator != refPositions.end() && currentRefPosition->refType != RefTypeBB &&
currentRefPosition->refType != RefTypeDummyDef;
++refPosIterator, currentRefPosition = &refPosIterator)
{
currentLocation = currentRefPosition->nodeLocation;
// Ensure that the spill & copy info is valid.
// First, if it's reload, it must not be copyReg or moveReg
assert(!currentRefPosition->reload || (!currentRefPosition->copyReg && !currentRefPosition->moveReg));
// If it's copyReg it must not be moveReg, and vice-versa
assert(!currentRefPosition->copyReg || !currentRefPosition->moveReg);
switch (currentRefPosition->refType)
{
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
case RefTypeUpperVectorSave:
case RefTypeUpperVectorRestore:
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
case RefTypeUse:
case RefTypeDef:
// These are the ones we're interested in
break;
case RefTypeKill:
case RefTypeFixedReg:
// These require no handling at resolution time
assert(currentRefPosition->referent != nullptr);
currentRefPosition->referent->recentRefPosition = currentRefPosition;
continue;
case RefTypeExpUse:
// Ignore the ExpUse cases - a RefTypeExpUse would only exist if the
// variable is dead at the entry to the next block. So we'll mark
// it as in its current location and resolution will take care of any
// mismatch.
assert(getNextBlock() == nullptr ||
!VarSetOps::IsMember(compiler, getNextBlock()->bbLiveIn,
currentRefPosition->getInterval()->getVarIndex(compiler)));
currentRefPosition->referent->recentRefPosition = currentRefPosition;
continue;
case RefTypeKillGCRefs:
// No action to take at resolution time, and no interval to update recentRefPosition for.
continue;
case RefTypeDummyDef:
case RefTypeParamDef:
case RefTypeZeroInit:
// Should have handled all of these already
default:
unreached();
break;
}
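            // Keep track of the maximum number of concurrently-live spill temps.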
updateMaxSpill(currentRefPosition);
GenTree* treeNode = currentRefPosition->treeNode;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (currentRefPosition->refType == RefTypeUpperVectorSave)
{
// The treeNode is a call or something that might become one.
noway_assert(treeNode != nullptr);
// If the associated interval is an UpperVector, this must be a RefPosition for a LargeVectorType
// LocalVar.
// Otherwise, this is a non-lclVar interval that has been spilled, and we don't need to do anything.
Interval* interval = currentRefPosition->getInterval();
if (interval->isUpperVector)
{
Interval* localVarInterval = interval->relatedInterval;
if ((localVarInterval->physReg != REG_NA) && !localVarInterval->isPartiallySpilled)
{
// If the localVar is in a register, it must be in a register that is not trashed by
// the current node (otherwise it would have already been spilled).
assert((genRegMask(localVarInterval->physReg) & getKillSetForNode(treeNode)) == RBM_NONE);
// If we have allocated a register to spill it to, we will use that; otherwise, we will spill it
// to the stack. We can use as a temp register any non-arg caller-save register.
currentRefPosition->referent->recentRefPosition = currentRefPosition;
insertUpperVectorSave(treeNode, currentRefPosition, currentRefPosition->getInterval(), block);
localVarInterval->isPartiallySpilled = true;
}
}
else
{
// This is a non-lclVar interval that must have been spilled.
assert(!currentRefPosition->getInterval()->isLocalVar);
assert(currentRefPosition->getInterval()->firstRefPosition->spillAfter);
}
continue;
}
else if (currentRefPosition->refType == RefTypeUpperVectorRestore)
{
// Since we don't do partial restores of tree temp intervals, this must be an upperVector.
Interval* interval = currentRefPosition->getInterval();
Interval* localVarInterval = interval->relatedInterval;
assert(interval->isUpperVector && (localVarInterval != nullptr));
if (localVarInterval->physReg != REG_NA)
{
assert(localVarInterval->isPartiallySpilled);
assert((localVarInterval->assignedReg != nullptr) &&
(localVarInterval->assignedReg->regNum == localVarInterval->physReg) &&
(localVarInterval->assignedReg->assignedInterval == localVarInterval));
insertUpperVectorRestore(treeNode, currentRefPosition, interval, block);
}
localVarInterval->isPartiallySpilled = false;
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Most uses won't actually need to be recorded (they're on the def).
// In those cases, treeNode will be nullptr.
if (treeNode == nullptr)
{
// This is either a use, a dead def, or a field of a struct
Interval* interval = currentRefPosition->getInterval();
assert(currentRefPosition->refType == RefTypeUse ||
currentRefPosition->registerAssignment == RBM_NONE || interval->isStructField ||
interval->IsUpperVector());
// TODO-Review: Need to handle the case where any of the struct fields
// are reloaded/spilled at this use
assert(!interval->isStructField ||
(currentRefPosition->reload == false && currentRefPosition->spillAfter == false));
if (interval->isLocalVar && !interval->isStructField)
{
LclVarDsc* varDsc = interval->getLocalVar(compiler);
// This must be a dead definition. We need to mark the lclVar
// so that it's not considered a candidate for lvRegister, as
// this dead def will have to go to the stack.
assert(currentRefPosition->refType == RefTypeDef);
varDsc->SetRegNum(REG_STK);
}
continue;
}
assert(currentRefPosition->isIntervalRef());
if (currentRefPosition->getInterval()->isInternal)
{
treeNode->gtRsvdRegs |= currentRefPosition->registerAssignment;
}
else
{
writeRegisters(currentRefPosition, treeNode);
if (treeNode->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR) && currentRefPosition->getInterval()->isLocalVar)
{
resolveLocalRef(block, treeNode->AsLclVar(), currentRefPosition);
}
// Mark spill locations on temps
// (local vars are handled in resolveLocalRef, above)
// Note that the tree node will be changed from GTF_SPILL to GTF_SPILLED
// in codegen, taking care of the "reload" case for temps
else if (currentRefPosition->spillAfter || (currentRefPosition->nextRefPosition != nullptr &&
currentRefPosition->nextRefPosition->moveReg))
{
if (treeNode != nullptr)
{
if (currentRefPosition->spillAfter)
{
treeNode->gtFlags |= GTF_SPILL;
// If this is a constant interval that is reusing a pre-existing value, we actually need
// to generate the value at this point in order to spill it.
if (treeNode->IsReuseRegVal())
{
treeNode->ResetReuseRegVal();
}
// In case of multi-reg node, also set spill flag on the
// register specified by multi-reg index of current RefPosition.
// Note that the spill flag on treeNode indicates that one or
                            // more of its allocated registers are in that state.
if (treeNode->IsMultiRegCall())
{
GenTreeCall* call = treeNode->AsCall();
call->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx());
}
#if FEATURE_ARG_SPLIT
else if (treeNode->OperIsPutArgSplit())
{
GenTreePutArgSplit* splitArg = treeNode->AsPutArgSplit();
splitArg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx());
}
#ifdef TARGET_ARM
else if (compFeatureArgSplit() && treeNode->OperIsMultiRegOp())
{
GenTreeMultiRegOp* multiReg = treeNode->AsMultiRegOp();
multiReg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx());
}
#endif // TARGET_ARM
#endif // FEATURE_ARG_SPLIT
}
// If the value is reloaded or moved to a different register, we need to insert
// a node to hold the register to which it should be reloaded
RefPosition* nextRefPosition = currentRefPosition->nextRefPosition;
noway_assert(nextRefPosition != nullptr);
if (INDEBUG(alwaysInsertReload() ||)
nextRefPosition->assignedReg() != currentRefPosition->assignedReg())
{
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Note that we asserted above that this is an Interval RefPosition.
Interval* currentInterval = currentRefPosition->getInterval();
if (!currentInterval->isUpperVector && nextRefPosition->refType == RefTypeUpperVectorSave)
{
// The currentRefPosition is a spill of a tree temp.
// These have no associated Restore, as we always spill if the vector is
// in a register when this is encountered.
// The nextRefPosition we're interested in (where we may need to insert a
// reload or flag as GTF_NOREG_AT_USE) is the subsequent RefPosition.
assert(!currentInterval->isLocalVar);
nextRefPosition = nextRefPosition->nextRefPosition;
assert(nextRefPosition->refType != RefTypeUpperVectorSave);
}
// UpperVector intervals may have unique assignments at each reference.
if (!currentInterval->isUpperVector)
#endif
{
if (nextRefPosition->assignedReg() != REG_NA)
{
insertCopyOrReload(block, treeNode, currentRefPosition->getMultiRegIdx(),
nextRefPosition);
}
else
{
assert(nextRefPosition->RegOptional());
// In case of tree temps, if def is spilled and use didn't
// get a register, set a flag on tree node to be treated as
// contained at the point of its use.
if (currentRefPosition->spillAfter && currentRefPosition->refType == RefTypeDef &&
nextRefPosition->refType == RefTypeUse)
{
assert(nextRefPosition->treeNode == nullptr);
treeNode->gtFlags |= GTF_NOREG_AT_USE;
}
}
}
}
}
// We should never have to "spill after" a temp use, since
// they're single use
else
{
unreached();
}
}
}
}
if (enregisterLocalVars)
{
processBlockEndLocations(block);
}
}
if (enregisterLocalVars)
{
#ifdef DEBUG
if (VERBOSE)
{
printf("-----------------------\n");
printf("RESOLVING BB BOUNDARIES\n");
printf("-----------------------\n");
printf("Resolution Candidates: ");
dumpConvertedVarSet(compiler, resolutionCandidateVars);
printf("\n");
printf("Has %sCritical Edges\n\n", hasCriticalEdges ? "" : "No ");
printf("Prior to Resolution\n");
for (BasicBlock* const block : compiler->Blocks())
{
printf("\n" FMT_BB, block->bbNum);
if (block->hasEHBoundaryIn())
{
JITDUMP(" EH flow in");
}
if (block->hasEHBoundaryOut())
{
JITDUMP(" EH flow out");
}
printf("\nuse def in out\n");
dumpConvertedVarSet(compiler, block->bbVarUse);
printf("\n");
dumpConvertedVarSet(compiler, block->bbVarDef);
printf("\n");
dumpConvertedVarSet(compiler, block->bbLiveIn);
printf("\n");
dumpConvertedVarSet(compiler, block->bbLiveOut);
printf("\n");
dumpInVarToRegMap(block);
dumpOutVarToRegMap(block);
}
printf("\n\n");
}
#endif // DEBUG
resolveEdges();
// Verify register assignments on variables
unsigned lclNum;
LclVarDsc* varDsc;
for (lclNum = 0, varDsc = compiler->lvaTable; lclNum < compiler->lvaCount; lclNum++, varDsc++)
{
if (!isCandidateVar(varDsc))
{
varDsc->SetRegNum(REG_STK);
}
else
{
Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex);
// Determine initial position for parameters
if (varDsc->lvIsParam)
{
regMaskTP initialRegMask = interval->firstRefPosition->registerAssignment;
regNumber initialReg = (initialRegMask == RBM_NONE || interval->firstRefPosition->spillAfter)
? REG_STK
: genRegNumFromMask(initialRegMask);
#ifdef TARGET_ARM
if (varTypeIsMultiReg(varDsc))
{
// TODO-ARM-NYI: Map the hi/lo intervals back to lvRegNum and GetOtherReg() (these should NYI
// before this)
assert(!"Multi-reg types not yet supported");
}
else
#endif // TARGET_ARM
{
varDsc->SetArgInitReg(initialReg);
JITDUMP(" Set V%02u argument initial register to %s\n", lclNum, getRegName(initialReg));
}
// Stack args that are part of dependently-promoted structs should never be register candidates (see
// LinearScan::isRegCandidate).
assert(varDsc->lvIsRegArg || !compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc));
}
// If lvRegNum is REG_STK, that means that either no register
// was assigned, or (more likely) that the same register was not
// used for all references. In that case, codegen gets the register
// from the tree node.
if (varDsc->GetRegNum() == REG_STK || interval->isSpilled || interval->isSplit)
{
// For codegen purposes, we'll set lvRegNum to whatever register
// it's currently in as we go.
// However, we never mark an interval as lvRegister if it has either been spilled
// or split.
varDsc->lvRegister = false;
// Skip any dead defs or exposed uses
// (first use exposed will only occur when there is no explicit initialization)
RefPosition* firstRefPosition = interval->firstRefPosition;
while ((firstRefPosition != nullptr) && (firstRefPosition->refType == RefTypeExpUse))
{
firstRefPosition = firstRefPosition->nextRefPosition;
}
if (firstRefPosition == nullptr)
{
// Dead interval
varDsc->lvLRACandidate = false;
if (varDsc->lvRefCnt() == 0)
{
varDsc->lvOnFrame = false;
}
else
{
// We may encounter cases where a lclVar actually has no references, but
// a non-zero refCnt. For safety (in case this is some "hidden" lclVar that we're
// not correctly recognizing), we'll mark those as needing a stack location.
// TODO-Cleanup: Make this an assert if/when we correct the refCnt
// updating.
varDsc->lvOnFrame = true;
}
}
else
{
// If the interval was not spilled, it doesn't need a stack location.
if (!interval->isSpilled)
{
varDsc->lvOnFrame = false;
}
if (firstRefPosition->registerAssignment == RBM_NONE || firstRefPosition->spillAfter)
{
// Either this RefPosition is spilled, or regOptional or it is not a "real" def or use
assert(
firstRefPosition->spillAfter || firstRefPosition->RegOptional() ||
(firstRefPosition->refType != RefTypeDef && firstRefPosition->refType != RefTypeUse));
varDsc->SetRegNum(REG_STK);
}
else
{
varDsc->SetRegNum(firstRefPosition->assignedReg());
}
}
}
else
{
{
varDsc->lvRegister = true;
varDsc->lvOnFrame = false;
}
#ifdef DEBUG
regMaskTP registerAssignment = genRegMask(varDsc->GetRegNum());
assert(!interval->isSpilled && !interval->isSplit);
RefPosition* refPosition = interval->firstRefPosition;
assert(refPosition != nullptr);
while (refPosition != nullptr)
{
// All RefPositions must match, except for dead definitions,
// copyReg/moveReg and RefTypeExpUse positions
if (refPosition->registerAssignment != RBM_NONE && !refPosition->copyReg &&
!refPosition->moveReg && refPosition->refType != RefTypeExpUse)
{
assert(refPosition->registerAssignment == registerAssignment);
}
refPosition = refPosition->nextRefPosition;
}
#endif // DEBUG
}
}
}
}
#ifdef DEBUG
if (VERBOSE)
{
printf("Trees after linear scan register allocator (LSRA)\n");
compiler->fgDispBasicBlocks(true);
}
verifyFinalAllocation();
#endif // DEBUG
compiler->raMarkStkVars();
recordMaxSpill();
// TODO-CQ: Review this comment and address as needed.
// Change all unused promoted non-argument struct locals to a non-GC type (in this case TYP_INT)
// so that the gc tracking logic and lvMustInit logic will ignore them.
// Extract the code that does this from raAssignVars, and call it here.
// PRECONDITIONS: Ensure that lvPromoted is set on promoted structs, if and
// only if it is promoted on all paths.
// Call might be something like:
// compiler->BashUnusedStructLocals();
}
//------------------------------------------------------------------------
// insertMove: Insert a move of a lclVar with the given lclNum into the given block.
//
// Arguments:
// block - the BasicBlock into which the move will be inserted.
// insertionPoint - the instruction before which to insert the move
// lclNum - the lclNum of the var to be moved
// fromReg - the register from which the var is moving
// toReg - the register to which the var is moving
//
// Return Value:
// None.
//
// Notes:
// If insertionPoint is non-NULL, insert before that instruction;
// otherwise, insert "near" the end (prior to the branch, if any).
// If fromReg or toReg is REG_STK, then move from/to memory, respectively.
void LinearScan::insertMove(
BasicBlock* block, GenTree* insertionPoint, unsigned lclNum, regNumber fromReg, regNumber toReg)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
// the lclVar must be a register candidate
assert(isRegCandidate(varDsc));
// One or both MUST be a register
assert(fromReg != REG_STK || toReg != REG_STK);
// They must not be the same register.
assert(fromReg != toReg);
// This var can't be marked lvRegister now
varDsc->SetRegNum(REG_STK);
GenTree* src = compiler->gtNewLclvNode(lclNum, varDsc->TypeGet());
SetLsraAdded(src);
// There are three cases we need to handle:
// - We are loading a lclVar from the stack.
// - We are storing a lclVar to the stack.
// - We are copying a lclVar between registers.
//
// In the first and second cases, the lclVar node will be marked with GTF_SPILLED and GTF_SPILL, respectively.
// It is up to the code generator to ensure that any necessary normalization is done when loading or storing the
// lclVar's value.
//
// In the third case, we generate GT_COPY(GT_LCL_VAR) and type each node with the normalized type of the lclVar.
// This is safe because a lclVar is always normalized once it is in a register.
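    // For example (an illustrative sketch, not actual generated IR; the local V03 is hypothetical):
    //   stack -> reg : GT_LCL_VAR V03 marked GTF_SPILLED, with its regNum set to toReg
    //   reg -> stack : GT_LCL_VAR V03 marked GTF_SPILL, with its regNum set to fromReg
    //   reg -> reg   : GT_COPY(GT_LCL_VAR V03), where the lclVar's regNum is fromReg and the
    //                  GT_COPY's regNum is toReg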
GenTree* dst = src;
if (fromReg == REG_STK)
{
src->gtFlags |= GTF_SPILLED;
src->SetRegNum(toReg);
}
else if (toReg == REG_STK)
{
src->gtFlags |= GTF_SPILL;
src->SetRegNum(fromReg);
}
else
{
var_types movType = varDsc->GetRegisterType();
src->gtType = movType;
dst = new (compiler, GT_COPY) GenTreeCopyOrReload(GT_COPY, movType, src);
// This is the new home of the lclVar - indicate that by clearing the GTF_VAR_DEATH flag.
// Note that if src is itself a lastUse, this will have no effect.
dst->gtFlags &= ~(GTF_VAR_DEATH);
src->SetRegNum(fromReg);
dst->SetRegNum(toReg);
SetLsraAdded(dst);
}
dst->SetUnusedValue();
LIR::Range treeRange = LIR::SeqTree(compiler, dst);
LIR::Range& blockRange = LIR::AsRange(block);
if (insertionPoint != nullptr)
{
blockRange.InsertBefore(insertionPoint, std::move(treeRange));
}
else
{
// Put the copy at the bottom
GenTree* lastNode = blockRange.LastNode();
if (block->KindIs(BBJ_COND, BBJ_SWITCH))
{
noway_assert(!blockRange.IsEmpty());
GenTree* branch = lastNode;
assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE ||
branch->OperGet() == GT_SWITCH);
blockRange.InsertBefore(branch, std::move(treeRange));
}
else
{
// These block kinds don't have a branch at the end.
assert((lastNode == nullptr) || (!lastNode->OperIsConditionalJump() &&
!lastNode->OperIs(GT_SWITCH_TABLE, GT_SWITCH, GT_RETURN, GT_RETFILT)));
blockRange.InsertAtEnd(std::move(treeRange));
}
}
}
void LinearScan::insertSwap(
BasicBlock* block, GenTree* insertionPoint, unsigned lclNum1, regNumber reg1, unsigned lclNum2, regNumber reg2)
{
#ifdef DEBUG
if (VERBOSE)
{
const char* insertionPointString = "top";
if (insertionPoint == nullptr)
{
insertionPointString = "bottom";
}
printf(" " FMT_BB " %s: swap V%02u in %s with V%02u in %s\n", block->bbNum, insertionPointString, lclNum1,
getRegName(reg1), lclNum2, getRegName(reg2));
}
#endif // DEBUG
LclVarDsc* varDsc1 = compiler->lvaGetDesc(lclNum1);
LclVarDsc* varDsc2 = compiler->lvaGetDesc(lclNum2);
assert(reg1 != REG_STK && reg1 != REG_NA && reg2 != REG_STK && reg2 != REG_NA);
GenTree* lcl1 = compiler->gtNewLclvNode(lclNum1, varDsc1->TypeGet());
lcl1->SetRegNum(reg1);
SetLsraAdded(lcl1);
GenTree* lcl2 = compiler->gtNewLclvNode(lclNum2, varDsc2->TypeGet());
lcl2->SetRegNum(reg2);
SetLsraAdded(lcl2);
GenTree* swap = compiler->gtNewOperNode(GT_SWAP, TYP_VOID, lcl1, lcl2);
swap->SetRegNum(REG_NA);
SetLsraAdded(swap);
lcl1->gtNext = lcl2;
lcl2->gtPrev = lcl1;
lcl2->gtNext = swap;
swap->gtPrev = lcl2;
LIR::Range swapRange = LIR::SeqTree(compiler, swap);
LIR::Range& blockRange = LIR::AsRange(block);
if (insertionPoint != nullptr)
{
blockRange.InsertBefore(insertionPoint, std::move(swapRange));
}
else
{
// Put the copy at the bottom
// If there's a branch, make an embedded statement that executes just prior to the branch
if (block->KindIs(BBJ_COND, BBJ_SWITCH))
{
noway_assert(!blockRange.IsEmpty());
GenTree* branch = blockRange.LastNode();
assert(branch->OperIsConditionalJump() || branch->OperGet() == GT_SWITCH_TABLE ||
branch->OperGet() == GT_SWITCH);
blockRange.InsertBefore(branch, std::move(swapRange));
}
else
{
assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS));
blockRange.InsertAtEnd(std::move(swapRange));
}
}
}
//------------------------------------------------------------------------
// getTempRegForResolution: Get a free register to use for resolution code.
//
// Arguments:
// fromBlock - The "from" block on the edge being resolved.
//    toBlock   - The "to" block on the edge
// type - the type of register required
//
// Return Value:
// Returns a register that is free on the given edge, or REG_NA if none is available.
//
// Notes:
//    It is up to the caller to check the return value, and to handle the case where no register is
//    available.
// It is also up to the caller to cache the return value, as this is not cheap to compute.
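//    For example (an illustrative sketch; the registers and V-number are hypothetical): if V07 is
//    live across the edge in RCX at the end of 'fromBlock' and is expected in RDX at the start of
//    'toBlock', both RCX and RDX are removed from the candidate set, since either may hold a live
//    value while the resolution moves are in flight.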
regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type)
{
// TODO-Throughput: This would be much more efficient if we add RegToVarMaps instead of VarToRegMaps
// and they would be more space-efficient as well.
VarToRegMap fromVarToRegMap = getOutVarToRegMap(fromBlock->bbNum);
VarToRegMap toVarToRegMap = getInVarToRegMap(toBlock->bbNum);
#ifdef TARGET_ARM
regMaskTP freeRegs;
if (type == TYP_DOUBLE)
{
// We have to consider all float registers for TYP_DOUBLE
freeRegs = allRegs(TYP_FLOAT);
}
else
{
freeRegs = allRegs(type);
}
#else // !TARGET_ARM
regMaskTP freeRegs = allRegs(type);
#endif // !TARGET_ARM
#ifdef DEBUG
if (getStressLimitRegs() == LSRA_LIMIT_SMALL_SET)
{
return REG_NA;
}
#endif // DEBUG
INDEBUG(freeRegs = stressLimitRegs(nullptr, freeRegs));
// We are only interested in the variables that are live-in to the "to" block.
VarSetOps::Iter iter(compiler, toBlock->bbLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex) && freeRegs != RBM_NONE)
{
regNumber fromReg = getVarReg(fromVarToRegMap, varIndex);
regNumber toReg = getVarReg(toVarToRegMap, varIndex);
assert(fromReg != REG_NA && toReg != REG_NA);
if (fromReg != REG_STK)
{
freeRegs &= ~genRegMask(fromReg, getIntervalForLocalVar(varIndex)->registerType);
}
if (toReg != REG_STK)
{
freeRegs &= ~genRegMask(toReg, getIntervalForLocalVar(varIndex)->registerType);
}
}
#ifdef TARGET_ARM
if (type == TYP_DOUBLE)
{
// Exclude any doubles for which the odd half isn't in freeRegs.
freeRegs = freeRegs & ((freeRegs << 1) & RBM_ALLDOUBLE);
}
#endif
if (freeRegs == RBM_NONE)
{
return REG_NA;
}
else
{
regNumber tempReg = genRegNumFromMask(genFindLowestBit(freeRegs));
return tempReg;
}
}
#ifdef TARGET_ARM
//------------------------------------------------------------------------
// addResolutionForDouble: Add resolution move(s) for TYP_DOUBLE interval
// and update location.
//
// Arguments:
// block - the BasicBlock into which the move will be inserted.
// insertionPoint - the instruction before which to insert the move
//    sourceIntervals - sourceIntervals[reg] gives the interval currently associated with 'reg'
// location - maintains location[reg] which is the location of the var that was originally in 'reg'.
// toReg - the register to which the var is moving
// fromReg - the register from which the var is moving
// resolveType - the type of resolution to be performed
//
// Return Value:
// None.
//
// Notes:
// It inserts at least one move and updates incoming parameter 'location'.
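//    For example (an illustrative sketch; the register names are hypothetical): if 'fromReg' is the
//    pair f16/f17 and 'toReg' is the pair f20/f21, then:
//      - if f16 holds a TYP_DOUBLE interval, a single double-width move f16/f17 -> f20/f21 is added;
//      - otherwise, any TYP_FLOAT intervals occupying f16 and f17 are moved individually to f20 and
//        f21.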
//
void LinearScan::addResolutionForDouble(BasicBlock* block,
GenTree* insertionPoint,
Interval** sourceIntervals,
regNumberSmall* location,
regNumber toReg,
regNumber fromReg,
ResolveType resolveType)
{
regNumber secondHalfTargetReg = REG_NEXT(fromReg);
Interval* intervalToBeMoved1 = sourceIntervals[fromReg];
Interval* intervalToBeMoved2 = sourceIntervals[secondHalfTargetReg];
assert(!(intervalToBeMoved1 == nullptr && intervalToBeMoved2 == nullptr));
if (intervalToBeMoved1 != nullptr)
{
if (intervalToBeMoved1->registerType == TYP_DOUBLE)
{
// TYP_DOUBLE interval occupies a double register, i.e. two float registers.
assert(intervalToBeMoved2 == nullptr);
assert(genIsValidDoubleReg(toReg));
}
else
{
// TYP_FLOAT interval occupies 1st half of double register, i.e. 1st float register
assert(genIsValidFloatReg(toReg));
}
addResolution(block, insertionPoint, intervalToBeMoved1, toReg, fromReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
location[fromReg] = (regNumberSmall)toReg;
}
if (intervalToBeMoved2 != nullptr)
{
// TYP_FLOAT interval occupies 2nd half of double register.
assert(intervalToBeMoved2->registerType == TYP_FLOAT);
regNumber secondHalfTempReg = REG_NEXT(toReg);
addResolution(block, insertionPoint, intervalToBeMoved2, secondHalfTempReg, secondHalfTargetReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
location[secondHalfTargetReg] = (regNumberSmall)secondHalfTempReg;
}
return;
}
#endif // TARGET_ARM
//------------------------------------------------------------------------
// addResolution: Add a resolution move of the given interval
//
// Arguments:
// block - the BasicBlock into which the move will be inserted.
// insertionPoint - the instruction before which to insert the move
// interval - the interval of the var to be moved
// toReg - the register to which the var is moving
// fromReg - the register from which the var is moving
//
// Return Value:
// None.
//
// Notes:
// For joins, we insert at the bottom (indicated by an insertionPoint
// of nullptr), while for splits we insert at the top.
// This is because for joins 'block' is a pred of the join, while for splits it is a succ.
// For critical edges, this function may be called twice - once to move from
// the source (fromReg), if any, to the stack, in which case toReg will be
// REG_STK, and we insert at the bottom (leave insertionPoint as nullptr).
// The next time, we want to move from the stack to the destination (toReg),
// in which case fromReg will be REG_STK, and we insert at the top.
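//    For example (an illustrative sketch; the registers and V-number are hypothetical): if V05 must
//    move from RCX to RDX across a critical edge and no temp register is available, this method may
//    first be called with toReg == REG_STK to spill RCX at the bottom (insertionPoint == nullptr),
//    and later with fromReg == REG_STK to load RDX at the top.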
void LinearScan::addResolution(
BasicBlock* block, GenTree* insertionPoint, Interval* interval, regNumber toReg, regNumber fromReg)
{
#ifdef DEBUG
const char* insertionPointString;
if (insertionPoint == nullptr)
{
// We can't add resolution to a register at the bottom of a block that has an EHBoundaryOut,
// except in the case of the "EH Dummy" resolution from the stack.
assert((block->bbNum > bbNumMaxBeforeResolution) || (fromReg == REG_STK) ||
!blockInfo[block->bbNum].hasEHBoundaryOut);
insertionPointString = "bottom";
}
else
{
// We can't add resolution at the top of a block that has an EHBoundaryIn,
// except in the case of the "EH Dummy" resolution to the stack.
assert((block->bbNum > bbNumMaxBeforeResolution) || (toReg == REG_STK) ||
!blockInfo[block->bbNum].hasEHBoundaryIn);
insertionPointString = "top";
}
    // We should never add a resolution move inside BBCallAlwaysPairTail.
noway_assert(!block->isBBCallAlwaysPairTail());
#endif // DEBUG
JITDUMP(" " FMT_BB " %s: move V%02u from ", block->bbNum, insertionPointString, interval->varNum);
JITDUMP("%s to %s", getRegName(fromReg), getRegName(toReg));
insertMove(block, insertionPoint, interval->varNum, fromReg, toReg);
if (fromReg == REG_STK || toReg == REG_STK)
{
assert(interval->isSpilled);
}
else
{
// We should have already marked this as spilled or split.
assert((interval->isSpilled) || (interval->isSplit));
}
INTRACK_STATS(updateLsraStat(STAT_RESOLUTION_MOV, block->bbNum));
}
//------------------------------------------------------------------------
// handleOutgoingCriticalEdges: Performs the necessary resolution on all critical edges that feed out of 'block'
//
// Arguments:
// block - the block with outgoing critical edges.
//
// Return Value:
//    None.
//
// Notes:
// For all outgoing critical edges (i.e. any successor of this block which is
// a join edge), if there are any conflicts, split the edge by adding a new block,
// and generate the resolution code into that block.
void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
{
VARSET_TP outResolutionSet(VarSetOps::Intersection(compiler, block->bbLiveOut, resolutionCandidateVars));
if (VarSetOps::IsEmpty(compiler, outResolutionSet))
{
return;
}
VARSET_TP sameResolutionSet(VarSetOps::MakeEmpty(compiler));
VARSET_TP diffResolutionSet(VarSetOps::MakeEmpty(compiler));
// Get the outVarToRegMap for this block
VarToRegMap outVarToRegMap = getOutVarToRegMap(block->bbNum);
unsigned succCount = block->NumSucc(compiler);
assert(succCount > 1);
// First, determine the live regs at the end of this block so that we know what regs are
// available to copy into.
// Note that for this purpose we use the full live-out set, because we must ensure that
// even the registers that remain the same across the edge are preserved correctly.
regMaskTP liveOutRegs = RBM_NONE;
VarSetOps::Iter liveOutIter(compiler, block->bbLiveOut);
unsigned liveOutVarIndex = 0;
while (liveOutIter.NextElem(&liveOutVarIndex))
{
regNumber fromReg = getVarReg(outVarToRegMap, liveOutVarIndex);
if (fromReg != REG_STK)
{
regMaskTP fromRegMask = genRegMask(fromReg, getIntervalForLocalVar(liveOutVarIndex)->registerType);
liveOutRegs |= fromRegMask;
}
}
    // Next, if this block ends with a switch table, or for Arm64, ends with a JCMP instruction,
// make sure to not copy into the registers that are consumed at the end of this block.
//
    // Note: Only switches and JCMP (for Arm64) have input regs (and so can be fed by copies), so those
// are the only block-ending branches that need special handling.
regMaskTP consumedRegs = RBM_NONE;
if (block->bbJumpKind == BBJ_SWITCH)
{
// At this point, Lowering has transformed any non-switch-table blocks into
// cascading ifs.
GenTree* switchTable = LIR::AsRange(block).LastNode();
assert(switchTable != nullptr && switchTable->OperGet() == GT_SWITCH_TABLE);
consumedRegs = switchTable->gtRsvdRegs;
GenTree* op1 = switchTable->gtGetOp1();
GenTree* op2 = switchTable->gtGetOp2();
noway_assert(op1 != nullptr && op2 != nullptr);
assert(op1->GetRegNum() != REG_NA && op2->GetRegNum() != REG_NA);
// No floating point values, so no need to worry about the register type
// (i.e. for ARM32, where we used the genRegMask overload with a type).
assert(varTypeIsIntegralOrI(op1) && varTypeIsIntegralOrI(op2));
consumedRegs |= genRegMask(op1->GetRegNum());
consumedRegs |= genRegMask(op2->GetRegNum());
// Special handling for GT_COPY to not resolve into the source
// of switch's operand.
if (op1->OperIs(GT_COPY))
{
GenTree* srcOp1 = op1->gtGetOp1();
consumedRegs |= genRegMask(srcOp1->GetRegNum());
}
}
#ifdef TARGET_ARM64
    // Next, if this block ends with a JCMP, we have to make sure:
// 1. Not to copy into the register that JCMP uses
// e.g. JCMP w21, BRANCH
// 2. Not to copy into the source of JCMP's operand before it is consumed
// e.g. Should not use w0 since it will contain wrong value after resolution
// call METHOD
// ; mov w0, w19 <-- should not resolve in w0 here.
// mov w21, w0
// JCMP w21, BRANCH
// 3. Not to modify the local variable it must consume
// Note: GT_COPY has special handling in codegen and its generation is merged with the
// node that consumes its result. So both, the input and output regs of GT_COPY must be
// excluded from the set available for resolution.
LclVarDsc* jcmpLocalVarDsc = nullptr;
if (block->bbJumpKind == BBJ_COND)
{
GenTree* lastNode = LIR::AsRange(block).LastNode();
if (lastNode->OperIs(GT_JCMP))
{
GenTree* op1 = lastNode->gtGetOp1();
consumedRegs |= genRegMask(op1->GetRegNum());
if (op1->OperIs(GT_COPY))
{
GenTree* srcOp1 = op1->gtGetOp1();
consumedRegs |= genRegMask(srcOp1->GetRegNum());
}
if (op1->IsLocal())
{
GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
jcmpLocalVarDsc = &compiler->lvaTable[lcl->GetLclNum()];
}
}
}
#endif
VarToRegMap sameVarToRegMap = sharedCriticalVarToRegMap;
regMaskTP sameWriteRegs = RBM_NONE;
regMaskTP diffReadRegs = RBM_NONE;
// For each var that may require resolution, classify them as:
// - in the same register at the end of this block and at each target (no resolution needed)
// - in different registers at different targets (resolve separately):
// diffResolutionSet
// - in the same register at each target at which it's live, but different from the end of
// this block. We may be able to resolve these as if it is "join", but only if they do not
// write to any registers that are read by those in the diffResolutionSet:
// sameResolutionSet
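    // For example (an illustrative sketch; the registers and V-number are hypothetical): suppose
    // 'block' ends with V10 in RSI and has successors S1 and S2.
    //   - If V10 is expected in RDI at both S1 and S2, it is a candidate for sameResolutionSet:
    //     a single move RSI -> RDI at the end of 'block' suffices, subject to the checks below.
    //   - If V10 is expected in RDI at S1 but in RBX at S2, it goes in diffResolutionSet and each
    //     edge is resolved separately (typically by splitting the edge).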
VarSetOps::Iter outResolutionSetIter(compiler, outResolutionSet);
unsigned outResolutionSetVarIndex = 0;
while (outResolutionSetIter.NextElem(&outResolutionSetVarIndex))
{
regNumber fromReg = getVarReg(outVarToRegMap, outResolutionSetVarIndex);
bool maybeSameLivePaths = false;
bool liveOnlyAtSplitEdge = true;
regNumber sameToReg = REG_NA;
for (unsigned succIndex = 0; succIndex < succCount; succIndex++)
{
BasicBlock* succBlock = block->GetSucc(succIndex, compiler);
if (!VarSetOps::IsMember(compiler, succBlock->bbLiveIn, outResolutionSetVarIndex))
{
maybeSameLivePaths = true;
continue;
}
else if (liveOnlyAtSplitEdge)
{
// Is the var live only at those target blocks which are connected by a split edge to this block
liveOnlyAtSplitEdge = ((succBlock->bbPreds->flNext == nullptr) && (succBlock != compiler->fgFirstBB));
}
regNumber toReg = getVarReg(getInVarToRegMap(succBlock->bbNum), outResolutionSetVarIndex);
if (sameToReg == REG_NA)
{
sameToReg = toReg;
continue;
}
if (toReg == sameToReg)
{
continue;
}
sameToReg = REG_NA;
break;
}
// Check for the cases where we can't write to a register.
// We only need to check for these cases if sameToReg is an actual register (not REG_STK).
if (sameToReg != REG_NA && sameToReg != REG_STK)
{
// If there's a path on which this var isn't live, it may use the original value in sameToReg.
// In this case, sameToReg will be in the liveOutRegs of this block.
// Similarly, if sameToReg is in sameWriteRegs, it has already been used (i.e. for a lclVar that's
// live only at another target), and we can't copy another lclVar into that reg in this block.
regMaskTP sameToRegMask =
genRegMask(sameToReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType);
if (maybeSameLivePaths &&
(((sameToRegMask & liveOutRegs) != RBM_NONE) || ((sameToRegMask & sameWriteRegs) != RBM_NONE)))
{
sameToReg = REG_NA;
}
// If this register is busy because it is used by a switch table at the end of the block
// (or for Arm64, it is consumed by JCMP), we can't do the copy in this block since we can't
// insert it after the switch (or for Arm64, can't insert and overwrite the operand/source
// of operand of JCMP).
if ((sameToRegMask & consumedRegs) != RBM_NONE)
{
sameToReg = REG_NA;
}
#ifdef TARGET_ARM64
if (jcmpLocalVarDsc && (jcmpLocalVarDsc->lvVarIndex == outResolutionSetVarIndex))
{
sameToReg = REG_NA;
}
#endif
// If the var is live only at those blocks connected by a split edge and not live-in at some of the
// target blocks, we will resolve it the same way as if it were in diffResolutionSet and resolution
// will be deferred to the handling of split edges, which means copy will only be at those target(s).
//
// Another way to achieve similar resolution for vars live only at split edges is by removing them
// from consideration up-front but it requires that we traverse those edges anyway to account for
// the registers that must not be overwritten.
if (liveOnlyAtSplitEdge && maybeSameLivePaths)
{
sameToReg = REG_NA;
}
}
if (sameToReg == REG_NA)
{
VarSetOps::AddElemD(compiler, diffResolutionSet, outResolutionSetVarIndex);
if (fromReg != REG_STK)
{
diffReadRegs |= genRegMask(fromReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType);
}
}
else if (sameToReg != fromReg)
{
VarSetOps::AddElemD(compiler, sameResolutionSet, outResolutionSetVarIndex);
setVarReg(sameVarToRegMap, outResolutionSetVarIndex, sameToReg);
if (sameToReg != REG_STK)
{
sameWriteRegs |= genRegMask(sameToReg, getIntervalForLocalVar(outResolutionSetVarIndex)->registerType);
}
}
}
if (!VarSetOps::IsEmpty(compiler, sameResolutionSet))
{
if ((sameWriteRegs & diffReadRegs) != RBM_NONE)
{
// We cannot split the "same" and "diff" regs if the "same" set writes registers
// that must be read by the "diff" set. (Note that when these are done as a "batch"
// we carefully order them to ensure all the input regs are read before they are
// overwritten.)
VarSetOps::UnionD(compiler, diffResolutionSet, sameResolutionSet);
VarSetOps::ClearD(compiler, sameResolutionSet);
}
else
{
// For any vars in the sameResolutionSet, we can simply add the move at the end of "block".
resolveEdge(block, nullptr, ResolveSharedCritical, sameResolutionSet);
}
}
if (!VarSetOps::IsEmpty(compiler, diffResolutionSet))
{
for (unsigned succIndex = 0; succIndex < succCount; succIndex++)
{
BasicBlock* succBlock = block->GetSucc(succIndex, compiler);
// Any "diffResolutionSet" resolution for a block with no other predecessors will be handled later
// as split resolution.
if ((succBlock->bbPreds->flNext == nullptr) && (succBlock != compiler->fgFirstBB))
{
continue;
}
// Now collect the resolution set for just this edge, if any.
// Check only the vars in diffResolutionSet that are live-in to this successor.
VarToRegMap succInVarToRegMap = getInVarToRegMap(succBlock->bbNum);
VARSET_TP edgeResolutionSet(VarSetOps::Intersection(compiler, diffResolutionSet, succBlock->bbLiveIn));
VarSetOps::Iter iter(compiler, edgeResolutionSet);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
regNumber fromReg = getVarReg(outVarToRegMap, varIndex);
regNumber toReg = getVarReg(succInVarToRegMap, varIndex);
if (fromReg == toReg)
{
VarSetOps::RemoveElemD(compiler, edgeResolutionSet, varIndex);
}
}
if (!VarSetOps::IsEmpty(compiler, edgeResolutionSet))
{
// For EH vars, we can always safely load them from the stack into the target for this block,
// so if we have only EH vars, we'll do that instead of splitting the edge.
if ((compiler->compHndBBtabCount > 0) && VarSetOps::IsSubset(compiler, edgeResolutionSet, exceptVars))
{
GenTree* insertionPoint = LIR::AsRange(succBlock).FirstNode();
VarSetOps::Iter edgeSetIter(compiler, edgeResolutionSet);
unsigned edgeVarIndex = 0;
while (edgeSetIter.NextElem(&edgeVarIndex))
{
regNumber toReg = getVarReg(succInVarToRegMap, edgeVarIndex);
setVarReg(succInVarToRegMap, edgeVarIndex, REG_STK);
if (toReg != REG_STK)
{
Interval* interval = getIntervalForLocalVar(edgeVarIndex);
assert(interval->isWriteThru);
addResolution(succBlock, insertionPoint, interval, toReg, REG_STK);
JITDUMP(" (EHvar)\n");
}
}
}
else
{
resolveEdge(block, succBlock, ResolveCritical, edgeResolutionSet);
}
}
}
}
}
//------------------------------------------------------------------------
// resolveEdges: Perform resolution across basic block edges
//
// Arguments:
// None.
//
// Return Value:
// None.
//
// Notes:
// Traverse the basic blocks.
// - If this block has a single predecessor that is not the immediately
// preceding block, perform any needed 'split' resolution at the beginning of this block
// - Otherwise if this block has critical incoming edges, handle them.
//      - If this block has a single successor that has multiple predecessors, perform any needed
// 'join' resolution at the end of this block.
//    Note that a block may have 'split' or 'critical' incoming edge(s) as well as 'join' outgoing
//    edges.
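//    For example (an illustrative sketch; the block names are hypothetical): if B1 branches to B2
//    and B3, B2 also has predecessor B4, B3 has no other predecessor, and B4 has no other successor,
//    then:
//      - B1->B2 is a critical edge, handled by handleOutgoingCriticalEdges() (possibly by splitting
//        the edge);
//      - B1->B3 is a candidate for 'split' resolution at the top of B3;
//      - B4->B2 is a candidate for 'join' resolution at the bottom of B4.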
void LinearScan::resolveEdges()
{
JITDUMP("RESOLVING EDGES\n");
// The resolutionCandidateVars set was initialized with all the lclVars that are live-in to
// any block. We now intersect that set with any lclVars that ever spilled or split.
    // If there are no candidates for resolution, simply return.
VarSetOps::IntersectionD(compiler, resolutionCandidateVars, splitOrSpilledVars);
if (VarSetOps::IsEmpty(compiler, resolutionCandidateVars))
{
return;
}
// Handle all the critical edges first.
// We will try to avoid resolution across critical edges in cases where all the critical-edge
// targets of a block have the same home. We will then split the edges only for the
// remaining mismatches. We visit the out-edges, as that allows us to share the moves that are
// common among all the targets.
if (hasCriticalEdges)
{
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
// This is a new block added during resolution - we don't need to visit these now.
continue;
}
if (blockInfo[block->bbNum].hasCriticalOutEdge)
{
handleOutgoingCriticalEdges(block);
}
}
}
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
// This is a new block added during resolution - we don't need to visit these now.
continue;
}
unsigned succCount = block->NumSucc(compiler);
BasicBlock* uniquePredBlock = block->GetUniquePred(compiler);
// First, if this block has a single predecessor,
// we may need resolution at the beginning of this block.
// This may be true even if it's the block we used for starting locations,
// if a variable was spilled.
VARSET_TP inResolutionSet(VarSetOps::Intersection(compiler, block->bbLiveIn, resolutionCandidateVars));
if (!VarSetOps::IsEmpty(compiler, inResolutionSet))
{
if (uniquePredBlock != nullptr)
{
// We may have split edges during critical edge resolution, and in the process split
// a non-critical edge as well.
// It is unlikely that we would ever have more than one of these in sequence (indeed,
// I don't think it's possible), but there's no need to assume that it can't.
while (uniquePredBlock->bbNum > bbNumMaxBeforeResolution)
{
uniquePredBlock = uniquePredBlock->GetUniquePred(compiler);
noway_assert(uniquePredBlock != nullptr);
}
resolveEdge(uniquePredBlock, block, ResolveSplit, inResolutionSet);
}
}
// Finally, if this block has a single successor:
// - and that has at least one other predecessor (otherwise we will do the resolution at the
// top of the successor),
// - and that is not the target of a critical edge (otherwise we've already handled it)
// we may need resolution at the end of this block.
if (succCount == 1)
{
BasicBlock* succBlock = block->GetSucc(0, compiler);
if (succBlock->GetUniquePred(compiler) == nullptr)
{
VARSET_TP outResolutionSet(
VarSetOps::Intersection(compiler, succBlock->bbLiveIn, resolutionCandidateVars));
if (!VarSetOps::IsEmpty(compiler, outResolutionSet))
{
resolveEdge(block, succBlock, ResolveJoin, outResolutionSet);
}
}
}
}
    // Now, fixup the mapping for any blocks that were added for edge splitting.
// See the comment prior to the call to fgSplitEdge() in resolveEdge().
// Note that we could fold this loop in with the checking code below, but that
// would only improve the debug case, and would clutter up the code somewhat.
if (compiler->fgBBNumMax > bbNumMaxBeforeResolution)
{
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
// There may be multiple blocks inserted when we split. But we must always have exactly
// one path (i.e. all blocks must be single-successor and single-predecessor),
// and only one block along the path may be non-empty.
// Note that we may have a newly-inserted block that is empty, but which connects
// two non-resolution blocks. This happens when an edge is split that requires it.
BasicBlock* succBlock = block;
do
{
succBlock = succBlock->GetUniqueSucc();
noway_assert(succBlock != nullptr);
} while ((succBlock->bbNum > bbNumMaxBeforeResolution) && succBlock->isEmpty());
BasicBlock* predBlock = block;
do
{
predBlock = predBlock->GetUniquePred(compiler);
noway_assert(predBlock != nullptr);
} while ((predBlock->bbNum > bbNumMaxBeforeResolution) && predBlock->isEmpty());
unsigned succBBNum = succBlock->bbNum;
unsigned predBBNum = predBlock->bbNum;
if (block->isEmpty())
{
// For the case of the empty block, find the non-resolution block (succ or pred).
if (predBBNum > bbNumMaxBeforeResolution)
{
assert(succBBNum <= bbNumMaxBeforeResolution);
predBBNum = 0;
}
else
{
succBBNum = 0;
}
}
else
{
assert((succBBNum <= bbNumMaxBeforeResolution) && (predBBNum <= bbNumMaxBeforeResolution));
}
SplitEdgeInfo info = {predBBNum, succBBNum};
getSplitBBNumToTargetBBNumMap()->Set(block->bbNum, info);
// Set both the live-in and live-out to the live-in of the successor (by construction liveness
// doesn't change in a split block).
VarSetOps::Assign(compiler, block->bbLiveIn, succBlock->bbLiveIn);
VarSetOps::Assign(compiler, block->bbLiveOut, succBlock->bbLiveIn);
}
}
}
#ifdef DEBUG
// Make sure the varToRegMaps match up on all edges.
bool foundMismatch = false;
for (BasicBlock* const block : compiler->Blocks())
{
if (block->isEmpty() && block->bbNum > bbNumMaxBeforeResolution)
{
continue;
}
VarToRegMap toVarToRegMap = getInVarToRegMap(block->bbNum);
for (BasicBlock* const predBlock : block->PredBlocks())
{
VarToRegMap fromVarToRegMap = getOutVarToRegMap(predBlock->bbNum);
VarSetOps::Iter iter(compiler, block->bbLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
regNumber fromReg = getVarReg(fromVarToRegMap, varIndex);
regNumber toReg = getVarReg(toVarToRegMap, varIndex);
if (fromReg != toReg)
{
Interval* interval = getIntervalForLocalVar(varIndex);
// The fromReg and toReg may not match for a write-thru interval where the toReg is
// REG_STK, since the stack value is always valid for that case (so no move is needed).
if (!interval->isWriteThru || (toReg != REG_STK))
{
if (!foundMismatch)
{
foundMismatch = true;
printf("Found mismatched var locations after resolution!\n");
}
printf(" V%02u: " FMT_BB " to " FMT_BB ": %s to %s\n", interval->varNum, predBlock->bbNum,
block->bbNum, getRegName(fromReg), getRegName(toReg));
}
}
}
}
}
assert(!foundMismatch);
#endif
JITDUMP("\n");
}
//------------------------------------------------------------------------
// resolveEdge: Perform the specified type of resolution between two blocks.
//
// Arguments:
// fromBlock - the block from which the edge originates
// toBlock - the block at which the edge terminates
// resolveType - the type of resolution to be performed
// liveSet - the set of tracked lclVar indices which may require resolution
//
// Return Value:
// None.
//
// Assumptions:
// The caller must have performed the analysis to determine the type of the edge.
//
// Notes:
// This method emits the correctly ordered moves necessary to place variables in the
// correct registers across a Split, Join or Critical edge.
// In order to avoid overwriting register values before they have been moved to their
// new home (register/stack), it first does the register-to-stack moves (to free those
// registers), then the register to register moves, ensuring that the target register
// is free before the move, and then finally the stack to register moves.
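//    For example (an illustrative sketch; the registers are hypothetical): given the required moves
//    { RAX -> stack, RBX -> RAX, stack -> RBX }, we first store RAX to the stack (freeing RAX), then
//    copy RBX into RAX, and only then load RBX from the stack, so that no value is overwritten
//    before it has been read.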
void LinearScan::resolveEdge(BasicBlock* fromBlock,
BasicBlock* toBlock,
ResolveType resolveType,
VARSET_VALARG_TP liveSet)
{
VarToRegMap fromVarToRegMap = getOutVarToRegMap(fromBlock->bbNum);
VarToRegMap toVarToRegMap;
if (resolveType == ResolveSharedCritical)
{
toVarToRegMap = sharedCriticalVarToRegMap;
}
else
{
toVarToRegMap = getInVarToRegMap(toBlock->bbNum);
}
// The block to which we add the resolution moves depends on the resolveType
BasicBlock* block;
switch (resolveType)
{
case ResolveJoin:
case ResolveSharedCritical:
block = fromBlock;
break;
case ResolveSplit:
block = toBlock;
break;
case ResolveCritical:
// fgSplitEdge may add one or two BasicBlocks. It returns the block that splits
            // the edge from 'fromBlock' to 'toBlock', but if it inserts that block right after
// a block with a fall-through it will have to create another block to handle that edge.
// These new blocks can be mapped to existing blocks in order to correctly handle
// the calls to recordVarLocationsAtStartOfBB() from codegen. That mapping is handled
// in resolveEdges(), after all the edge resolution has been done (by calling this
// method for each edge).
block = compiler->fgSplitEdge(fromBlock, toBlock);
// Split edges are counted against fromBlock.
INTRACK_STATS(updateLsraStat(STAT_SPLIT_EDGE, fromBlock->bbNum));
break;
default:
unreached();
break;
}
#ifndef TARGET_XARCH
// We record tempregs for beginning and end of each block.
// For amd64/x86 we only need a tempReg for float - we'll use xchg for int.
// TODO-Throughput: It would be better to determine the tempRegs on demand, but the code below
// modifies the varToRegMaps so we don't have all the correct registers at the time
// we need to get the tempReg.
regNumber tempRegInt =
(resolveType == ResolveSharedCritical) ? REG_NA : getTempRegForResolution(fromBlock, toBlock, TYP_INT);
#endif // !TARGET_XARCH
regNumber tempRegFlt = REG_NA;
#ifdef TARGET_ARM
regNumber tempRegDbl = REG_NA;
#endif
if ((compiler->compFloatingPointUsed) && (resolveType != ResolveSharedCritical))
{
#ifdef TARGET_ARM
// Try to reserve a double register for TYP_DOUBLE and use it for TYP_FLOAT too if available.
tempRegDbl = getTempRegForResolution(fromBlock, toBlock, TYP_DOUBLE);
if (tempRegDbl != REG_NA)
{
tempRegFlt = tempRegDbl;
}
else
#endif // TARGET_ARM
{
tempRegFlt = getTempRegForResolution(fromBlock, toBlock, TYP_FLOAT);
}
}
regMaskTP targetRegsToDo = RBM_NONE;
regMaskTP targetRegsReady = RBM_NONE;
regMaskTP targetRegsFromStack = RBM_NONE;
// The following arrays capture the location of the registers as they are moved:
// - location[reg] gives the current location of the var that was originally in 'reg'.
// (Note that a var may be moved more than once.)
// - source[reg] gives the original location of the var that needs to be moved to 'reg'.
// For example, if a var is in rax and needs to be moved to rsi, then we would start with:
// location[rax] == rax
// source[rsi] == rax -- this doesn't change
// Then, if for some reason we need to move it temporary to rbx, we would have:
// location[rax] == rbx
// Once we have completed the move, we will have:
// location[rax] == REG_NA
// This indicates that the var originally in rax is now in its target register.
regNumberSmall location[REG_COUNT];
C_ASSERT(sizeof(char) == sizeof(regNumberSmall)); // for memset to work
memset(location, REG_NA, REG_COUNT);
regNumberSmall source[REG_COUNT];
memset(source, REG_NA, REG_COUNT);
// What interval is this register associated with?
// (associated with incoming reg)
Interval* sourceIntervals[REG_COUNT];
memset(&sourceIntervals, 0, sizeof(sourceIntervals));
// Intervals for vars that need to be loaded from the stack
Interval* stackToRegIntervals[REG_COUNT];
memset(&stackToRegIntervals, 0, sizeof(stackToRegIntervals));
// Get the starting insertion point for the "to" resolution
GenTree* insertionPoint = nullptr;
if (resolveType == ResolveSplit || resolveType == ResolveCritical)
{
insertionPoint = LIR::AsRange(block).FirstNode();
}
// If this is an edge between EH regions, we may have "extra" live-out EH vars.
// If we are adding resolution at the end of the block, we need to create "virtual" moves
// for these so that their registers are freed and can be reused.
if ((resolveType == ResolveJoin) && (compiler->compHndBBtabCount > 0))
{
VARSET_TP extraLiveSet(VarSetOps::Diff(compiler, block->bbLiveOut, toBlock->bbLiveIn));
VarSetOps::IntersectionD(compiler, extraLiveSet, exceptVars);
VarSetOps::Iter iter(compiler, extraLiveSet);
unsigned extraVarIndex = 0;
while (iter.NextElem(&extraVarIndex))
{
Interval* interval = getIntervalForLocalVar(extraVarIndex);
assert(interval->isWriteThru);
regNumber fromReg = getVarReg(fromVarToRegMap, extraVarIndex);
if (fromReg != REG_STK)
{
addResolution(block, insertionPoint, interval, REG_STK, fromReg);
JITDUMP(" (EH DUMMY)\n");
setVarReg(fromVarToRegMap, extraVarIndex, REG_STK);
}
}
}
// First:
// - Perform all moves from reg to stack (no ordering needed on these)
// - For reg to reg moves, record the current location, associating their
// source location with the target register they need to go into
// - For stack to reg moves (done last, no ordering needed between them)
// record the interval associated with the target reg
// TODO-Throughput: We should be looping over the liveIn and liveOut registers, since
// that will scale better than the live variables
VarSetOps::Iter iter(compiler, liveSet);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
Interval* interval = getIntervalForLocalVar(varIndex);
regNumber fromReg = getVarReg(fromVarToRegMap, varIndex);
regNumber toReg = getVarReg(toVarToRegMap, varIndex);
if (fromReg == toReg)
{
continue;
}
if (interval->isWriteThru && (toReg == REG_STK))
{
// We don't actually move a writeThru var back to the stack, as its stack value is always valid.
// However, if this is a Join edge (i.e. the move is happening at the bottom of the block),
// and it is a "normal" flow edge, we will go ahead and generate a mov instruction, which will be
// a NOP but will cause the variable to be removed from being live in the register.
if ((resolveType == ResolveSplit) || block->hasEHBoundaryOut())
{
continue;
}
}
// For Critical edges, the location will not change on either side of the edge,
// since we'll add a new block to do the move.
if (resolveType == ResolveSplit)
{
setVarReg(toVarToRegMap, varIndex, fromReg);
}
else if (resolveType == ResolveJoin || resolveType == ResolveSharedCritical)
{
setVarReg(fromVarToRegMap, varIndex, toReg);
}
assert(fromReg < UCHAR_MAX && toReg < UCHAR_MAX);
if (fromReg == REG_STK)
{
stackToRegIntervals[toReg] = interval;
targetRegsFromStack |= genRegMask(toReg);
}
else if (toReg == REG_STK)
{
// Do the reg to stack moves now
addResolution(block, insertionPoint, interval, REG_STK, fromReg);
JITDUMP(" (%s)\n",
(interval->isWriteThru && (toReg == REG_STK)) ? "EH DUMMY" : resolveTypeName[resolveType]);
}
else
{
location[fromReg] = (regNumberSmall)fromReg;
source[toReg] = (regNumberSmall)fromReg;
sourceIntervals[fromReg] = interval;
targetRegsToDo |= genRegMask(toReg);
}
}
// REGISTER to REGISTER MOVES
// First, find all the ones that are ready to move now
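    // (A target register is "ready" when location[targetReg] == REG_NA, i.e. no pending register-to-
    // register move still needs to read the value currently in targetReg, so it can safely be
    // written. On ARM, when the incoming interval is a double, the other half of the target pair
    // must also be free, as checked below.)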
regMaskTP targetCandidates = targetRegsToDo;
while (targetCandidates != RBM_NONE)
{
regMaskTP targetRegMask = genFindLowestBit(targetCandidates);
targetCandidates &= ~targetRegMask;
regNumber targetReg = genRegNumFromMask(targetRegMask);
if (location[targetReg] == REG_NA)
{
#ifdef TARGET_ARM
regNumber sourceReg = (regNumber)source[targetReg];
Interval* interval = sourceIntervals[sourceReg];
if (interval->registerType == TYP_DOUBLE)
{
// For ARM32, make sure that both of the float halves of the double register are available.
assert(genIsValidDoubleReg(targetReg));
regNumber anotherHalfRegNum = REG_NEXT(targetReg);
if (location[anotherHalfRegNum] == REG_NA)
{
targetRegsReady |= targetRegMask;
}
}
else
#endif // TARGET_ARM
{
targetRegsReady |= targetRegMask;
}
}
}
// Perform reg to reg moves
while (targetRegsToDo != RBM_NONE)
{
while (targetRegsReady != RBM_NONE)
{
regMaskTP targetRegMask = genFindLowestBit(targetRegsReady);
targetRegsToDo &= ~targetRegMask;
targetRegsReady &= ~targetRegMask;
regNumber targetReg = genRegNumFromMask(targetRegMask);
assert(location[targetReg] != targetReg);
assert(targetReg < REG_COUNT);
regNumber sourceReg = (regNumber)source[targetReg];
assert(sourceReg < REG_COUNT);
regNumber fromReg = (regNumber)location[sourceReg];
            // stack to reg moves should be done last as part of "targetRegsFromStack"
assert(fromReg < REG_STK);
Interval* interval = sourceIntervals[sourceReg];
assert(interval != nullptr);
addResolution(block, insertionPoint, interval, targetReg, fromReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
sourceIntervals[sourceReg] = nullptr;
location[sourceReg] = REG_NA;
regMaskTP fromRegMask = genRegMask(fromReg);
// Do we have a free targetReg?
if (fromReg == sourceReg)
{
if (source[fromReg] != REG_NA && ((targetRegsFromStack & fromRegMask) != fromRegMask))
{
targetRegsReady |= fromRegMask;
#ifdef TARGET_ARM
if (genIsValidDoubleReg(fromReg))
{
// Ensure that either:
// - the Interval targeting fromReg is not double, or
// - the other half of the double is free.
Interval* otherInterval = sourceIntervals[source[fromReg]];
regNumber upperHalfReg = REG_NEXT(fromReg);
if ((otherInterval->registerType == TYP_DOUBLE) && (location[upperHalfReg] != REG_NA))
{
targetRegsReady &= ~fromRegMask;
}
}
}
else if (genIsValidFloatReg(fromReg) && !genIsValidDoubleReg(fromReg))
{
// We may have freed up the other half of a double where the lower half
// was already free.
regNumber lowerHalfReg = REG_PREV(fromReg);
regNumber lowerHalfSrcReg = (regNumber)source[lowerHalfReg];
regNumber lowerHalfSrcLoc = (regNumber)location[lowerHalfReg];
regMaskTP lowerHalfRegMask = genRegMask(lowerHalfReg);
// Necessary conditions:
// - There is a source register for this reg (lowerHalfSrcReg != REG_NA)
// - It is currently free (lowerHalfSrcLoc == REG_NA)
// - The source interval isn't yet completed (sourceIntervals[lowerHalfSrcReg] != nullptr)
// - It's not in the ready set ((targetRegsReady & lowerHalfRegMask) ==
// RBM_NONE)
// - It's not resolved from stack ((targetRegsFromStack & lowerHalfRegMask) !=
// lowerHalfRegMask)
if ((lowerHalfSrcReg != REG_NA) && (lowerHalfSrcLoc == REG_NA) &&
(sourceIntervals[lowerHalfSrcReg] != nullptr) &&
((targetRegsReady & lowerHalfRegMask) == RBM_NONE) &&
((targetRegsFromStack & lowerHalfRegMask) != lowerHalfRegMask))
{
// This must be a double interval, otherwise it would be in targetRegsReady, or already
// completed.
assert(sourceIntervals[lowerHalfSrcReg]->registerType == TYP_DOUBLE);
targetRegsReady |= lowerHalfRegMask;
}
#endif // TARGET_ARM
}
}
}
if (targetRegsToDo != RBM_NONE)
{
regMaskTP targetRegMask = genFindLowestBit(targetRegsToDo);
regNumber targetReg = genRegNumFromMask(targetRegMask);
// Is it already there due to other moves?
// If not, move it to the temp reg, OR swap it with another register
regNumber sourceReg = (regNumber)source[targetReg];
regNumber fromReg = (regNumber)location[sourceReg];
if (targetReg == fromReg)
{
targetRegsToDo &= ~targetRegMask;
}
else
{
regNumber tempReg = REG_NA;
bool useSwap = false;
if (emitter::isFloatReg(targetReg))
{
#ifdef TARGET_ARM
if (sourceIntervals[fromReg]->registerType == TYP_DOUBLE)
{
// ARM32 requires a double temp register for TYP_DOUBLE.
tempReg = tempRegDbl;
}
else
#endif // TARGET_ARM
tempReg = tempRegFlt;
}
#ifdef TARGET_XARCH
else
{
useSwap = true;
}
#else // !TARGET_XARCH
else
{
tempReg = tempRegInt;
}
#endif // !TARGET_XARCH
if (useSwap || tempReg == REG_NA)
{
// First, we have to figure out the destination register for what's currently in fromReg,
// so that we can find its sourceInterval.
regNumber otherTargetReg = REG_NA;
// By chance, is fromReg going where it belongs?
if (location[source[fromReg]] == targetReg)
{
otherTargetReg = fromReg;
// If we can swap, we will be done with otherTargetReg as well.
// Otherwise, we'll spill it to the stack and reload it later.
if (useSwap)
{
regMaskTP fromRegMask = genRegMask(fromReg);
targetRegsToDo &= ~fromRegMask;
}
}
else
{
// Look at the remaining registers from targetRegsToDo (which we expect to be relatively
// small at this point) to find out what's currently in targetReg.
regMaskTP mask = targetRegsToDo;
while (mask != RBM_NONE && otherTargetReg == REG_NA)
{
regMaskTP nextRegMask = genFindLowestBit(mask);
regNumber nextReg = genRegNumFromMask(nextRegMask);
mask &= ~nextRegMask;
if (location[source[nextReg]] == targetReg)
{
otherTargetReg = nextReg;
}
}
}
assert(otherTargetReg != REG_NA);
if (useSwap)
{
// Generate a "swap" of fromReg and targetReg
insertSwap(block, insertionPoint, sourceIntervals[source[otherTargetReg]]->varNum, targetReg,
sourceIntervals[sourceReg]->varNum, fromReg);
location[sourceReg] = REG_NA;
location[source[otherTargetReg]] = (regNumberSmall)fromReg;
INTRACK_STATS(updateLsraStat(STAT_RESOLUTION_MOV, block->bbNum));
}
else
{
// Spill "targetReg" to the stack and add its eventual target (otherTargetReg)
// to "targetRegsFromStack", which will be handled below.
// NOTE: This condition is very rare. Setting COMPlus_JitStressRegs=0x203
// has been known to trigger it in JIT SH.
// First, spill "otherInterval" from targetReg to the stack.
Interval* otherInterval = sourceIntervals[source[otherTargetReg]];
setIntervalAsSpilled(otherInterval);
addResolution(block, insertionPoint, otherInterval, REG_STK, targetReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
location[source[otherTargetReg]] = REG_STK;
regMaskTP otherTargetRegMask = genRegMask(otherTargetReg);
targetRegsFromStack |= otherTargetRegMask;
stackToRegIntervals[otherTargetReg] = otherInterval;
targetRegsToDo &= ~otherTargetRegMask;
// Now, move the interval that is going to targetReg.
addResolution(block, insertionPoint, sourceIntervals[sourceReg], targetReg, fromReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
location[sourceReg] = REG_NA;
// Add its "fromReg" to "targetRegsReady", only if:
// - It was one of the target register we originally determined.
// - It is not the eventual target (otherTargetReg) because its
// value will be retrieved from STK.
if (source[fromReg] != REG_NA && fromReg != otherTargetReg)
{
regMaskTP fromRegMask = genRegMask(fromReg);
targetRegsReady |= fromRegMask;
#ifdef TARGET_ARM
if (genIsValidDoubleReg(fromReg))
{
// Ensure that either:
// - the Interval targeting fromReg is not double, or
// - the other half of the double is free.
Interval* otherInterval = sourceIntervals[source[fromReg]];
regNumber upperHalfReg = REG_NEXT(fromReg);
if ((otherInterval->registerType == TYP_DOUBLE) && (location[upperHalfReg] != REG_NA))
{
targetRegsReady &= ~fromRegMask;
}
}
#endif // TARGET_ARM
}
}
targetRegsToDo &= ~targetRegMask;
}
else
{
compiler->codeGen->regSet.rsSetRegsModified(genRegMask(tempReg) DEBUGARG(true));
#ifdef TARGET_ARM
if (sourceIntervals[fromReg]->registerType == TYP_DOUBLE)
{
assert(genIsValidDoubleReg(targetReg));
assert(genIsValidDoubleReg(tempReg));
addResolutionForDouble(block, insertionPoint, sourceIntervals, location, tempReg, targetReg,
resolveType);
}
else
#endif // TARGET_ARM
{
assert(sourceIntervals[targetReg] != nullptr);
addResolution(block, insertionPoint, sourceIntervals[targetReg], tempReg, targetReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
location[targetReg] = (regNumberSmall)tempReg;
}
targetRegsReady |= targetRegMask;
}
}
}
}
// Finally, perform stack to reg moves
// All the target regs will be empty at this point
while (targetRegsFromStack != RBM_NONE)
{
regMaskTP targetRegMask = genFindLowestBit(targetRegsFromStack);
targetRegsFromStack &= ~targetRegMask;
regNumber targetReg = genRegNumFromMask(targetRegMask);
Interval* interval = stackToRegIntervals[targetReg];
assert(interval != nullptr);
addResolution(block, insertionPoint, interval, targetReg, REG_STK);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
}
}
#if TRACK_LSRA_STATS
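//------------------------------------------------------------------------
// getStatName: Returns the name of the given LSRA stat, including the
// register selection heuristic stats.
//
// Arguments:
//    stat - the LsraStat (or heuristic stat) whose name is requested
//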
const char* LinearScan::getStatName(unsigned stat)
{
LsraStat lsraStat = (LsraStat)stat;
assert(lsraStat != LsraStat::COUNT);
static const char* const lsraStatNames[] = {
#define LSRA_STAT_DEF(stat, name) name,
#include "lsra_stats.h"
#undef LSRA_STAT_DEF
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) #stat,
#include "lsra_score.h"
#undef REG_SEL_DEF
};
assert(stat < ArrLen(lsraStatNames));
return lsraStatNames[lsraStat];
}
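//------------------------------------------------------------------------
// getLsraStatFromScore: Maps a register selection heuristic (RegisterScore)
// to its corresponding LsraStat, defaulting to STAT_FREE.
//
// Arguments:
//    registerScore - the RegisterScore to map
//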
LsraStat LinearScan::getLsraStatFromScore(RegisterScore registerScore)
{
switch (registerScore)
{
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) \
case RegisterScore::stat: \
return LsraStat::STAT_##stat;
#include "lsra_score.h"
#undef REG_SEL_DEF
default:
return LsraStat::STAT_FREE;
}
}
// ----------------------------------------------------------
// updateLsraStat: Increment LSRA stat counter.
//
// Arguments:
// stat - LSRA stat enum
//      bbNum - Basic block with which the LSRA stat is to be associated.
//
void LinearScan::updateLsraStat(LsraStat stat, unsigned bbNum)
{
if (bbNum > bbNumMaxBeforeResolution)
{
// This is a newly created basic block as part of resolution.
// These blocks contain resolution moves that are already accounted for.
return;
}
++(blockInfo[bbNum].stats[(unsigned)stat]);
}
// -----------------------------------------------------------
// dumpLsraStats - dumps Lsra stats to given file.
//
// Arguments:
// file - file to which stats are to be written.
//
void LinearScan::dumpLsraStats(FILE* file)
{
unsigned sumStats[LsraStat::COUNT] = {0};
weight_t wtdStats[LsraStat::COUNT] = {0};
fprintf(file, "----------\n");
fprintf(file, "LSRA Stats");
#ifdef DEBUG
if (!VERBOSE)
{
fprintf(file, " : %s\n", compiler->info.compFullName);
}
else
{
// In verbose mode no need to print full name
// while printing lsra stats.
fprintf(file, "\n");
}
#else
fprintf(file, " : %s\n", compiler->eeGetMethodFullName(compiler->info.compCompHnd));
#endif
fprintf(file, "----------\n");
#ifdef DEBUG
fprintf(file, "Register selection order: %S\n",
JitConfig.JitLsraOrdering() == nullptr ? W("ABCDEFGHIJKLMNOPQ") : JitConfig.JitLsraOrdering());
#endif
fprintf(file, "Total Tracked Vars: %d\n", compiler->lvaTrackedCount);
fprintf(file, "Total Reg Cand Vars: %d\n", regCandidateVarCount);
fprintf(file, "Total number of Intervals: %d\n",
static_cast<unsigned>((intervals.size() == 0 ? 0 : (intervals.size() - 1))));
fprintf(file, "Total number of RefPositions: %d\n", static_cast<unsigned>(refPositions.size() - 1));
// compute total number of spill temps created
unsigned numSpillTemps = 0;
for (int i = 0; i < TYP_COUNT; i++)
{
numSpillTemps += maxSpill[i];
}
fprintf(file, "Total Number of spill temps created: %d\n", numSpillTemps);
fprintf(file, "..........\n");
bool addedBlockHeader = false;
bool anyNonZeroStat = false;
// Iterate for block 0
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
unsigned lsraStat = blockInfo[0].stats[statIndex];
if (lsraStat != 0)
{
if (!addedBlockHeader)
{
addedBlockHeader = true;
fprintf(file, FMT_BB " [%8.2f]: ", 0, blockInfo[0].weight);
fprintf(file, "%s = %d", getStatName(statIndex), lsraStat);
}
else
{
fprintf(file, ", %s = %d", getStatName(statIndex), lsraStat);
}
sumStats[statIndex] += lsraStat;
wtdStats[statIndex] += (lsraStat * blockInfo[0].weight);
anyNonZeroStat = true;
}
}
if (anyNonZeroStat)
{
fprintf(file, "\n");
}
// Iterate for remaining blocks
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
continue;
}
addedBlockHeader = false;
anyNonZeroStat = false;
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
unsigned lsraStat = blockInfo[block->bbNum].stats[statIndex];
if (lsraStat != 0)
{
if (!addedBlockHeader)
{
addedBlockHeader = true;
fprintf(file, FMT_BB " [%8.2f]: ", block->bbNum, block->bbWeight);
fprintf(file, "%s = %d", getStatName(statIndex), lsraStat);
}
else
{
fprintf(file, ", %s = %d", getStatName(statIndex), lsraStat);
}
sumStats[statIndex] += lsraStat;
wtdStats[statIndex] += (lsraStat * block->bbWeight);
anyNonZeroStat = true;
}
}
if (anyNonZeroStat)
{
fprintf(file, "\n");
}
}
fprintf(file, "..........\n");
for (int regSelectI = 0; regSelectI < LsraStat::COUNT; regSelectI++)
{
if (regSelectI == firstRegSelStat)
{
fprintf(file, "..........\n");
}
if ((regSelectI < firstRegSelStat) || (sumStats[regSelectI] != 0))
{
// Print register selection stats
if (regSelectI >= firstRegSelStat)
{
fprintf(file, "Total %s [#%2d] : %d Weighted: %f\n", getStatName(regSelectI),
(regSelectI - firstRegSelStat + 1), sumStats[regSelectI], wtdStats[regSelectI]);
}
else
{
fprintf(file, "Total %s : %d Weighted: %f\n", getStatName(regSelectI), sumStats[regSelectI],
wtdStats[regSelectI]);
}
}
}
printf("\n");
}
// -----------------------------------------------------------
// dumpLsraStatsCsv - dumps Lsra stats to given file in csv format.
//
// Arguments:
// file - file to which stats are to be written.
//
void LinearScan::dumpLsraStatsCsv(FILE* file)
{
unsigned sumStats[LsraStat::COUNT] = {0};
// Write the header if the file is empty
if (ftell(file) == 0)
{
// header
fprintf(file, "\"Method Name\"");
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
fprintf(file, ",\"%s\"", LinearScan::getStatName(statIndex));
}
fprintf(file, ",\"PerfScore\"\n");
}
// bbNum == 0
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
sumStats[statIndex] += blockInfo[0].stats[statIndex];
}
// blocks
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
continue;
}
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
sumStats[statIndex] += blockInfo[block->bbNum].stats[statIndex];
}
}
fprintf(file, "\"%s\"", compiler->info.compFullName);
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
fprintf(file, ",%u", sumStats[statIndex]);
}
fprintf(file, ",%.2f\n", compiler->info.compPerfScore);
}
// -----------------------------------------------------------
// dumpLsraStatsSummary - dumps Lsra stats summary to given file
//
// Arguments:
// file - file to which stats are to be written.
//
void LinearScan::dumpLsraStatsSummary(FILE* file)
{
unsigned sumStats[LsraStat::STAT_FREE] = {0};
weight_t wtdStats[LsraStat::STAT_FREE] = {0.0};
// Iterate for block 0
for (int statIndex = 0; statIndex < LsraStat::STAT_FREE; statIndex++)
{
unsigned lsraStat = blockInfo[0].stats[statIndex];
sumStats[statIndex] += lsraStat;
wtdStats[statIndex] += (lsraStat * blockInfo[0].weight);
}
// Iterate for remaining blocks
for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
continue;
}
for (int statIndex = 0; statIndex < LsraStat::STAT_FREE; statIndex++)
{
unsigned lsraStat = blockInfo[block->bbNum].stats[statIndex];
sumStats[statIndex] += lsraStat;
wtdStats[statIndex] += (lsraStat * block->bbWeight);
}
}
for (int regSelectI = 0; regSelectI < LsraStat::STAT_FREE; regSelectI++)
{
fprintf(file, ", %s %u %sWt %f", getStatName(regSelectI), sumStats[regSelectI], getStatName(regSelectI),
wtdStats[regSelectI]);
}
}
#endif // TRACK_LSRA_STATS
#ifdef DEBUG
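//------------------------------------------------------------------------
// dumpRegMask: Dump a register mask, using a shorthand name for the common
// "all registers" masks.
//
// Arguments:
//    regs - the register mask to dump
//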
void dumpRegMask(regMaskTP regs)
{
if (regs == RBM_ALLINT)
{
printf("[allInt]");
}
else if (regs == (RBM_ALLINT & ~RBM_FPBASE))
{
printf("[allIntButFP]");
}
else if (regs == RBM_ALLFLOAT)
{
printf("[allFloat]");
}
else if (regs == RBM_ALLDOUBLE)
{
printf("[allDouble]");
}
else
{
dspRegMask(regs);
}
}
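//------------------------------------------------------------------------
// getRefTypeName: Returns the full name of the given RefType, or nullptr if it is not recognized.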
static const char* getRefTypeName(RefType refType)
{
switch (refType)
{
#define DEF_REFTYPE(memberName, memberValue, shortName) \
case memberName: \
return #memberName;
#include "lsra_reftypes.h"
#undef DEF_REFTYPE
default:
return nullptr;
}
}
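//------------------------------------------------------------------------
// getRefTypeShortName: Returns the short name of the given RefType, or nullptr if it is not recognized.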
static const char* getRefTypeShortName(RefType refType)
{
switch (refType)
{
#define DEF_REFTYPE(memberName, memberValue, shortName) \
case memberName: \
return shortName;
#include "lsra_reftypes.h"
#undef DEF_REFTYPE
default:
return nullptr;
}
}
//------------------------------------------------------------------------
// getScoreName: Returns the textual name of the given register selection score.
const char* LinearScan::getScoreName(RegisterScore score)
{
switch (score)
{
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) \
case stat: \
return shortname;
#include "lsra_score.h"
#undef REG_SEL_DEF
default:
return " - ";
}
}
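//------------------------------------------------------------------------
// RefPosition::dump: Dump this RefPosition, including its referent (interval or
// physical register), flags, and weight.
//
// Arguments:
//    linearScan - the LinearScan instance, used to compute the weight
//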
void RefPosition::dump(LinearScan* linearScan)
{
printf("<RefPosition #%-3u @%-3u", rpNum, nodeLocation);
printf(" %s ", getRefTypeName(refType));
if (this->IsPhysRegRef())
{
this->getReg()->tinyDump();
}
else if (getInterval())
{
this->getInterval()->tinyDump();
}
if (this->treeNode)
{
printf("%s", treeNode->OpName(treeNode->OperGet()));
if (this->treeNode->IsMultiRegNode())
{
printf("[%d]", this->multiRegIdx);
}
}
printf(" " FMT_BB " ", this->bbNum);
printf("regmask=");
dumpRegMask(registerAssignment);
printf(" minReg=%d", minRegCandidateCount);
if (this->lastUse)
{
printf(" last");
}
if (this->reload)
{
printf(" reload");
}
if (this->spillAfter)
{
printf(" spillAfter");
}
if (this->singleDefSpill)
{
printf(" singleDefSpill");
}
if (this->writeThru)
{
printf(" writeThru");
}
if (this->moveReg)
{
printf(" move");
}
if (this->copyReg)
{
printf(" copy");
}
if (this->isFixedRegRef)
{
printf(" fixed");
}
if (this->isLocalDefUse)
{
printf(" local");
}
if (this->delayRegFree)
{
printf(" delay");
}
if (this->outOfOrder)
{
printf(" outOfOrder");
}
if (this->RegOptional())
{
printf(" regOptional");
}
printf(" wt=%.2f", linearScan->getWeight(this));
printf(">\n");
}
void RegRecord::dump()
{
tinyDump();
}
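//------------------------------------------------------------------------
// Interval::dump: Dump this Interval, including its flags, RefPositions,
// assigned register, preferences and related interval.
//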
void Interval::dump()
{
printf("Interval %2u:", intervalIndex);
if (isLocalVar)
{
printf(" (V%02u)", varNum);
}
else if (IsUpperVector())
{
assert(relatedInterval != nullptr);
printf(" (U%02u)", relatedInterval->varNum);
}
printf(" %s", varTypeName(registerType));
if (isInternal)
{
printf(" (INTERNAL)");
}
if (isSpilled)
{
printf(" (SPILLED)");
}
if (isSplit)
{
printf(" (SPLIT)");
}
if (isStructField)
{
printf(" (field)");
}
if (isPromotedStruct)
{
printf(" (promoted struct)");
}
if (hasConflictingDefUse)
{
printf(" (def-use conflict)");
}
if (hasInterferingUses)
{
printf(" (interfering uses)");
}
if (isSpecialPutArg)
{
printf(" (specialPutArg)");
}
if (isConstant)
{
printf(" (constant)");
}
if (isWriteThru)
{
printf(" (writeThru)");
}
printf(" RefPositions {");
for (RefPosition* refPosition = this->firstRefPosition; refPosition != nullptr;
refPosition = refPosition->nextRefPosition)
{
printf("#%u@%u", refPosition->rpNum, refPosition->nodeLocation);
if (refPosition->nextRefPosition)
{
printf(" ");
}
}
printf("}");
// this is not used (yet?)
// printf(" SpillOffset %d", this->spillOffset);
printf(" physReg:%s", getRegName(physReg));
printf(" Preferences=");
dumpRegMask(this->registerPreferences);
if (relatedInterval)
{
printf(" RelatedInterval ");
relatedInterval->microDump();
}
printf("\n");
}
// print out very concise representation
void Interval::tinyDump()
{
printf("<Ivl:%u", intervalIndex);
if (isLocalVar)
{
printf(" V%02u", varNum);
}
else if (IsUpperVector())
{
assert(relatedInterval != nullptr);
printf(" (U%02u)", relatedInterval->varNum);
}
else if (isInternal)
{
printf(" internal");
}
printf("> ");
}
// print out extremely concise representation
void Interval::microDump()
{
if (isLocalVar)
{
printf("<V%02u/L%u>", varNum, intervalIndex);
return;
}
else if (IsUpperVector())
{
assert(relatedInterval != nullptr);
printf(" (U%02u)", relatedInterval->varNum);
}
char intervalTypeChar = 'I';
if (isInternal)
{
intervalTypeChar = 'T';
}
printf("<%c%u>", intervalTypeChar, intervalIndex);
}
void RegRecord::tinyDump()
{
printf("<Reg:%-3s> ", getRegName(regNum));
}
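//------------------------------------------------------------------------
// dumpDefList: Under VERBOSE, dump the nodes currently on the defList, i.e.
// those whose defs have not yet been consumed.
//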
void LinearScan::dumpDefList()
{
if (!VERBOSE)
{
return;
}
JITDUMP("DefList: { ");
bool first = true;
for (RefInfoListNode *listNode = defList.Begin(), *end = defList.End(); listNode != end;
listNode = listNode->Next())
{
GenTree* node = listNode->treeNode;
JITDUMP("%sN%03u.t%d. %s", first ? "" : "; ", node->gtSeqNum, node->gtTreeID, GenTree::OpName(node->OperGet()));
first = false;
}
JITDUMP(" }\n");
}
void LinearScan::lsraDumpIntervals(const char* msg)
{
printf("\nLinear scan intervals %s:\n", msg);
for (Interval& interval : intervals)
{
// only dump something if it has references
// if (interval->firstRefPosition)
interval.dump();
}
printf("\n");
}
// Dumps a tree node as a destination or source operand, with the style
// of dump dependent on the mode
void LinearScan::lsraGetOperandString(GenTree* tree,
LsraTupleDumpMode mode,
char* operandString,
unsigned operandStringLength)
{
const char* lastUseChar = "";
if (tree->OperIsScalarLocal() && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
lastUseChar = "*";
}
switch (mode)
{
case LinearScan::LSRA_DUMP_PRE:
case LinearScan::LSRA_DUMP_REFPOS:
_snprintf_s(operandString, operandStringLength, operandStringLength, "t%d%s", tree->gtTreeID, lastUseChar);
break;
case LinearScan::LSRA_DUMP_POST:
{
Compiler* compiler = JitTls::GetCompiler();
if (!tree->gtHasReg(compiler))
{
_snprintf_s(operandString, operandStringLength, operandStringLength, "STK%s", lastUseChar);
}
else
{
int charCount = _snprintf_s(operandString, operandStringLength, operandStringLength, "%s%s",
getRegName(tree->GetRegNum()), lastUseChar);
operandString += charCount;
operandStringLength -= charCount;
if (tree->IsMultiRegNode())
{
unsigned regCount = tree->GetMultiRegCount(compiler);
for (unsigned regIndex = 1; regIndex < regCount; regIndex++)
{
charCount = _snprintf_s(operandString, operandStringLength, operandStringLength, ",%s%s",
getRegName(tree->GetRegByIndex(regIndex)), lastUseChar);
operandString += charCount;
operandStringLength -= charCount;
}
}
}
}
break;
default:
printf("ERROR: INVALID TUPLE DUMP MODE\n");
break;
}
}
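//------------------------------------------------------------------------
// lsraDispNode: Dump a single node in the tuple-style dump, including its spill
// status, its destination operand (if any), and its lclVar information.
//
// Arguments:
//    tree    - the node to dump
//    mode    - the tuple dump mode (pre-LSRA, RefPositions, or post-LSRA)
//    hasDest - true if the node produces a value to be consumed by a parent
//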
void LinearScan::lsraDispNode(GenTree* tree, LsraTupleDumpMode mode, bool hasDest)
{
Compiler* compiler = JitTls::GetCompiler();
const unsigned operandStringLength = 6 * MAX_MULTIREG_COUNT + 1;
char operandString[operandStringLength];
const char* emptyDestOperand = " ";
char spillChar = ' ';
if (mode == LinearScan::LSRA_DUMP_POST)
{
if ((tree->gtFlags & GTF_SPILL) != 0)
{
spillChar = 'S';
}
if (!hasDest && tree->gtHasReg(compiler))
{
// A node can define a register, but not produce a value for a parent to consume,
// i.e. in the "localDefUse" case.
// There used to be an assert here that we wouldn't spill such a node.
// However, we can have unused lclVars that wind up being the node at which
// it is spilled. This probably indicates a bug, but we don't really want to
// assert during a dump.
if (spillChar == 'S')
{
spillChar = '$';
}
else
{
spillChar = '*';
}
hasDest = true;
}
}
printf("%c N%03u. ", spillChar, tree->gtSeqNum);
LclVarDsc* varDsc = nullptr;
unsigned varNum = UINT_MAX;
if (tree->IsLocal())
{
varNum = tree->AsLclVarCommon()->GetLclNum();
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvLRACandidate)
{
hasDest = false;
}
}
if (hasDest)
{
if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED)
{
assert(tree->gtHasReg(compiler));
}
lsraGetOperandString(tree, mode, operandString, operandStringLength);
printf("%-15s =", operandString);
}
else
{
printf("%-15s ", emptyDestOperand);
}
if (varDsc != nullptr)
{
if (varDsc->lvLRACandidate)
{
if (mode == LSRA_DUMP_REFPOS)
{
printf(" V%02u(L%d)", varNum, getIntervalForLocalVar(varDsc->lvVarIndex)->intervalIndex);
}
else
{
lsraGetOperandString(tree, mode, operandString, operandStringLength);
printf(" V%02u(%s)", varNum, operandString);
if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED)
{
printf("R");
}
}
}
else
{
printf(" V%02u MEM", varNum);
}
}
else if (tree->OperIs(GT_ASG))
{
assert(!tree->gtHasReg(compiler));
printf(" asg%s ", GenTree::OpName(tree->OperGet()));
}
else
{
compiler->gtDispNodeName(tree);
if (tree->OperKind() & GTK_LEAF)
{
compiler->gtDispLeaf(tree, nullptr);
}
}
}
//------------------------------------------------------------------------
// DumpOperandDefs: dumps the registers defined by a node, recursing into any
// contained operands.
//
// Arguments:
//    operand             - The operand whose defs are to be dumped.
//    first               - True if no operand has yet been printed for this node; updated as output is printed.
//    mode                - The tuple dump mode (pre-LSRA, RefPositions, or post-LSRA).
//    operandString       - The buffer used to format operand names.
//    operandStringLength - The length of that buffer.
//
// Returns:
//    None.
//
void LinearScan::DumpOperandDefs(
GenTree* operand, bool& first, LsraTupleDumpMode mode, char* operandString, const unsigned operandStringLength)
{
assert(operand != nullptr);
assert(operandString != nullptr);
if (operand->OperIs(GT_ARGPLACE))
{
return;
}
int dstCount = ComputeOperandDstCount(operand);
if (dstCount != 0)
{
// This operand directly produces registers; print it.
if (!first)
{
printf(",");
}
lsraGetOperandString(operand, mode, operandString, operandStringLength);
printf("%s", operandString);
first = false;
}
else if (operand->isContained())
{
// This is a contained node. Dump the defs produced by its operands.
for (GenTree* op : operand->Operands())
{
DumpOperandDefs(op, first, mode, operandString, operandStringLength);
}
}
}
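//------------------------------------------------------------------------
// TupleStyleDump: Dump the IR in "tuple" style, one node per line.
//
// Arguments:
//    mode - determines whether the dump shows only the nodes (LSRA_DUMP_PRE),
//           the RefPositions created for them (LSRA_DUMP_REFPOS), or the final
//           register assignments (LSRA_DUMP_POST).
//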
void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
{
BasicBlock* block;
LsraLocation currentLoc = 1; // 0 is the entry
const unsigned operandStringLength = 6 * MAX_MULTIREG_COUNT + 1;
char operandString[operandStringLength];
// currentRefPosition is not used for LSRA_DUMP_PRE
// We keep separate iterators for defs, so that we can print them
// on the lhs of the dump
RefPositionIterator refPosIterator = refPositions.begin();
RefPosition* currentRefPosition = &refPosIterator;
switch (mode)
{
case LSRA_DUMP_PRE:
printf("TUPLE STYLE DUMP BEFORE LSRA\n");
break;
case LSRA_DUMP_REFPOS:
printf("TUPLE STYLE DUMP WITH REF POSITIONS\n");
break;
case LSRA_DUMP_POST:
printf("TUPLE STYLE DUMP WITH REGISTER ASSIGNMENTS\n");
break;
default:
printf("ERROR: INVALID TUPLE DUMP MODE\n");
return;
}
if (mode != LSRA_DUMP_PRE)
{
printf("Incoming Parameters: ");
for (; refPosIterator != refPositions.end() && currentRefPosition->refType != RefTypeBB;
++refPosIterator, currentRefPosition = &refPosIterator)
{
Interval* interval = currentRefPosition->getInterval();
assert(interval != nullptr && interval->isLocalVar);
printf(" V%02d", interval->varNum);
if (mode == LSRA_DUMP_POST)
{
regNumber reg;
if (currentRefPosition->registerAssignment == RBM_NONE)
{
reg = REG_STK;
}
else
{
reg = currentRefPosition->assignedReg();
}
const LclVarDsc* varDsc = compiler->lvaGetDesc(interval->varNum);
printf("(");
regNumber assignedReg = varDsc->GetRegNum();
regNumber argReg = (varDsc->lvIsRegArg) ? varDsc->GetArgReg() : REG_STK;
assert(reg == assignedReg || varDsc->lvRegister == false);
if (reg != argReg)
{
printf(getRegName(argReg));
printf("=>");
}
printf("%s)", getRegName(reg));
}
}
printf("\n");
}
for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
{
currentLoc += 2;
if (mode == LSRA_DUMP_REFPOS)
{
bool printedBlockHeader = false;
// We should find the boundary RefPositions in the order of exposed uses, dummy defs, and the blocks
for (; refPosIterator != refPositions.end() &&
(currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef ||
(currentRefPosition->refType == RefTypeBB && !printedBlockHeader));
++refPosIterator, currentRefPosition = &refPosIterator)
{
Interval* interval = nullptr;
if (currentRefPosition->isIntervalRef())
{
interval = currentRefPosition->getInterval();
}
switch (currentRefPosition->refType)
{
case RefTypeExpUse:
assert(interval != nullptr);
assert(interval->isLocalVar);
printf(" Exposed use of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum);
break;
case RefTypeDummyDef:
assert(interval != nullptr);
assert(interval->isLocalVar);
printf(" Dummy def of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum);
break;
case RefTypeBB:
block->dspBlockHeader(compiler);
printedBlockHeader = true;
printf("=====\n");
break;
default:
printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum);
break;
}
}
}
else
{
block->dspBlockHeader(compiler);
printf("=====\n");
}
if (enregisterLocalVars && mode == LSRA_DUMP_POST && block != compiler->fgFirstBB &&
block->bbNum <= bbNumMaxBeforeResolution)
{
printf("Predecessor for variable locations: " FMT_BB "\n", blockInfo[block->bbNum].predBBNum);
dumpInVarToRegMap(block);
}
if (block->bbNum > bbNumMaxBeforeResolution)
{
SplitEdgeInfo splitEdgeInfo;
splitBBNumToTargetBBNumMap->Lookup(block->bbNum, &splitEdgeInfo);
assert(splitEdgeInfo.toBBNum <= bbNumMaxBeforeResolution);
assert(splitEdgeInfo.fromBBNum <= bbNumMaxBeforeResolution);
printf("New block introduced for resolution from " FMT_BB " to " FMT_BB "\n", splitEdgeInfo.fromBBNum,
splitEdgeInfo.toBBNum);
}
for (GenTree* node : LIR::AsRange(block))
{
GenTree* tree = node;
int produce = tree->IsValue() ? ComputeOperandDstCount(tree) : 0;
int consume = ComputeAvailableSrcCount(tree);
lsraDispNode(tree, mode, produce != 0 && mode != LSRA_DUMP_REFPOS);
if (mode != LSRA_DUMP_REFPOS)
{
if (consume > 0)
{
printf("; ");
bool first = true;
for (GenTree* operand : tree->Operands())
{
DumpOperandDefs(operand, first, mode, operandString, operandStringLength);
}
}
}
else
{
// Print each RefPosition on a new line, printing all the kills for each node
// on a single line, and combining the fixed regs with their associated def or use.
bool killPrinted = false;
RefPosition* lastFixedRegRefPos = nullptr;
for (; refPosIterator != refPositions.end() &&
(currentRefPosition->refType == RefTypeUse || currentRefPosition->refType == RefTypeFixedReg ||
currentRefPosition->refType == RefTypeKill || currentRefPosition->refType == RefTypeDef) &&
(currentRefPosition->nodeLocation == tree->gtSeqNum ||
currentRefPosition->nodeLocation == tree->gtSeqNum + 1);
++refPosIterator, currentRefPosition = &refPosIterator)
{
Interval* interval = nullptr;
if (currentRefPosition->isIntervalRef())
{
interval = currentRefPosition->getInterval();
}
switch (currentRefPosition->refType)
{
case RefTypeUse:
if (currentRefPosition->IsPhysRegRef())
{
printf("\n Use:R%d(#%d)",
currentRefPosition->getReg()->regNum, currentRefPosition->rpNum);
}
else
{
assert(interval != nullptr);
printf("\n Use:");
interval->microDump();
printf("(#%d)", currentRefPosition->rpNum);
if (currentRefPosition->isFixedRegRef && !interval->isInternal)
{
assert(genMaxOneBit(currentRefPosition->registerAssignment));
assert(lastFixedRegRefPos != nullptr);
printf(" Fixed:%s(#%d)", getRegName(currentRefPosition->assignedReg()),
lastFixedRegRefPos->rpNum);
lastFixedRegRefPos = nullptr;
}
if (currentRefPosition->isLocalDefUse)
{
printf(" LocalDefUse");
}
if (currentRefPosition->lastUse)
{
printf(" *");
}
}
break;
case RefTypeDef:
{
// Print each def on a new line
assert(interval != nullptr);
printf("\n Def:");
interval->microDump();
printf("(#%d)", currentRefPosition->rpNum);
if (currentRefPosition->isFixedRegRef)
{
assert(genMaxOneBit(currentRefPosition->registerAssignment));
printf(" %s", getRegName(currentRefPosition->assignedReg()));
}
if (currentRefPosition->isLocalDefUse)
{
printf(" LocalDefUse");
}
if (currentRefPosition->lastUse)
{
printf(" *");
}
if (interval->relatedInterval != nullptr)
{
printf(" Pref:");
interval->relatedInterval->microDump();
}
}
break;
case RefTypeKill:
if (!killPrinted)
{
printf("\n Kill: ");
killPrinted = true;
}
printf(getRegName(currentRefPosition->assignedReg()));
printf(" ");
break;
case RefTypeFixedReg:
lastFixedRegRefPos = currentRefPosition;
break;
default:
printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum);
break;
}
}
}
printf("\n");
}
if (enregisterLocalVars && mode == LSRA_DUMP_POST)
{
dumpOutVarToRegMap(block);
}
printf("\n");
}
printf("\n\n");
}
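//------------------------------------------------------------------------
// dumpLsraAllocationEvent: Under VERBOSE, dump a single allocation event as part
// of the column-based dump of the register state.
//
// Arguments:
//    event         - the allocation event being dumped
//    interval      - the Interval involved, if any
//    reg           - the register involved, if any
//    currentBlock  - the block currently being allocated
//    registerScore - the heuristic that selected the register, if applicable
//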
void LinearScan::dumpLsraAllocationEvent(
LsraDumpEvent event, Interval* interval, regNumber reg, BasicBlock* currentBlock, RegisterScore registerScore)
{
if (!(VERBOSE))
{
return;
}
if ((interval != nullptr) && (reg != REG_NA) && (reg != REG_STK))
{
registersToDump |= getRegMask(reg, interval->registerType);
dumpRegRecordTitleIfNeeded();
}
switch (event)
{
// Conflicting def/use
case LSRA_EVENT_DEFUSE_CONFLICT:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("DUconflict ");
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE1:
printf(indentFormat, " Case #1 use defRegAssignment");
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE2:
printf(indentFormat, " Case #2 use useRegAssignment");
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE3:
printf(indentFormat, " Case #3 use useRegAssignment");
dumpRegRecords();
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE4:
printf(indentFormat, " Case #4 use defRegAssignment");
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE5:
printf(indentFormat, " Case #5 set def to all regs");
dumpRegRecords();
break;
case LSRA_EVENT_DEFUSE_CASE6:
printf(indentFormat, " Case #6 need a copy");
dumpRegRecords();
if (interval == nullptr)
{
printf(indentFormat, " NULL interval");
dumpRegRecords();
}
else if (interval->firstRefPosition->multiRegIdx != 0)
{
printf(indentFormat, " (multiReg)");
dumpRegRecords();
}
break;
case LSRA_EVENT_SPILL:
dumpRefPositionShort(activeRefPosition, currentBlock);
assert(interval != nullptr && interval->assignedReg != nullptr);
printf("Spill %-4s ", getRegName(interval->assignedReg->regNum));
dumpRegRecords();
break;
// Restoring the previous register
case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL:
case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL:
assert(interval != nullptr);
if ((activeRefPosition == nullptr) || (activeRefPosition->refType == RefTypeBB))
{
printf(emptyRefPositionFormat, "");
}
else
{
dumpRefPositionShort(activeRefPosition, currentBlock);
}
printf((event == LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL) ? "Restr %-4s " : "SRstr %-4s ",
getRegName(reg));
dumpRegRecords();
break;
case LSRA_EVENT_DONE_KILL_GC_REFS:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("Done ");
break;
case LSRA_EVENT_NO_GC_KILLS:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("None ");
break;
// Block boundaries
case LSRA_EVENT_START_BB:
// The RefTypeBB comes after the RefTypeDummyDefs associated with that block,
// so we may have a RefTypeDummyDef at the time we dump this event.
// In that case we'll have another "EVENT" associated with it, so we need to
// print the full line now.
if (activeRefPosition->refType != RefTypeBB)
{
dumpNewBlock(currentBlock, activeRefPosition->nodeLocation);
dumpRegRecords();
}
else
{
dumpRefPositionShort(activeRefPosition, currentBlock);
}
break;
// Allocation decisions
case LSRA_EVENT_NEEDS_NEW_REG:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("Free %-4s ", getRegName(reg));
dumpRegRecords();
break;
case LSRA_EVENT_ZERO_REF:
assert(interval != nullptr && interval->isLocalVar);
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("NoRef ");
dumpRegRecords();
break;
case LSRA_EVENT_FIXED_REG:
case LSRA_EVENT_EXP_USE:
case LSRA_EVENT_KEPT_ALLOCATION:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("Keep %-4s ", getRegName(reg));
break;
case LSRA_EVENT_COPY_REG:
assert(interval != nullptr && interval->recentRefPosition != nullptr);
dumpRefPositionShort(activeRefPosition, currentBlock);
if (allocationPassComplete || (registerScore == 0))
{
printf("Copy %-4s ", getRegName(reg));
}
else
{
printf("%-5s(C) %-4s ", getScoreName(registerScore), getRegName(reg));
}
break;
case LSRA_EVENT_MOVE_REG:
assert(interval != nullptr && interval->recentRefPosition != nullptr);
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("Move %-4s ", getRegName(reg));
dumpRegRecords();
break;
case LSRA_EVENT_ALLOC_REG:
dumpRefPositionShort(activeRefPosition, currentBlock);
if (allocationPassComplete || (registerScore == 0))
{
printf("Alloc %-4s ", getRegName(reg));
}
else
{
printf("%-5s(A) %-4s ", getScoreName(registerScore), getRegName(reg));
}
break;
case LSRA_EVENT_REUSE_REG:
dumpRefPositionShort(activeRefPosition, currentBlock);
if (allocationPassComplete || (registerScore == 0))
{
printf("Reuse %-4s ", getRegName(reg));
}
else
{
printf("%-5s(A) %-4s ", getScoreName(registerScore), getRegName(reg));
}
break;
case LSRA_EVENT_NO_ENTRY_REG_ALLOCATED:
assert(interval != nullptr && interval->isLocalVar);
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("LoRef ");
break;
case LSRA_EVENT_NO_REG_ALLOCATED:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("NoReg ");
break;
case LSRA_EVENT_RELOAD:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("ReLod %-4s ", getRegName(reg));
dumpRegRecords();
break;
case LSRA_EVENT_SPECIAL_PUTARG:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("PtArg %-4s ", getRegName(reg));
break;
case LSRA_EVENT_UPPER_VECTOR_SAVE:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("UVSav %-4s ", getRegName(reg));
break;
case LSRA_EVENT_UPPER_VECTOR_RESTORE:
dumpRefPositionShort(activeRefPosition, currentBlock);
printf("UVRes %-4s ", getRegName(reg));
break;
// We currently don't dump anything for these events.
case LSRA_EVENT_DEFUSE_FIXED_DELAY_USE:
case LSRA_EVENT_SPILL_EXTENDED_LIFETIME:
case LSRA_EVENT_END_BB:
case LSRA_EVENT_FREE_REGS:
case LSRA_EVENT_INCREMENT_RANGE_END:
case LSRA_EVENT_LAST_USE:
case LSRA_EVENT_LAST_USE_DELAYED:
break;
default:
printf("????? %-4s ", getRegName(reg));
dumpRegRecords();
break;
}
}
//------------------------------------------------------------------------
// dumpRegRecordHeader: Dump the header for a column-based dump of the register state.
//
// Arguments:
// None.
//
// Return Value:
// None.
//
// Assumptions:
// Reg names fit in 4 characters (minimum width of the columns)
//
// Notes:
// In order to make the table as dense as possible (for ease of reading the dumps),
// we determine the minimum regColumnWidth required to represent:
// regs, by name (e.g. eax or xmm0) - this is fixed at 4 characters.
// intervals, as Vnn for lclVar intervals, or as I<num> for other intervals.
// The table is indented by the amount needed for dumpRefPositionShort, which is
// captured in shortRefPositionDumpWidth.
//
void LinearScan::dumpRegRecordHeader()
{
printf("The following table has one or more rows for each RefPosition that is handled during allocation.\n"
"The first column provides the basic information about the RefPosition, with its type (e.g. Def,\n"
"Use, Fixd) followed by a '*' if it is a last use, and a 'D' if it is delayRegFree, and then the\n"
"action taken during allocation (e.g. Alloc a new register, or Keep an existing one).\n"
"The subsequent columns show the Interval occupying each register, if any, followed by 'a' if it is\n"
"active, a 'p' if it is a large vector that has been partially spilled, and 'i'if it is inactive.\n"
"Columns are only printed up to the last modifed register, which may increase during allocation,\n"
"in which case additional columns will appear. \n"
"Registers which are not marked modified have ---- in their column.\n\n");
// First, determine the width of each register column (which holds a reg name in the
// header, and an interval name in each subsequent row).
int intervalNumberWidth = (int)log10((double)intervals.size()) + 1;
// The regColumnWidth includes the identifying character (I or V) and an 'i', 'p' or 'a' (inactive,
// partially-spilled or active)
regColumnWidth = intervalNumberWidth + 2;
if (regColumnWidth < 4)
{
regColumnWidth = 4;
}
sprintf_s(intervalNameFormat, MAX_FORMAT_CHARS, "%%c%%-%dd", regColumnWidth - 2);
sprintf_s(regNameFormat, MAX_FORMAT_CHARS, "%%-%ds", regColumnWidth);
// Next, determine the width of the short RefPosition (see dumpRefPositionShort()).
// This is in the form:
// nnn.#mmm NAME TYPEld
// Where:
// nnn is the Location, right-justified to the width needed for the highest location.
// mmm is the RefPosition rpNum, left-justified to the width needed for the highest rpNum.
// NAME is dumped by dumpReferentName(), and is "regColumnWidth".
// TYPE is RefTypeNameShort, and is 4 characters
// l is either '*' (if a last use) or ' ' (otherwise)
// d is either 'D' (if a delayed use) or ' ' (otherwise)
maxNodeLocation = (maxNodeLocation == 0)
? 1
: maxNodeLocation; // corner case of a method with an infinite loop without any gentree nodes
assert(maxNodeLocation >= 1);
assert(refPositions.size() >= 1);
int nodeLocationWidth = (int)log10((double)maxNodeLocation) + 1;
int refPositionWidth = (int)log10((double)refPositions.size()) + 1;
int refTypeInfoWidth = 4 /*TYPE*/ + 2 /* last-use and delayed */ + 1 /* space */;
int locationAndRPNumWidth = nodeLocationWidth + 2 /* .# */ + refPositionWidth + 1 /* space */;
int shortRefPositionDumpWidth = locationAndRPNumWidth + regColumnWidth + 1 /* space */ + refTypeInfoWidth;
sprintf_s(shortRefPositionFormat, MAX_FORMAT_CHARS, "%%%dd.#%%-%dd ", nodeLocationWidth, refPositionWidth);
sprintf_s(emptyRefPositionFormat, MAX_FORMAT_CHARS, "%%-%ds", shortRefPositionDumpWidth);
// The width of the "allocation info"
// - an 8-character allocation decision
// - a space
// - a 4-character register
// - a space
int allocationInfoWidth = 8 + 1 + 4 + 1;
// Next, determine the width of the legend for each row. This includes:
// - a short RefPosition dump (shortRefPositionDumpWidth), which includes a space
// - the allocation info (allocationInfoWidth), which also includes a space
regTableIndent = shortRefPositionDumpWidth + allocationInfoWidth;
// BBnn printed left-justified in the NAME Typeld and allocationInfo space.
int bbNumWidth = (int)log10((double)compiler->fgBBNumMax) + 1;
// In the unlikely event that BB numbers overflow the space, we'll simply omit the predBB
int predBBNumDumpSpace = regTableIndent - locationAndRPNumWidth - bbNumWidth - 9; // 'BB' + ' PredBB'
if (predBBNumDumpSpace < bbNumWidth)
{
sprintf_s(bbRefPosFormat, MAX_LEGEND_FORMAT_CHARS, "BB%%-%dd", shortRefPositionDumpWidth - 2);
}
else
{
sprintf_s(bbRefPosFormat, MAX_LEGEND_FORMAT_CHARS, "BB%%-%dd PredBB%%-%dd", bbNumWidth, predBBNumDumpSpace);
}
if (compiler->shouldDumpASCIITrees())
{
columnSeparator = "|";
line = "-";
leftBox = "+";
middleBox = "+";
rightBox = "+";
}
else
{
columnSeparator = "\xe2\x94\x82";
line = "\xe2\x94\x80";
leftBox = "\xe2\x94\x9c";
middleBox = "\xe2\x94\xbc";
rightBox = "\xe2\x94\xa4";
}
sprintf_s(indentFormat, MAX_FORMAT_CHARS, "%%-%ds", regTableIndent);
// Now, set up the legend format for the RefPosition info
sprintf_s(legendFormat, MAX_LEGEND_FORMAT_CHARS, "%%-%d.%ds%%-%d.%ds%%-%ds%%s", nodeLocationWidth + 1,
nodeLocationWidth + 1, refPositionWidth + 2, refPositionWidth + 2, regColumnWidth + 1);
// Print a "title row" including the legend and the reg names.
lastDumpedRegisters = RBM_NONE;
dumpRegRecordTitleIfNeeded();
}
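//------------------------------------------------------------------------
// dumpRegRecordTitleIfNeeded: Print a new title row if the set of registers being
// dumped has changed, or if more than MAX_ROWS_BETWEEN_TITLES rows have been
// printed since the last title.
//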
void LinearScan::dumpRegRecordTitleIfNeeded()
{
if ((lastDumpedRegisters != registersToDump) || (rowCountSinceLastTitle > MAX_ROWS_BETWEEN_TITLES))
{
lastUsedRegNumIndex = 0;
int lastRegNumIndex = compiler->compFloatingPointUsed ? REG_FP_LAST : REG_INT_LAST;
for (int regNumIndex = 0; regNumIndex <= lastRegNumIndex; regNumIndex++)
{
if ((registersToDump & genRegMask((regNumber)regNumIndex)) != 0)
{
lastUsedRegNumIndex = regNumIndex;
}
}
dumpRegRecordTitle();
lastDumpedRegisters = registersToDump;
}
}
void LinearScan::dumpRegRecordTitleLines()
{
for (int i = 0; i < regTableIndent; i++)
{
printf("%s", line);
}
for (int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++)
{
regNumber regNum = (regNumber)regNumIndex;
if (shouldDumpReg(regNum))
{
printf("%s", middleBox);
for (int i = 0; i < regColumnWidth; i++)
{
printf("%s", line);
}
}
}
printf("%s\n", rightBox);
}
void LinearScan::dumpRegRecordTitle()
{
dumpRegRecordTitleLines();
// Print out the legend for the RefPosition info
printf(legendFormat, "Loc ", "RP# ", "Name ", "Type Action Reg ");
// Print out the register name column headers
char columnFormatArray[MAX_FORMAT_CHARS];
sprintf_s(columnFormatArray, MAX_FORMAT_CHARS, "%s%%-%d.%ds", columnSeparator, regColumnWidth, regColumnWidth);
for (int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++)
{
regNumber regNum = (regNumber)regNumIndex;
if (shouldDumpReg(regNum))
{
const char* regName = getRegName(regNum);
printf(columnFormatArray, regName);
}
}
printf("%s\n", columnSeparator);
rowCountSinceLastTitle = 0;
dumpRegRecordTitleLines();
}
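//------------------------------------------------------------------------
// dumpRegRecords: Dump one row of the column-based register state dump: for each
// register being dumped, the Interval currently assigned to it (suffixed with
// 'a', 'i' or 'p' for active, inactive or partially spilled), "Busy" if it is
// busy until the next kill, or blank if it is free.
//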
void LinearScan::dumpRegRecords()
{
static char columnFormatArray[18];
for (regNumber regNum = REG_FIRST; regNum <= (regNumber)lastUsedRegNumIndex; regNum = REG_NEXT(regNum))
{
if (shouldDumpReg(regNum))
{
printf("%s", columnSeparator);
RegRecord& regRecord = physRegs[regNum];
Interval* interval = regRecord.assignedInterval;
if (interval != nullptr)
{
dumpIntervalName(interval);
char activeChar = interval->isActive ? 'a' : 'i';
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (interval->isPartiallySpilled)
{
activeChar = 'p';
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
printf("%c", activeChar);
}
else if ((genRegMask(regNum) & regsBusyUntilKill) != RBM_NONE)
{
printf(columnFormatArray, "Busy");
}
else
{
sprintf_s(columnFormatArray, MAX_FORMAT_CHARS, "%%-%ds", regColumnWidth);
printf(columnFormatArray, "");
}
}
}
printf("%s\n", columnSeparator);
rowCountSinceLastTitle++;
}
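//------------------------------------------------------------------------
// dumpIntervalName: Print the short name of an Interval: Vnn for a lclVar, Unn
// for an upper-vector interval, Cnn for a constant, or Inn otherwise.
//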
void LinearScan::dumpIntervalName(Interval* interval)
{
if (interval->isLocalVar)
{
printf(intervalNameFormat, 'V', interval->varNum);
}
else if (interval->IsUpperVector())
{
printf(intervalNameFormat, 'U', interval->relatedInterval->varNum);
}
else if (interval->isConstant)
{
printf(intervalNameFormat, 'C', interval->intervalIndex);
}
else
{
printf(intervalNameFormat, 'I', interval->intervalIndex);
}
}
void LinearScan::dumpEmptyRefPosition()
{
printf(emptyRefPositionFormat, "");
}
//------------------------------------------------------------------------
// dumpNewBlock: Dump a line for a new block in a column-based dump of the register state.
//
// Arguments:
// currentBlock - the new block to be dumped
//
void LinearScan::dumpNewBlock(BasicBlock* currentBlock, LsraLocation location)
{
if (!VERBOSE)
{
return;
}
// Always print a title row before a RefTypeBB (except for the first, because we
// will already have printed it before the parameters)
if ((currentBlock != compiler->fgFirstBB) && (currentBlock != nullptr))
{
dumpRegRecordTitle();
}
// If the activeRefPosition is a DummyDef, then don't print anything further (printing the
// title line makes it clearer that we're "about to" start the next block).
if (activeRefPosition->refType == RefTypeDummyDef)
{
dumpEmptyRefPosition();
printf("DDefs ");
printf(regNameFormat, "");
return;
}
printf(shortRefPositionFormat, location, activeRefPosition->rpNum);
if (currentBlock == nullptr)
{
printf(regNameFormat, "END");
printf(" ");
printf(regNameFormat, "");
}
else
{
printf(bbRefPosFormat, currentBlock->bbNum,
currentBlock == compiler->fgFirstBB ? 0 : blockInfo[currentBlock->bbNum].predBBNum);
}
}
// Note that the size of this dump is computed in dumpRegRecordHeader().
//
void LinearScan::dumpRefPositionShort(RefPosition* refPosition, BasicBlock* currentBlock)
{
static RefPosition* lastPrintedRefPosition = nullptr;
if (refPosition == lastPrintedRefPosition)
{
dumpEmptyRefPosition();
return;
}
lastPrintedRefPosition = refPosition;
if (refPosition->refType == RefTypeBB)
{
dumpNewBlock(currentBlock, refPosition->nodeLocation);
return;
}
printf(shortRefPositionFormat, refPosition->nodeLocation, refPosition->rpNum);
if (refPosition->isIntervalRef())
{
Interval* interval = refPosition->getInterval();
dumpIntervalName(interval);
char lastUseChar = ' ';
char delayChar = ' ';
if (refPosition->lastUse)
{
lastUseChar = '*';
if (refPosition->delayRegFree)
{
delayChar = 'D';
}
}
printf(" %s%c%c ", getRefTypeShortName(refPosition->refType), lastUseChar, delayChar);
}
else if (refPosition->IsPhysRegRef())
{
RegRecord* regRecord = refPosition->getReg();
printf(regNameFormat, getRegName(regRecord->regNum));
printf(" %s ", getRefTypeShortName(refPosition->refType));
}
else
{
assert(refPosition->refType == RefTypeKillGCRefs);
// There's no interval or reg name associated with this.
printf(regNameFormat, " ");
printf(" %s ", getRefTypeShortName(refPosition->refType));
}
}
//------------------------------------------------------------------------
// LinearScan::IsResolutionMove:
// Returns true if the given node is a move inserted by LSRA
// resolution.
//
// Arguments:
// node - the node to check.
//
bool LinearScan::IsResolutionMove(GenTree* node)
{
if (!IsLsraAdded(node))
{
return false;
}
switch (node->OperGet())
{
case GT_LCL_VAR:
case GT_COPY:
return node->IsUnusedValue();
case GT_SWAP:
return true;
default:
return false;
}
}
//------------------------------------------------------------------------
// LinearScan::IsResolutionNode:
// Returns true if the given node is either a move inserted by LSRA
// resolution or an operand to such a move.
//
// Arguments:
// containingRange - the range that contains the node to check.
// node - the node to check.
//
bool LinearScan::IsResolutionNode(LIR::Range& containingRange, GenTree* node)
{
for (;;)
{
if (IsResolutionMove(node))
{
return true;
}
if (!IsLsraAdded(node) || (node->OperGet() != GT_LCL_VAR))
{
return false;
}
LIR::Use use;
bool foundUse = containingRange.TryGetUse(node, &use);
assert(foundUse);
node = use.User();
}
}
//------------------------------------------------------------------------
// verifyFinalAllocation: Traverse the RefPositions and verify various invariants.
//
// Arguments:
// None.
//
// Return Value:
// None.
//
// Notes:
// If verbose is set, this will also dump a table of the final allocations.
void LinearScan::verifyFinalAllocation()
{
if (VERBOSE)
{
printf("\nFinal allocation\n");
}
// Clear register assignments.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->assignedInterval = nullptr;
}
for (Interval& interval : intervals)
{
interval.assignedReg = nullptr;
interval.physReg = REG_NA;
}
DBEXEC(VERBOSE, dumpRegRecordTitle());
BasicBlock* currentBlock = nullptr;
GenTree* firstBlockEndResolutionNode = nullptr;
LsraLocation currentLocation = MinLocation;
for (RefPosition& refPosition : refPositions)
{
RefPosition* currentRefPosition = &refPosition;
Interval* interval = nullptr;
RegRecord* regRecord = nullptr;
regNumber regNum = REG_NA;
activeRefPosition = currentRefPosition;
if (currentRefPosition->refType != RefTypeBB)
{
if (currentRefPosition->IsPhysRegRef())
{
regRecord = currentRefPosition->getReg();
regRecord->recentRefPosition = currentRefPosition;
regNum = regRecord->regNum;
}
else if (currentRefPosition->isIntervalRef())
{
interval = currentRefPosition->getInterval();
interval->recentRefPosition = currentRefPosition;
if (currentRefPosition->registerAssignment != RBM_NONE)
{
if (!genMaxOneBit(currentRefPosition->registerAssignment))
{
assert(currentRefPosition->refType == RefTypeExpUse ||
currentRefPosition->refType == RefTypeDummyDef);
}
else
{
regNum = currentRefPosition->assignedReg();
regRecord = getRegisterRecord(regNum);
}
}
}
}
LsraLocation newLocation = currentRefPosition->nodeLocation;
currentLocation = newLocation;
switch (currentRefPosition->refType)
{
case RefTypeBB:
{
if (currentBlock == nullptr)
{
currentBlock = startBlockSequence();
}
else
{
// Verify the resolution moves at the end of the previous block.
for (GenTree* node = firstBlockEndResolutionNode; node != nullptr; node = node->gtNext)
{
assert(enregisterLocalVars);
// Only verify nodes that are actually moves; don't bother with the nodes that are
// operands to moves.
if (IsResolutionMove(node))
{
verifyResolutionMove(node, currentLocation);
}
}
// Validate the locations at the end of the previous block.
if (enregisterLocalVars)
{
VarToRegMap outVarToRegMap = outVarToRegMaps[currentBlock->bbNum];
VarSetOps::Iter iter(compiler, currentBlock->bbLiveOut);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
if (localVarIntervals[varIndex] == nullptr)
{
assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate);
continue;
}
regNumber regNum = getVarReg(outVarToRegMap, varIndex);
interval = getIntervalForLocalVar(varIndex);
if (interval->physReg != regNum)
{
assert(regNum == REG_STK);
assert((interval->physReg == REG_NA) || interval->isWriteThru);
}
interval->physReg = REG_NA;
interval->assignedReg = nullptr;
interval->isActive = false;
}
}
// Clear register assignments.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->assignedInterval = nullptr;
}
// Now, record the locations at the beginning of this block.
currentBlock = moveToNextBlock();
}
if (currentBlock != nullptr)
{
if (enregisterLocalVars)
{
VarToRegMap inVarToRegMap = inVarToRegMaps[currentBlock->bbNum];
VarSetOps::Iter iter(compiler, currentBlock->bbLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
if (localVarIntervals[varIndex] == nullptr)
{
assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate);
continue;
}
regNumber regNum = getVarReg(inVarToRegMap, varIndex);
interval = getIntervalForLocalVar(varIndex);
interval->physReg = regNum;
interval->assignedReg = &(physRegs[regNum]);
interval->isActive = true;
physRegs[regNum].assignedInterval = interval;
}
}
if (VERBOSE)
{
dumpRefPositionShort(currentRefPosition, currentBlock);
dumpRegRecords();
}
// Finally, handle the resolution moves, if any, at the beginning of the next block.
firstBlockEndResolutionNode = nullptr;
bool foundNonResolutionNode = false;
LIR::Range& currentBlockRange = LIR::AsRange(currentBlock);
for (GenTree* node : currentBlockRange)
{
if (IsResolutionNode(currentBlockRange, node))
{
assert(enregisterLocalVars);
if (foundNonResolutionNode)
{
firstBlockEndResolutionNode = node;
break;
}
else if (IsResolutionMove(node))
{
// Only verify nodes that are actually moves; don't bother with the nodes that are
// operands to moves.
verifyResolutionMove(node, currentLocation);
}
}
else
{
foundNonResolutionNode = true;
}
}
}
}
break;
case RefTypeKill:
assert(regRecord != nullptr);
assert(regRecord->assignedInterval == nullptr);
dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
break;
case RefTypeFixedReg:
assert(regRecord != nullptr);
dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
break;
case RefTypeUpperVectorSave:
dumpLsraAllocationEvent(LSRA_EVENT_UPPER_VECTOR_SAVE, nullptr, REG_NA, currentBlock);
break;
case RefTypeUpperVectorRestore:
dumpLsraAllocationEvent(LSRA_EVENT_UPPER_VECTOR_RESTORE, nullptr, REG_NA, currentBlock);
break;
case RefTypeDef:
case RefTypeUse:
case RefTypeParamDef:
case RefTypeZeroInit:
assert(interval != nullptr);
if (interval->isSpecialPutArg)
{
dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, interval, regNum);
break;
}
if (currentRefPosition->reload)
{
interval->isActive = true;
assert(regNum != REG_NA);
interval->physReg = regNum;
interval->assignedReg = regRecord;
regRecord->assignedInterval = interval;
dumpLsraAllocationEvent(LSRA_EVENT_RELOAD, nullptr, regRecord->regNum, currentBlock);
}
if (regNum == REG_NA)
{
// If this interval is still assigned to a register
if (interval->physReg != REG_NA)
{
// then unassign it if no new register was assigned to the RefTypeDef
if (RefTypeIsDef(currentRefPosition->refType))
{
assert(interval->assignedReg != nullptr);
if (interval->assignedReg->assignedInterval == interval)
{
interval->assignedReg->assignedInterval = nullptr;
}
interval->physReg = REG_NA;
interval->assignedReg = nullptr;
}
}
dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, interval);
}
else if (RefTypeIsDef(currentRefPosition->refType))
{
interval->isActive = true;
if (VERBOSE)
{
if (interval->isConstant && (currentRefPosition->treeNode != nullptr) &&
currentRefPosition->treeNode->IsReuseRegVal())
{
dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, nullptr, regRecord->regNum, currentBlock);
}
else
{
dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_REG, nullptr, regRecord->regNum, currentBlock);
}
}
}
else if (currentRefPosition->copyReg)
{
dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, interval, regRecord->regNum, currentBlock);
}
else if (currentRefPosition->moveReg)
{
assert(interval->assignedReg != nullptr);
interval->assignedReg->assignedInterval = nullptr;
interval->physReg = regNum;
interval->assignedReg = regRecord;
regRecord->assignedInterval = interval;
if (VERBOSE)
{
dumpEmptyRefPosition();
printf("Move %-4s ", getRegName(regRecord->regNum));
}
}
else
{
dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
}
if (currentRefPosition->lastUse || (currentRefPosition->spillAfter && !currentRefPosition->writeThru))
{
interval->isActive = false;
}
if (regNum != REG_NA)
{
if (currentRefPosition->spillAfter)
{
if (VERBOSE)
{
// If refPos is marked as copyReg, then the reg that is spilled
// is the homeReg of the interval not the reg currently assigned
// to refPos.
regNumber spillReg = regNum;
if (currentRefPosition->copyReg)
{
assert(interval != nullptr);
spillReg = interval->physReg;
}
dumpRegRecords();
dumpEmptyRefPosition();
if (currentRefPosition->writeThru)
{
printf("WThru %-4s ", getRegName(spillReg));
}
else
{
printf("Spill %-4s ", getRegName(spillReg));
}
}
}
else if (currentRefPosition->copyReg)
{
regRecord->assignedInterval = interval;
}
else
{
if (RefTypeIsDef(currentRefPosition->refType))
{
// Interval was assigned to a different register.
// Clear the assigned interval of current register.
if (interval->physReg != REG_NA && interval->physReg != regNum)
{
interval->assignedReg->assignedInterval = nullptr;
}
}
interval->physReg = regNum;
interval->assignedReg = regRecord;
regRecord->assignedInterval = interval;
}
}
break;
case RefTypeKillGCRefs:
// No action to take.
// However, we will assert that, at resolution time, no registers contain GC refs.
{
DBEXEC(VERBOSE, printf(" "));
regMaskTP candidateRegs = currentRefPosition->registerAssignment;
while (candidateRegs != RBM_NONE)
{
regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
candidateRegs &= ~nextRegBit;
regNumber nextReg = genRegNumFromMask(nextRegBit);
RegRecord* regRecord = getRegisterRecord(nextReg);
Interval* assignedInterval = regRecord->assignedInterval;
assert(assignedInterval == nullptr || !varTypeIsGC(assignedInterval->registerType));
}
}
break;
case RefTypeExpUse:
case RefTypeDummyDef:
// Do nothing; these will be handled by the RefTypeBB.
DBEXEC(VERBOSE, dumpRefPositionShort(currentRefPosition, currentBlock));
DBEXEC(VERBOSE, printf(" "));
break;
case RefTypeInvalid:
// For these 'currentRefPosition->refType' values, no action to take.
break;
}
if (currentRefPosition->refType != RefTypeBB)
{
DBEXEC(VERBOSE, dumpRegRecords());
if (interval != nullptr)
{
if (currentRefPosition->copyReg)
{
assert(interval->physReg != regNum);
regRecord->assignedInterval = nullptr;
assert(interval->assignedReg != nullptr);
regRecord = interval->assignedReg;
}
if (currentRefPosition->spillAfter || currentRefPosition->lastUse)
{
assert(!currentRefPosition->spillAfter || currentRefPosition->IsActualRef());
if (RefTypeIsDef(currentRefPosition->refType))
{
// If an interval got assigned to a different register (while the different
// register got spilled), then clear the assigned interval of current register.
if (interval->physReg != REG_NA && interval->physReg != regNum)
{
interval->assignedReg->assignedInterval = nullptr;
}
}
interval->physReg = REG_NA;
interval->assignedReg = nullptr;
// regRecord could be null if the RefPosition does not require a register.
if (regRecord != nullptr)
{
regRecord->assignedInterval = nullptr;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
else if (interval->isUpperVector && !currentRefPosition->RegOptional())
{
// These only require a register if they are not RegOptional, and their lclVar
// interval is living in a register and not already partially spilled.
if ((currentRefPosition->refType == RefTypeUpperVectorSave) ||
(currentRefPosition->refType == RefTypeUpperVectorRestore))
{
Interval* lclVarInterval = interval->relatedInterval;
assert((lclVarInterval->physReg == REG_NA) || lclVarInterval->isPartiallySpilled);
}
}
#endif
else
{
assert(currentRefPosition->RegOptional());
}
}
}
}
}
// Now, verify the resolution blocks.
// Currently these are nearly always at the end of the method, but that may not always be the case.
// So, we'll go through all the BBs looking for blocks whose bbNum is greater than bbNumMaxBeforeResolution.
for (BasicBlock* const currentBlock : compiler->Blocks())
{
if (currentBlock->bbNum > bbNumMaxBeforeResolution)
{
// If we haven't enregistered any lclVars, we have no resolution blocks.
assert(enregisterLocalVars);
if (VERBOSE)
{
dumpRegRecordTitle();
printf(shortRefPositionFormat, 0, 0);
assert(currentBlock->bbPreds != nullptr && currentBlock->bbPreds->getBlock() != nullptr);
printf(bbRefPosFormat, currentBlock->bbNum, currentBlock->bbPreds->getBlock()->bbNum);
dumpRegRecords();
}
// Clear register assignments.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->assignedInterval = nullptr;
}
// Set the incoming register assignments
VarToRegMap inVarToRegMap = getInVarToRegMap(currentBlock->bbNum);
VarSetOps::Iter iter(compiler, currentBlock->bbLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
if (localVarIntervals[varIndex] == nullptr)
{
assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate);
continue;
}
regNumber regNum = getVarReg(inVarToRegMap, varIndex);
Interval* interval = getIntervalForLocalVar(varIndex);
interval->physReg = regNum;
interval->assignedReg = &(physRegs[regNum]);
interval->isActive = true;
physRegs[regNum].assignedInterval = interval;
}
// Verify the moves in this block
LIR::Range& currentBlockRange = LIR::AsRange(currentBlock);
for (GenTree* node : currentBlockRange)
{
assert(IsResolutionNode(currentBlockRange, node));
if (IsResolutionMove(node))
{
// Only verify nodes that are actually moves; don't bother with the nodes that are
// operands to moves.
verifyResolutionMove(node, currentLocation);
}
}
// Verify the outgoing register assignments
{
VarToRegMap outVarToRegMap = getOutVarToRegMap(currentBlock->bbNum);
VarSetOps::Iter iter(compiler, currentBlock->bbLiveOut);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
if (localVarIntervals[varIndex] == nullptr)
{
assert(!compiler->lvaGetDescByTrackedIndex(varIndex)->lvLRACandidate);
continue;
}
regNumber regNum = getVarReg(outVarToRegMap, varIndex);
Interval* interval = getIntervalForLocalVar(varIndex);
// Either the register assignments match, or the outgoing assignment is on the stack
// and this is a write-thru interval.
assert(interval->physReg == regNum || (interval->physReg == REG_NA && regNum == REG_STK) ||
(interval->isWriteThru && regNum == REG_STK));
interval->physReg = REG_NA;
interval->assignedReg = nullptr;
interval->isActive = false;
}
}
}
}
DBEXEC(VERBOSE, printf("\n"));
}
//------------------------------------------------------------------------
// verifyResolutionMove: Verify a resolution statement. Called by verifyFinalAllocation()
//
// Arguments:
// resolutionMove - A GenTree* that must be a resolution move.
// currentLocation - The LsraLocation of the most recent RefPosition that has been verified.
//
// Return Value:
// None.
//
// Notes:
// If verbose is set, this will also dump the moves into the table of final allocations.
void LinearScan::verifyResolutionMove(GenTree* resolutionMove, LsraLocation currentLocation)
{
GenTree* dst = resolutionMove;
assert(IsResolutionMove(dst));
if (dst->OperGet() == GT_SWAP)
{
GenTreeLclVarCommon* left = dst->gtGetOp1()->AsLclVarCommon();
GenTreeLclVarCommon* right = dst->gtGetOp2()->AsLclVarCommon();
regNumber leftRegNum = left->GetRegNum();
regNumber rightRegNum = right->GetRegNum();
LclVarDsc* leftVarDsc = compiler->lvaGetDesc(left);
LclVarDsc* rightVarDsc = compiler->lvaGetDesc(right);
Interval* leftInterval = getIntervalForLocalVar(leftVarDsc->lvVarIndex);
Interval* rightInterval = getIntervalForLocalVar(rightVarDsc->lvVarIndex);
assert(leftInterval->physReg == leftRegNum && rightInterval->physReg == rightRegNum);
leftInterval->physReg = rightRegNum;
rightInterval->physReg = leftRegNum;
leftInterval->assignedReg = &physRegs[rightRegNum];
rightInterval->assignedReg = &physRegs[leftRegNum];
physRegs[rightRegNum].assignedInterval = leftInterval;
physRegs[leftRegNum].assignedInterval = rightInterval;
if (VERBOSE)
{
printf(shortRefPositionFormat, currentLocation, 0);
dumpIntervalName(leftInterval);
printf(" Swap ");
printf(" %-4s ", getRegName(rightRegNum));
dumpRegRecords();
printf(shortRefPositionFormat, currentLocation, 0);
dumpIntervalName(rightInterval);
printf(" \" ");
printf(" %-4s ", getRegName(leftRegNum));
dumpRegRecords();
}
return;
}
regNumber dstRegNum = dst->GetRegNum();
regNumber srcRegNum;
GenTreeLclVarCommon* lcl;
if (dst->OperGet() == GT_COPY)
{
lcl = dst->gtGetOp1()->AsLclVarCommon();
srcRegNum = lcl->GetRegNum();
}
else
{
lcl = dst->AsLclVarCommon();
if ((lcl->gtFlags & GTF_SPILLED) != 0)
{
srcRegNum = REG_STK;
}
else
{
assert((lcl->gtFlags & GTF_SPILL) != 0);
srcRegNum = dstRegNum;
dstRegNum = REG_STK;
}
}
Interval* interval = getIntervalForLocalVarNode(lcl);
assert(interval->physReg == srcRegNum || (srcRegNum == REG_STK && interval->physReg == REG_NA));
if (srcRegNum != REG_STK)
{
physRegs[srcRegNum].assignedInterval = nullptr;
}
if (dstRegNum != REG_STK)
{
interval->physReg = dstRegNum;
interval->assignedReg = &(physRegs[dstRegNum]);
physRegs[dstRegNum].assignedInterval = interval;
interval->isActive = true;
}
else
{
interval->physReg = REG_NA;
interval->assignedReg = nullptr;
interval->isActive = false;
}
if (VERBOSE)
{
printf(shortRefPositionFormat, currentLocation, 0);
dumpIntervalName(interval);
printf(" Move ");
printf(" %-4s ", getRegName(dstRegNum));
dumpRegRecords();
}
}
#endif // DEBUG
LinearScan::RegisterSelection::RegisterSelection(LinearScan* linearScan)
{
this->linearScan = linearScan;
#ifdef DEBUG
mappingTable = new ScoreMappingTable(linearScan->compiler->getAllocator(CMK_LSRA));
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) \
mappingTable->Set(stat, &LinearScan::RegisterSelection::try_##stat);
#include "lsra_score.h"
#undef REG_SEL_DEF
LPCWSTR ordering = JitConfig.JitLsraOrdering();
if (ordering == nullptr)
{
ordering = W("ABCDEFGHIJKLMNOPQ");
}
for (int orderId = 0; orderId < REGSELECT_HEURISTIC_COUNT; orderId++)
{
// Make sure we do not set repeated entries
assert(RegSelectionOrder[orderId] == NONE);
switch (ordering[orderId])
{
#define REG_SEL_DEF(enum_name, value, shortname, orderSeqId) \
case orderSeqId: \
RegSelectionOrder[orderId] = enum_name; \
break;
#include "lsra_score.h"
#undef REG_SEL_DEF
default:
assert(!"Invalid lsraOrdering value.");
}
}
#endif // DEBUG
}
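// Illustrative note (not part of the allocator logic): each letter in the ordering string selects one
// heuristic via its orderSeqId in lsra_score.h, which is assumed here to list the heuristics in the same
// order as the try_* methods below ('A' = FREE ... 'Q' = REG_NUM). So, for example, setting
// DOTNET_JitLsraOrdering (COMPlus_ prefix on older runtimes) to a hypothetical value like
// "NABCDEFGHIJKLMOPQ" would try SPILL_COST first and otherwise keep the default order. This knob, like
// the mapping table above, is DEBUG-only.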
// ----------------------------------------------------------
// reset: Resets the values of all the fields used for register selection.
//
void LinearScan::RegisterSelection::reset(Interval* interval, RefPosition* refPos)
{
currentInterval = interval;
refPosition = refPos;
score = 0;
regType = linearScan->getRegisterType(currentInterval, refPosition);
currentLocation = refPosition->nodeLocation;
nextRefPos = refPosition->nextRefPosition;
candidates = refPosition->registerAssignment;
preferences = currentInterval->registerPreferences;
// This is not actually a preference, it's merely to track the lclVar that this
// "specialPutArg" is using.
relatedInterval = currentInterval->isSpecialPutArg ? nullptr : currentInterval->relatedInterval;
relatedPreferences = (relatedInterval == nullptr) ? RBM_NONE : relatedInterval->getCurrentPreferences();
rangeEndLocation = refPosition->getRangeEndLocation();
relatedLastLocation = rangeEndLocation;
preferCalleeSave = currentInterval->preferCalleeSave;
rangeEndRefPosition = nullptr;
lastRefPosition = currentInterval->lastRefPosition;
lastLocation = MinLocation;
prevRegRec = currentInterval->assignedReg;
// These are used in the post-selection updates, and must be set for any selection.
freeCandidates = RBM_NONE;
matchingConstants = RBM_NONE;
unassignedSet = RBM_NONE;
coversSet = RBM_NONE;
preferenceSet = RBM_NONE;
coversRelatedSet = RBM_NONE;
coversFullSet = RBM_NONE;
foundRegBit = REG_NA;
found = false;
skipAllocation = false;
coversSetsCalculated = false;
}
// ----------------------------------------------------------
// applySelection: Apply the heuristic to the candidates.
//
// Arguments:
// selectionScore: The score corresponding to the heuristics we apply.
// selectionCandidates: The possible candidates for the heuristic to apply.
//
// Return Values:
// 'true' if there was a single register candidate available after the heuristic is applied.
//
bool LinearScan::RegisterSelection::applySelection(int selectionScore, regMaskTP selectionCandidates)
{
regMaskTP newCandidates = candidates & selectionCandidates;
if (newCandidates != RBM_NONE)
{
score += selectionScore;
candidates = newCandidates;
return LinearScan::isSingleRegister(candidates);
}
return false;
}
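// Illustrative example (hypothetical masks): with candidates = {x0, x1, x2}, applying a heuristic whose
// selectionCandidates = {x1, x3} narrows candidates to the non-empty intersection {x1}, accumulates the
// heuristic's score, and returns true since a single register remains. Had the intersection been empty,
// candidates and score would be left untouched and the next heuristic in the ordering would be consulted.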
// ----------------------------------------------------------
// applySingleRegSelection: Select a single register, if it is in the candidate set.
//
// Arguments:
// selectionScore: The score corresponding to the heuristics we apply.
// selectionCandidates: The possible candidates for the heuristic to apply.
//
// Return Values:
// 'true' if there was a single register candidate available after the heuristic is applied.
//
bool LinearScan::RegisterSelection::applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate)
{
assert(LinearScan::isSingleRegister(selectionCandidate));
regMaskTP newCandidates = candidates & selectionCandidate;
if (newCandidates != RBM_NONE)
{
candidates = newCandidates;
return true;
}
return false;
}
// ----------------------------------------------------------
// try_FREE: Apply the FREE heuristic.
//
void LinearScan::RegisterSelection::try_FREE()
{
assert(!found);
if (freeCandidates == RBM_NONE)
{
return;
}
found = applySelection(FREE, freeCandidates);
}
// ----------------------------------------------------------
// try_CONST_AVAILABLE: Apply the CONST_AVAILABLE (matching constant) heuristic.
//
// Note: we always need to define the 'matchingConstants' set.
//
void LinearScan::RegisterSelection::try_CONST_AVAILABLE()
{
assert(!found);
if (freeCandidates == RBM_NONE)
{
return;
}
if (currentInterval->isConstant && RefTypeIsDef(refPosition->refType))
{
found = applySelection(CONST_AVAILABLE, matchingConstants);
}
}
// ----------------------------------------------------------
// try_THIS_ASSIGNED: Apply the THIS_ASSIGNED heuristic.
//
void LinearScan::RegisterSelection::try_THIS_ASSIGNED()
{
assert(!found);
if (freeCandidates == RBM_NONE)
{
return;
}
if (prevRegRec != nullptr)
{
found = applySelection(THIS_ASSIGNED, freeCandidates & preferences & prevRegBit);
}
}
// ----------------------------------------------------------
// try_COVERS: Apply the COVERS heuristic.
//
void LinearScan::RegisterSelection::try_COVERS()
{
assert(!found);
calculateCoversSets();
found = applySelection(COVERS, coversSet & preferenceSet);
}
// ----------------------------------------------------------
// try_OWN_PREFERENCE: Apply the OWN_PREFERENCE heuristic.
//
// Note: 'preferenceSet' already includes only freeCandidates.
//
void LinearScan::RegisterSelection::try_OWN_PREFERENCE()
{
assert(!found);
#ifdef DEBUG
calculateCoversSets();
#endif
found = applySelection(OWN_PREFERENCE, (preferenceSet & freeCandidates));
}
// ----------------------------------------------------------
// try_COVERS_RELATED: Apply the COVERS_RELATED heuristic.
//
void LinearScan::RegisterSelection::try_COVERS_RELATED()
{
assert(!found);
#ifdef DEBUG
calculateCoversSets();
#endif
found = applySelection(COVERS_RELATED, (coversRelatedSet & freeCandidates));
}
// ----------------------------------------------------------
// try_RELATED_PREFERENCE: Apply the RELATED_PREFERENCE heuristic.
//
void LinearScan::RegisterSelection::try_RELATED_PREFERENCE()
{
assert(!found);
found = applySelection(RELATED_PREFERENCE, relatedPreferences & freeCandidates);
}
// ----------------------------------------------------------
// try_CALLER_CALLEE: Apply the CALLER_CALLEE heuristic.
//
void LinearScan::RegisterSelection::try_CALLER_CALLEE()
{
assert(!found);
found = applySelection(CALLER_CALLEE, callerCalleePrefs & freeCandidates);
}
// ----------------------------------------------------------
// try_UNASSIGNED: Apply the UNASSIGNED heuristic.
//
void LinearScan::RegisterSelection::try_UNASSIGNED()
{
assert(!found);
#ifdef DEBUG
calculateCoversSets();
#endif
found = applySelection(UNASSIGNED, unassignedSet);
}
// ----------------------------------------------------------
// try_COVERS_FULL: Apply the COVERS_FULL heuristic.
//
void LinearScan::RegisterSelection::try_COVERS_FULL()
{
assert(!found);
#ifdef DEBUG
calculateCoversSets();
#endif
found = applySelection(COVERS_FULL, (coversFullSet & freeCandidates));
}
// ----------------------------------------------------------
// try_BEST_FIT: Apply the BEST_FIT heuristic.
//
void LinearScan::RegisterSelection::try_BEST_FIT()
{
assert(!found);
if (freeCandidates == RBM_NONE)
{
return;
}
regMaskTP bestFitSet = RBM_NONE;
// If the best score includes COVERS_FULL, pick the one that's killed soonest.
// If none cover the full range, the BEST_FIT is the one that's killed later.
bool earliestIsBest = ((score & COVERS_FULL) != 0);
LsraLocation bestFitLocation = earliestIsBest ? MaxLocation : MinLocation;
for (regMaskTP bestFitCandidates = candidates; bestFitCandidates != RBM_NONE;)
{
regMaskTP bestFitCandidateBit = genFindLowestBit(bestFitCandidates);
bestFitCandidates &= ~bestFitCandidateBit;
regNumber bestFitCandidateRegNum = genRegNumFromMask(bestFitCandidateBit);
// Find the next RefPosition of the register.
LsraLocation nextIntervalLocation = linearScan->getNextIntervalRef(bestFitCandidateRegNum, regType);
LsraLocation nextPhysRefLocation = linearScan->getNextFixedRef(bestFitCandidateRegNum, regType);
nextPhysRefLocation = Min(nextPhysRefLocation, nextIntervalLocation);
// If the nextPhysRefLocation is a fixedRef for the rangeEndRefPosition, increment it so that
// we don't mistakenly treat it as not covering the live range.
// This doesn't handle the case where earlier RefPositions for this Interval are also
// FixedRefs of this regNum, but at least those are only interesting in the case where those
// are "local last uses" of the Interval - otherwise the liveRange would interfere with the reg.
// TODO: This duplicates code in an earlier loop, and is basically here to duplicate previous
// behavior; see if we can avoid this.
if (nextPhysRefLocation == rangeEndLocation && rangeEndRefPosition->isFixedRefOfReg(bestFitCandidateRegNum))
{
INDEBUG(linearScan->dumpLsraAllocationEvent(LSRA_EVENT_INCREMENT_RANGE_END, currentInterval));
nextPhysRefLocation++;
}
if (nextPhysRefLocation == bestFitLocation)
{
bestFitSet |= bestFitCandidateBit;
}
else
{
bool isBetter = false;
if (nextPhysRefLocation > lastLocation)
{
// This covers the full range; favor it if the other doesn't, or if it's a closer match.
if ((bestFitLocation <= lastLocation) || (nextPhysRefLocation < bestFitLocation))
{
isBetter = true;
}
}
else
{
// This doesn't cover the full range; favor it if the other doesn't either, but this ends later.
if ((bestFitLocation <= lastLocation) && (nextPhysRefLocation > bestFitLocation))
{
isBetter = true;
}
}
if (isBetter)
{
bestFitSet = bestFitCandidateBit;
bestFitLocation = nextPhysRefLocation;
}
}
}
assert(bestFitSet != RBM_NONE);
found = applySelection(BEST_FIT, bestFitSet);
}
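// Illustrative example (hypothetical locations): with lastLocation = 50, candidates next referenced at
// 60 and 80 both cover the remaining range, so the closer fit (60) is preferred, leaving the
// longer-lived register for other intervals; if they were instead referenced at 30 and 45, neither
// covers the range and the one that stays free longer (45) wins.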
// ----------------------------------------------------------
// try_IS_PREV_REG: Apply the IS_PREV_REG heuristic.
//
// Note: Oddly, the previous heuristics only considered this if it covered the range.
// TODO: Check whether this only applies if we have freeCandidates.
//
void LinearScan::RegisterSelection::try_IS_PREV_REG()
{
// TODO: We do not check found here.
if ((prevRegRec != nullptr) && ((score & COVERS_FULL) != 0))
{
found = applySingleRegSelection(IS_PREV_REG, prevRegBit);
}
}
// ----------------------------------------------------------
// try_REG_ORDER: Apply the REG_ORDER heuristic. Only applies if we have freeCandidates.
//
void LinearScan::RegisterSelection::try_REG_ORDER()
{
assert(!found);
if (freeCandidates == RBM_NONE)
{
return;
}
// This will always result in a single candidate. That is, it is the tie-breaker
// for free candidates, and doesn't make sense as anything other than the last
// heuristic for free registers.
unsigned lowestRegOrder = UINT_MAX;
regMaskTP lowestRegOrderBit = RBM_NONE;
for (regMaskTP regOrderCandidates = candidates; regOrderCandidates != RBM_NONE;)
{
regMaskTP regOrderCandidateBit = genFindLowestBit(regOrderCandidates);
regOrderCandidates &= ~regOrderCandidateBit;
regNumber regOrderCandidateRegNum = genRegNumFromMask(regOrderCandidateBit);
unsigned thisRegOrder = linearScan->getRegisterRecord(regOrderCandidateRegNum)->regOrder;
if (thisRegOrder < lowestRegOrder)
{
lowestRegOrder = thisRegOrder;
lowestRegOrderBit = regOrderCandidateBit;
}
}
assert(lowestRegOrderBit != RBM_NONE);
found = applySingleRegSelection(REG_ORDER, lowestRegOrderBit);
}
// ----------------------------------------------------------
// try_SPILL_COST: Apply the SPILL_COST heuristic.
//
void LinearScan::RegisterSelection::try_SPILL_COST()
{
assert(!found);
// The set of registers with the lowest spill weight.
regMaskTP lowestCostSpillSet = RBM_NONE;
// Apply the SPILL_COST heuristic and eliminate regs that can't be spilled.
// The spill weight for 'refPosition' (the one we're allocating now).
weight_t thisSpillWeight = linearScan->getWeight(refPosition);
// The spill weight for the best candidate we've found so far.
weight_t bestSpillWeight = FloatingPointUtils::infinite_double();
// True if we found registers with lower spill weight than this refPosition.
bool foundLowerSpillWeight = false;
for (regMaskTP spillCandidates = candidates; spillCandidates != RBM_NONE;)
{
regMaskTP spillCandidateBit = genFindLowestBit(spillCandidates);
spillCandidates &= ~spillCandidateBit;
regNumber spillCandidateRegNum = genRegNumFromMask(spillCandidateBit);
RegRecord* spillCandidateRegRecord = &linearScan->physRegs[spillCandidateRegNum];
Interval* assignedInterval = spillCandidateRegRecord->assignedInterval;
// Can and should the interval in this register be spilled for this one,
// if we don't find a better alternative?
if ((linearScan->getNextIntervalRef(spillCandidateRegNum, regType) == currentLocation) &&
!assignedInterval->getNextRefPosition()->RegOptional())
{
continue;
}
if (!linearScan->isSpillCandidate(currentInterval, refPosition, spillCandidateRegRecord))
{
continue;
}
weight_t currentSpillWeight = 0;
RefPosition* recentRefPosition = assignedInterval != nullptr ? assignedInterval->recentRefPosition : nullptr;
if ((recentRefPosition != nullptr) &&
(recentRefPosition->RegOptional() && !(assignedInterval->isLocalVar && recentRefPosition->IsActualRef())))
{
// We do not "spillAfter" if previous (recent) refPosition was regOptional or if it
// is not an actual ref. In those cases, we will reload in future (next) refPosition.
// For such cases, consider the spill cost of next refposition.
// See notes in "spillInterval()".
RefPosition* reloadRefPosition = assignedInterval->getNextRefPosition();
if (reloadRefPosition != nullptr)
{
currentSpillWeight = linearScan->getWeight(reloadRefPosition);
}
}
// Only consider spillCost if we were not able to calculate weight of reloadRefPosition.
if (currentSpillWeight == 0)
{
currentSpillWeight = linearScan->spillCost[spillCandidateRegNum];
#ifdef TARGET_ARM
if (currentInterval->registerType == TYP_DOUBLE)
{
currentSpillWeight = max(currentSpillWeight, linearScan->spillCost[REG_NEXT(spillCandidateRegNum)]);
}
#endif
}
if (currentSpillWeight < bestSpillWeight)
{
bestSpillWeight = currentSpillWeight;
lowestCostSpillSet = spillCandidateBit;
}
else if (currentSpillWeight == bestSpillWeight)
{
lowestCostSpillSet |= spillCandidateBit;
}
}
if (lowestCostSpillSet == RBM_NONE)
{
return;
}
// We won't spill if this refPosition is RegOptional() and we have no candidates
// with a lower spill cost.
if ((bestSpillWeight >= thisSpillWeight) && refPosition->RegOptional())
{
currentInterval->assignedReg = nullptr;
skipAllocation = true;
found = true;
}
// We must have at least one with the lowest spill cost.
assert(lowestCostSpillSet != RBM_NONE);
found = applySelection(SPILL_COST, lowestCostSpillSet);
}
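// Illustrative example (hypothetical weights): if this refPosition has weight 4 and the cheapest spill
// candidate currently holds an interval with spill cost 2, that candidate is chosen - its occupant is
// the cheaper thing to spill. If every candidate cost at least 4 and this refPosition were RegOptional,
// allocation would be skipped instead of evicting something more valuable.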
// ----------------------------------------------------------
// try_FAR_NEXT_REF: Apply the FAR_NEXT_REF heuristic.
//
void LinearScan::RegisterSelection::try_FAR_NEXT_REF()
{
assert(!found);
LsraLocation farthestLocation = MinLocation;
regMaskTP farthestSet = RBM_NONE;
for (regMaskTP farthestCandidates = candidates; farthestCandidates != RBM_NONE;)
{
regMaskTP farthestCandidateBit = genFindLowestBit(farthestCandidates);
farthestCandidates &= ~farthestCandidateBit;
regNumber farthestCandidateRegNum = genRegNumFromMask(farthestCandidateBit);
// Find the next RefPosition of the register.
LsraLocation nextIntervalLocation =
linearScan->getNextIntervalRef(farthestCandidateRegNum, currentInterval->registerType);
LsraLocation nextPhysRefLocation = Min(linearScan->nextFixedRef[farthestCandidateRegNum], nextIntervalLocation);
if (nextPhysRefLocation == farthestLocation)
{
farthestSet |= farthestCandidateBit;
}
else if (nextPhysRefLocation > farthestLocation)
{
farthestSet = farthestCandidateBit;
farthestLocation = nextPhysRefLocation;
}
}
// We must have at least one with the farthest next reference.
assert(farthestSet != RBM_NONE);
found = applySelection(FAR_NEXT_REF, farthestSet);
}
// ----------------------------------------------------------
// try_PREV_REG_OPT: Apply the PREV_REG_OPT heuristic.
//
void LinearScan::RegisterSelection::try_PREV_REG_OPT()
{
assert(!found);
regMaskTP prevRegOptSet = RBM_NONE;
for (regMaskTP prevRegOptCandidates = candidates; prevRegOptCandidates != RBM_NONE;)
{
regMaskTP prevRegOptCandidateBit = genFindLowestBit(prevRegOptCandidates);
prevRegOptCandidates &= ~prevRegOptCandidateBit;
regNumber prevRegOptCandidateRegNum = genRegNumFromMask(prevRegOptCandidateBit);
Interval* assignedInterval = linearScan->physRegs[prevRegOptCandidateRegNum].assignedInterval;
bool foundPrevRegOptReg = true;
#ifdef DEBUG
bool hasAssignedInterval = false;
#endif
if ((assignedInterval != nullptr) && (assignedInterval->recentRefPosition != nullptr))
{
foundPrevRegOptReg &=
(assignedInterval->recentRefPosition->reload && assignedInterval->recentRefPosition->RegOptional());
#ifdef DEBUG
hasAssignedInterval = true;
#endif
}
#ifndef TARGET_ARM
else
{
foundPrevRegOptReg = false;
}
#endif
#ifdef TARGET_ARM
// If current interval is TYP_DOUBLE, verify if the other half register matches the heuristics.
// We have three cases:
// 1. One of the registers of the pair has an assigned interval: check if that register's refPosition
// matches the heuristics. If yes, add it to the set.
// 2. Both registers of the pair have an assigned interval: conservatively "and" the conditions for the
// heuristics of their corresponding refPositions. If both registers' heuristics match, add them
// to the set. TODO-CQ-ARM: We may implement a better condition later.
// 3. Neither register of the pair has an assigned interval: skip adding the register and assert.
if (currentInterval->registerType == TYP_DOUBLE)
{
regNumber anotherHalfRegNum = linearScan->findAnotherHalfRegNum(prevRegOptCandidateRegNum);
assignedInterval = linearScan->physRegs[anotherHalfRegNum].assignedInterval;
if ((assignedInterval != nullptr) && (assignedInterval->recentRefPosition != nullptr))
{
if (assignedInterval->recentRefPosition->reload && assignedInterval->recentRefPosition->RegOptional())
{
foundPrevRegOptReg &= (assignedInterval->recentRefPosition->reload &&
assignedInterval->recentRefPosition->RegOptional());
}
#ifdef DEBUG
hasAssignedInterval = true;
#endif
}
}
#endif
if (foundPrevRegOptReg)
{
// TODO-Cleanup: Previously, we always used the highest regNum with a previous regOptional
// RefPosition, which is not really consistent with the way other selection criteria are
// applied. It should probably be: prevRegOptSet |= prevRegOptCandidateBit;
prevRegOptSet = prevRegOptCandidateBit;
}
#ifdef DEBUG
// The assignedInterval should be non-null and should have a recentRefPosition; however, since
// this is a heuristic, we don't want a fatal error, so we just assert (not noway_assert).
if (!hasAssignedInterval)
{
assert(!"Spill candidate has no assignedInterval recentRefPosition");
}
#endif
}
found = applySelection(PREV_REG_OPT, prevRegOptSet);
}
// ----------------------------------------------------------
// try_REG_NUM: Apply the REG_NUM heuristic.
//
void LinearScan::RegisterSelection::try_REG_NUM()
{
assert(!found);
found = applySingleRegSelection(REG_NUM, genFindLowestBit(candidates));
}
// ----------------------------------------------------------
// calculateCoversSets: Calculate the necessary covers set registers to be used
// for heuristics like COVERS, COVERS_RELATED, COVERS_FULL.
//
void LinearScan::RegisterSelection::calculateCoversSets()
{
if (freeCandidates == RBM_NONE || coversSetsCalculated)
{
return;
}
preferenceSet = (candidates & preferences);
regMaskTP coversCandidates = (preferenceSet == RBM_NONE) ? candidates : preferenceSet;
for (; coversCandidates != RBM_NONE;)
{
regMaskTP coversCandidateBit = genFindLowestBit(coversCandidates);
coversCandidates &= ~coversCandidateBit;
regNumber coversCandidateRegNum = genRegNumFromMask(coversCandidateBit);
// If we have a single candidate we don't need to compute the preference-related sets, but we
// do need to compute the unassignedSet.
if (!found)
{
// Find the next RefPosition of the register.
LsraLocation nextIntervalLocation = linearScan->getNextIntervalRef(coversCandidateRegNum, regType);
LsraLocation nextPhysRefLocation = linearScan->getNextFixedRef(coversCandidateRegNum, regType);
LsraLocation coversCandidateLocation = Min(nextPhysRefLocation, nextIntervalLocation);
// If the nextPhysRefLocation is a fixedRef for the rangeEndRefPosition, increment it so that
// we don't mistakenly treat it as not covering the live range.
// This doesn't handle the case where earlier RefPositions for this Interval are also
// FixedRefs of this regNum, but at least those are only interesting in the case where those
// are "local last uses" of the Interval - otherwise the liveRange would interfere with the reg.
if (coversCandidateLocation == rangeEndLocation &&
rangeEndRefPosition->isFixedRefOfReg(coversCandidateRegNum))
{
INDEBUG(linearScan->dumpLsraAllocationEvent(LSRA_EVENT_INCREMENT_RANGE_END, currentInterval));
coversCandidateLocation++;
}
if (coversCandidateLocation > rangeEndLocation)
{
coversSet |= coversCandidateBit;
}
if ((coversCandidateBit & relatedPreferences) != RBM_NONE)
{
if (coversCandidateLocation > relatedLastLocation)
{
coversRelatedSet |= coversCandidateBit;
}
}
else if (coversCandidateBit == refPosition->registerAssignment)
{
// If we had a fixed-reg def of a reg that will be killed before the use, prefer it to any other
// registers with the same score. (Note that we haven't changed the original registerAssignment
// on the RefPosition).
// Overload the RELATED_PREFERENCE value.
// TODO-CQ: Consider if this should be split out.
coversRelatedSet |= coversCandidateBit;
}
// Does this cover the full range of the interval?
if (coversCandidateLocation > lastLocation)
{
coversFullSet |= coversCandidateBit;
}
}
// The register is considered unassigned if it has no assignedInterval, OR
// if its next reference is beyond the range of this interval.
if (linearScan->nextIntervalRef[coversCandidateRegNum] > lastLocation)
{
unassignedSet |= coversCandidateBit;
}
}
coversSetsCalculated = true;
}
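// Illustrative example (hypothetical locations): with rangeEndLocation = 20 and lastLocation = 40, a
// candidate next referenced at 30 lands in coversSet (it covers the current use range) but not in
// coversFullSet (it does not reach the end of the interval); one next referenced at 50 lands in both,
// and also in unassignedSet if no other interval is due to use it before location 40.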
// ----------------------------------------------------------
// select: For given `currentInterval` and `refPosition`, selects a register to be assigned.
//
// Arguments:
// currentInterval - Current interval for which register needs to be selected.
// refPosition - Refposition within the interval for which register needs to be selected.
//
// Return Values:
// Register bit selected (a single register) and REG_NA if no register was selected.
//
regMaskTP LinearScan::RegisterSelection::select(Interval* currentInterval,
RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore))
{
#ifdef DEBUG
*registerScore = NONE;
#endif
reset(currentInterval, refPosition);
// process data-structures
if (RefTypeIsDef(refPosition->refType))
{
if (currentInterval->hasConflictingDefUse)
{
linearScan->resolveConflictingDefAndUse(currentInterval, refPosition);
candidates = refPosition->registerAssignment;
}
// Otherwise, check for the case of a fixed-reg def of a reg that will be killed before the
// use, or interferes at the point of use (which shouldn't happen, but Lower doesn't mark
// the contained nodes as interfering).
// Note that we may have a ParamDef RefPosition that is marked isFixedRegRef, but which
// has had its registerAssignment changed to no longer be a single register.
else if (refPosition->isFixedRegRef && nextRefPos != nullptr && RefTypeIsUse(nextRefPos->refType) &&
!nextRefPos->isFixedRegRef && genMaxOneBit(refPosition->registerAssignment))
{
regNumber defReg = refPosition->assignedReg();
RegRecord* defRegRecord = linearScan->getRegisterRecord(defReg);
RefPosition* currFixedRegRefPosition = defRegRecord->recentRefPosition;
assert(currFixedRegRefPosition != nullptr &&
currFixedRegRefPosition->nodeLocation == refPosition->nodeLocation);
// If there is another fixed reference to this register before the use, change the candidates
// on this RefPosition to include that of nextRefPos.
RefPosition* nextFixedRegRefPosition = defRegRecord->getNextRefPosition();
if (nextFixedRegRefPosition != nullptr &&
nextFixedRegRefPosition->nodeLocation <= nextRefPos->getRefEndLocation())
{
candidates |= nextRefPos->registerAssignment;
if (preferences == refPosition->registerAssignment)
{
preferences = candidates;
}
}
}
}
preferences &= candidates;
if (preferences == RBM_NONE)
{
preferences = candidates;
}
#ifdef DEBUG
candidates = linearScan->stressLimitRegs(refPosition, candidates);
#endif
assert(candidates != RBM_NONE);
Interval* nextRelatedInterval = relatedInterval;
Interval* finalRelatedInterval = relatedInterval;
Interval* rangeEndInterval = relatedInterval;
bool avoidByteRegs = false;
#ifdef TARGET_X86
if ((relatedPreferences & ~RBM_BYTE_REGS) != RBM_NONE)
{
avoidByteRegs = true;
}
#endif
// Follow the chain of related intervals, as long as:
// - The next reference is a def. We don't want to use the relatedInterval for preferencing if its next reference
// is not a new definition (as it either is or will become live).
// - The next (def) reference is downstream. Otherwise we could iterate indefinitely because the preferences can be
// circular.
// - The intersection of preferenced registers is non-empty.
//
while (nextRelatedInterval != nullptr)
{
RefPosition* nextRelatedRefPosition = nextRelatedInterval->getNextRefPosition();
// Only use the relatedInterval for preferencing if the related interval's next reference
// is a new definition.
if ((nextRelatedRefPosition != nullptr) && RefTypeIsDef(nextRelatedRefPosition->refType))
{
finalRelatedInterval = nextRelatedInterval;
nextRelatedInterval = nullptr;
// First, get the preferences for this interval
regMaskTP thisRelatedPreferences = finalRelatedInterval->getCurrentPreferences();
// Now, determine if they are compatible and update the relatedPreferences that we'll consider.
regMaskTP newRelatedPreferences = thisRelatedPreferences & relatedPreferences;
if (newRelatedPreferences != RBM_NONE && (!avoidByteRegs || thisRelatedPreferences != RBM_BYTE_REGS))
{
// TODO-CQ: The following isFree() check doesn't account for the possibility that there's an
// assignedInterval whose recentRefPosition was delayFree. It also fails to account for
// the TYP_DOUBLE case on ARM. It would be better to replace the call to isFree with
// isRegAvailable(genRegNumFromMask(newRelatedPreferences), regType)), but this is retained
// to achieve zero diffs.
//
bool thisIsSingleReg = isSingleRegister(newRelatedPreferences);
if (!thisIsSingleReg ||
(finalRelatedInterval->isLocalVar &&
linearScan->isFree(linearScan->getRegisterRecord(genRegNumFromMask(newRelatedPreferences)))))
{
relatedPreferences = newRelatedPreferences;
// If this Interval has a downstream def without a single-register preference, continue to iterate.
if (nextRelatedRefPosition->nodeLocation > rangeEndLocation)
{
preferCalleeSave = (preferCalleeSave || finalRelatedInterval->preferCalleeSave);
rangeEndLocation = nextRelatedRefPosition->getRangeEndLocation();
rangeEndInterval = finalRelatedInterval;
nextRelatedInterval = finalRelatedInterval->relatedInterval;
}
}
}
}
else
{
if (nextRelatedInterval == relatedInterval)
{
relatedInterval = nullptr;
relatedPreferences = RBM_NONE;
}
nextRelatedInterval = nullptr;
}
}
// For floating point, we want to be less aggressive about using callee-save registers.
// So in that case, we just need to ensure that the current RefPosition is covered.
if (useFloatReg(currentInterval->registerType))
{
rangeEndRefPosition = refPosition;
preferCalleeSave = currentInterval->preferCalleeSave;
}
else if (currentInterval->isWriteThru && refPosition->spillAfter)
{
// This is treated as a last use of the register, as there is an upcoming EH boundary.
rangeEndRefPosition = refPosition;
}
else
{
rangeEndRefPosition = refPosition->getRangeEndRef();
// If we have a chain of related intervals, and a finalRelatedInterval that
// is not currently occupying a register, and whose lifetime begins after this one,
// we want to try to select a register that will cover its lifetime.
if ((rangeEndInterval != nullptr) && (rangeEndInterval->assignedReg == nullptr) &&
!rangeEndInterval->isWriteThru &&
(rangeEndInterval->getNextRefLocation() >= rangeEndRefPosition->nodeLocation))
{
lastRefPosition = rangeEndInterval->lastRefPosition;
}
}
if ((relatedInterval != nullptr) && !relatedInterval->isWriteThru)
{
relatedLastLocation = relatedInterval->lastRefPosition->nodeLocation;
}
if (preferCalleeSave)
{
regMaskTP calleeSaveCandidates = calleeSaveRegs(currentInterval->registerType);
if (currentInterval->isWriteThru)
{
// We'll only prefer a callee-save register if it's already been used.
regMaskTP unusedCalleeSaves =
calleeSaveCandidates & ~(linearScan->compiler->codeGen->regSet.rsGetModifiedRegsMask());
callerCalleePrefs = calleeSaveCandidates & ~unusedCalleeSaves;
preferences &= ~unusedCalleeSaves;
}
else
{
callerCalleePrefs = calleeSaveCandidates;
}
}
else
{
callerCalleePrefs = callerSaveRegs(currentInterval->registerType);
}
// If this has a delayed use (due to being used in a rmw position of a
// non-commutative operator), its endLocation is delayed until the "def"
// position, which is one location past the use (getRefEndLocation() takes care of this).
rangeEndLocation = rangeEndRefPosition->getRefEndLocation();
lastLocation = lastRefPosition->getRefEndLocation();
// We'll set this to short-circuit remaining heuristics when we have a single candidate.
found = false;
// Is this a fixedReg?
regMaskTP fixedRegMask = RBM_NONE;
if (refPosition->isFixedRegRef)
{
assert(genMaxOneBit(refPosition->registerAssignment));
fixedRegMask = refPosition->registerAssignment;
if (candidates == refPosition->registerAssignment)
{
found = true;
if (linearScan->nextIntervalRef[genRegNumFromMask(candidates)] > lastLocation)
{
unassignedSet = candidates;
}
}
}
// Eliminate candidates that are in-use or busy.
if (!found)
{
regMaskTP busyRegs = linearScan->regsBusyUntilKill | linearScan->regsInUseThisLocation;
candidates &= ~busyRegs;
// Also eliminate as busy any register with a conflicting fixed reference at this or
// the next location.
// Note that this will eliminate the fixedReg, if any, but we'll add it back below.
regMaskTP checkConflictMask = candidates & linearScan->fixedRegs;
while (checkConflictMask != RBM_NONE)
{
regMaskTP checkConflictBit = genFindLowestBit(checkConflictMask);
checkConflictMask &= ~checkConflictBit;
regNumber checkConflictReg = genRegNumFromMask(checkConflictBit);
LsraLocation checkConflictLocation = linearScan->nextFixedRef[checkConflictReg];
if ((checkConflictLocation == currentLocation) ||
(refPosition->delayRegFree && (checkConflictLocation == (currentLocation + 1))))
{
candidates &= ~checkConflictBit;
}
}
candidates |= fixedRegMask;
found = isSingleRegister(candidates);
}
// By chance, is prevRegRec already holding this interval, as a copyReg or having
// been restored as inactive after a kill?
// NOTE: this is not currently considered one of the selection criteria - it always wins
// if it is the assignedInterval of 'prevRegRec'.
if (!found && (prevRegRec != nullptr))
{
prevRegBit = genRegMask(prevRegRec->regNum);
if ((prevRegRec->assignedInterval == currentInterval) && ((candidates & prevRegBit) != RBM_NONE))
{
candidates = prevRegBit;
found = true;
#ifdef DEBUG
*registerScore = THIS_ASSIGNED;
#endif
}
}
else
{
prevRegBit = RBM_NONE;
}
if (!found && (candidates == RBM_NONE))
{
assert(refPosition->RegOptional());
currentInterval->assignedReg = nullptr;
return RBM_NONE;
}
// TODO-Cleanup: Previously, the "reverseSelect" stress mode reversed the order of the heuristics.
// It needs to be re-engineered with this refactoring.
// In non-debug builds, this will simply get optimized away
bool reverseSelect = false;
#ifdef DEBUG
reverseSelect = linearScan->doReverseSelect();
#endif // DEBUG
freeCandidates = linearScan->getFreeCandidates(candidates, regType);
// If no free candidates, then double check if refPosition is an actual ref.
if (freeCandidates == RBM_NONE)
{
// We won't spill if this refPosition is not an actual ref.
if (!refPosition->IsActualRef())
{
currentInterval->assignedReg = nullptr;
return RBM_NONE;
}
}
else
{
// Set the 'matchingConstants' set.
if (currentInterval->isConstant && RefTypeIsDef(refPosition->refType))
{
matchingConstants = linearScan->getMatchingConstants(candidates, currentInterval, refPosition);
}
}
#define IF_FOUND_GOTO_DONE \
if (found) \
goto Selection_Done;
#ifdef DEBUG
HeuristicFn fn;
for (int orderId = 0; orderId < REGSELECT_HEURISTIC_COUNT; orderId++)
{
IF_FOUND_GOTO_DONE
RegisterScore heuristicToApply = RegSelectionOrder[orderId];
if (mappingTable->Lookup(heuristicToApply, &fn))
{
(this->*fn)();
if (found)
{
*registerScore = heuristicToApply;
}
#if TRACK_LSRA_STATS
INTRACK_STATS_IF(found, linearScan->updateLsraStat(linearScan->getLsraStatFromScore(heuristicToApply),
refPosition->bbNum));
#endif // TRACK_LSRA_STATS
}
else
{
assert(!"Unexpected heuristic value!");
}
}
#else // RELEASE
// In release, just invoke the default order
#define REG_SEL_DEF(stat, value, shortname, orderSeqId) \
try_##stat(); \
IF_FOUND_GOTO_DONE
#include "lsra_score.h"
#undef REG_SEL_DEF
#endif // DEBUG
#undef IF_FOUND_GOTO_DONE
Selection_Done:
if (skipAllocation)
{
return RBM_NONE;
}
calculateCoversSets();
assert(found && isSingleRegister(candidates));
foundRegBit = candidates;
return candidates;
}
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
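For illustration only, here is a hedged sketch (not the actual diff) of what "handle the IsMultiRegLclVar() case" amounts to in a register-presence check: report a register if any field of the promoted local received one. The free-function form and the name hasAnyFieldReg are assumptions made for readability; per the description above, the real change is inside gtHasReg(), with the register count coming from GetMultiRegCount().
// Hypothetical sketch: report "has a register" if any slot of a multi-reg local node
// received a register from LSRA.
bool hasAnyFieldReg(const GenTreeLclVar* lclNode, unsigned regCount)
{
    // REG_NA means that field of the multi-reg local stayed on the stack.
    for (unsigned i = 0; i < regCount; i++)
    {
        if (lclNode->GetRegNumByIdx(i) != REG_NA)
        {
            return true;
        }
    }
    return false;
}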
| ./src/coreclr/jit/lsraarm64.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Register Requirements for ARM64 XX
XX XX
XX This encapsulates all the logic for setting register requirements for XX
XX the ARM64 architecture. XX
XX XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARM64
#include "jit.h"
#include "sideeffects.h"
#include "lower.h"
//------------------------------------------------------------------------
// BuildNode: Build the RefPositions for a node
//
// Arguments:
// treeNode - the node of interest
//
// Return Value:
// The number of sources consumed by this node.
//
// Notes:
// Preconditions:
// LSRA Has been initialized.
//
// Postconditions:
// RefPositions have been built for all the register defs and uses required
// for this node.
//
int LinearScan::BuildNode(GenTree* tree)
{
assert(!tree->isContained());
int srcCount;
int dstCount = 0;
regMaskTP dstCandidates = RBM_NONE;
regMaskTP killMask = RBM_NONE;
bool isLocalDefUse = false;
// Reset the build-related members of LinearScan.
clearBuildState();
// Set the default dstCount. This may be modified below.
if (tree->IsValue())
{
dstCount = 1;
if (tree->IsUnusedValue())
{
isLocalDefUse = true;
}
}
else
{
dstCount = 0;
}
switch (tree->OperGet())
{
default:
srcCount = BuildSimple(tree);
break;
case GT_LCL_VAR:
// We make a final determination about whether a GT_LCL_VAR is a candidate or contained
// after liveness. In either case we don't build any uses or defs. Otherwise, this is a
// load of a stack-based local into a register and we'll fall through to the general
// local case below.
if (checkContainedOrCandidateLclVar(tree->AsLclVar()))
{
return 0;
}
FALLTHROUGH;
case GT_LCL_FLD:
{
srcCount = 0;
#ifdef FEATURE_SIMD
// Need an additional register to read upper 4 bytes of Vector3.
if (tree->TypeGet() == TYP_SIMD12)
{
// We need an internal register different from targetReg in which 'tree' produces its result
// because both targetReg and internal reg will be in use at the same time.
buildInternalFloatRegisterDefForNode(tree, allSIMDRegs());
setInternalRegsDelayFree = true;
buildInternalRegisterUses();
}
#endif
BuildDef(tree);
}
break;
case GT_STORE_LCL_VAR:
if (tree->IsMultiRegLclVar() && isCandidateMultiRegLclVar(tree->AsLclVar()))
{
dstCount = compiler->lvaGetDesc(tree->AsLclVar())->lvFieldCnt;
}
FALLTHROUGH;
case GT_STORE_LCL_FLD:
srcCount = BuildStoreLoc(tree->AsLclVarCommon());
break;
case GT_FIELD_LIST:
// These should always be contained. We don't correctly allocate or
// generate code for a non-contained GT_FIELD_LIST.
noway_assert(!"Non-contained GT_FIELD_LIST");
srcCount = 0;
break;
case GT_ARGPLACE:
case GT_NO_OP:
case GT_START_NONGC:
srcCount = 0;
assert(dstCount == 0);
break;
case GT_PROF_HOOK:
srcCount = 0;
assert(dstCount == 0);
killMask = getKillSetForProfilerHook();
BuildDefsWithKills(tree, 0, RBM_NONE, killMask);
break;
case GT_START_PREEMPTGC:
// This kills GC refs in callee save regs
srcCount = 0;
assert(dstCount == 0);
BuildDefsWithKills(tree, 0, RBM_NONE, RBM_NONE);
break;
case GT_CNS_DBL:
{
GenTreeDblCon* dblConst = tree->AsDblCon();
double constValue = dblConst->AsDblCon()->gtDconVal;
if (emitter::emitIns_valid_imm_for_fmov(constValue))
{
// Directly encode constant to instructions.
}
else
{
// Reserve int to load constant from memory (IF_LARGELDC)
buildInternalIntRegisterDefForNode(tree);
buildInternalRegisterUses();
}
}
FALLTHROUGH;
case GT_CNS_INT:
{
srcCount = 0;
assert(dstCount == 1);
RefPosition* def = BuildDef(tree);
def->getInterval()->isConstant = true;
}
break;
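// Illustrative note (hypothetical constants): immediates such as 2.0 or -0.5 fit the fmov
// immediate encoding and need no extra register, whereas a value like 0.1 cannot be encoded,
// so an int register is reserved to address the constant in the data section.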
case GT_BOX:
case GT_COMMA:
case GT_QMARK:
case GT_COLON:
srcCount = 0;
assert(dstCount == 0);
unreached();
break;
case GT_RETURN:
srcCount = BuildReturn(tree);
killMask = getKillSetForReturn();
BuildDefsWithKills(tree, 0, RBM_NONE, killMask);
break;
case GT_RETFILT:
assert(dstCount == 0);
if (tree->TypeGet() == TYP_VOID)
{
srcCount = 0;
}
else
{
assert(tree->TypeGet() == TYP_INT);
srcCount = 1;
BuildUse(tree->gtGetOp1(), RBM_INTRET);
}
break;
case GT_NOP:
// A GT_NOP is a passthrough if it is void or if it has a child, but it
// must be considered to produce a dummy value if it has a type but no child.
srcCount = 0;
if (tree->TypeGet() != TYP_VOID && tree->gtGetOp1() == nullptr)
{
assert(dstCount == 1);
BuildDef(tree);
}
else
{
assert(dstCount == 0);
}
break;
case GT_KEEPALIVE:
assert(dstCount == 0);
srcCount = BuildOperandUses(tree->gtGetOp1());
break;
case GT_JTRUE:
srcCount = 0;
assert(dstCount == 0);
break;
case GT_JMP:
srcCount = 0;
assert(dstCount == 0);
break;
case GT_SWITCH:
// This should never occur since switch nodes must not be visible at this
// point in the JIT.
srcCount = 0;
noway_assert(!"Switch must be lowered at this point");
break;
case GT_JMPTABLE:
srcCount = 0;
assert(dstCount == 1);
BuildDef(tree);
break;
case GT_SWITCH_TABLE:
buildInternalIntRegisterDefForNode(tree);
srcCount = BuildBinaryUses(tree->AsOp());
assert(dstCount == 0);
break;
case GT_ASG:
noway_assert(!"We should never hit any assignment operator in lowering");
srcCount = 0;
break;
case GT_ADD:
case GT_SUB:
if (varTypeIsFloating(tree->TypeGet()))
{
// overflow operations aren't supported on float/double types.
assert(!tree->gtOverflow());
// No implicit conversions at this stage as the expectation is that
// everything is made explicit by adding casts.
assert(tree->gtGetOp1()->TypeGet() == tree->gtGetOp2()->TypeGet());
}
FALLTHROUGH;
case GT_ADDEX:
case GT_AND:
case GT_AND_NOT:
case GT_OR:
case GT_XOR:
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROR:
srcCount = BuildBinaryUses(tree->AsOp());
assert(dstCount == 1);
BuildDef(tree);
break;
case GT_BFIZ:
assert(tree->gtGetOp1()->OperIs(GT_CAST));
srcCount = BuildOperandUses(tree->gtGetOp1()->gtGetOp1());
BuildDef(tree);
break;
case GT_RETURNTRAP:
// this just turns into a compare of its child with an int
// + a conditional call
BuildUse(tree->gtGetOp1());
srcCount = 1;
assert(dstCount == 0);
killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC);
BuildDefsWithKills(tree, 0, RBM_NONE, killMask);
break;
case GT_MOD:
case GT_UMOD:
NYI_IF(varTypeIsFloating(tree->TypeGet()), "FP Remainder in ARM64");
assert(!"Shouldn't see an integer typed GT_MOD node in ARM64");
srcCount = 0;
break;
case GT_MUL:
if (tree->gtOverflow())
{
// Need a register different from target reg to check for overflow.
buildInternalIntRegisterDefForNode(tree);
setInternalRegsDelayFree = true;
}
FALLTHROUGH;
case GT_DIV:
case GT_MULHI:
case GT_MUL_LONG:
case GT_UDIV:
{
srcCount = BuildBinaryUses(tree->AsOp());
buildInternalRegisterUses();
assert(dstCount == 1);
BuildDef(tree);
}
break;
case GT_INTRINSIC:
{
switch (tree->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Max:
case NI_System_Math_Min:
assert(varTypeIsFloating(tree->gtGetOp1()));
assert(varTypeIsFloating(tree->gtGetOp2()));
assert(tree->gtGetOp1()->TypeIs(tree->TypeGet()));
srcCount = BuildBinaryUses(tree->AsOp());
assert(dstCount == 1);
BuildDef(tree);
break;
case NI_System_Math_Abs:
case NI_System_Math_Ceiling:
case NI_System_Math_Floor:
case NI_System_Math_Truncate:
case NI_System_Math_Round:
case NI_System_Math_Sqrt:
assert(varTypeIsFloating(tree->gtGetOp1()));
assert(tree->gtGetOp1()->TypeIs(tree->TypeGet()));
BuildUse(tree->gtGetOp1());
srcCount = 1;
assert(dstCount == 1);
BuildDef(tree);
break;
default:
unreached();
}
}
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
srcCount = BuildSIMD(tree->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
srcCount = BuildHWIntrinsic(tree->AsHWIntrinsic(), &dstCount);
break;
#endif // FEATURE_HW_INTRINSICS
case GT_CAST:
assert(dstCount == 1);
srcCount = BuildCast(tree->AsCast());
break;
case GT_NEG:
case GT_NOT:
BuildUse(tree->gtGetOp1());
srcCount = 1;
assert(dstCount == 1);
BuildDef(tree);
break;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
case GT_JCMP:
srcCount = BuildCmp(tree);
break;
case GT_CKFINITE:
srcCount = 1;
assert(dstCount == 1);
buildInternalIntRegisterDefForNode(tree);
BuildUse(tree->gtGetOp1());
BuildDef(tree);
buildInternalRegisterUses();
break;
case GT_CMPXCHG:
{
GenTreeCmpXchg* cmpXchgNode = tree->AsCmpXchg();
srcCount = cmpXchgNode->gtOpComparand->isContained() ? 2 : 3;
assert(dstCount == 1);
if (!compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
// For ARMv8 exclusives requires a single internal register
buildInternalIntRegisterDefForNode(tree);
}
// For ARMv8 exclusives the lifetime of the addr and data must be extended because
// they may be used multiple times during retries
// For ARMv8.1 atomic cas the lifetime of the addr and data must be extended to prevent
// them being reused as the target register which must be destroyed early
RefPosition* locationUse = BuildUse(tree->AsCmpXchg()->gtOpLocation);
setDelayFree(locationUse);
RefPosition* valueUse = BuildUse(tree->AsCmpXchg()->gtOpValue);
setDelayFree(valueUse);
if (!cmpXchgNode->gtOpComparand->isContained())
{
RefPosition* comparandUse = BuildUse(tree->AsCmpXchg()->gtOpComparand);
// For ARMv8 exclusives the lifetime of the comparand must be extended because
// it may be used multiple times during retries
if (!compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
setDelayFree(comparandUse);
}
}
// Internals may not collide with target
setInternalRegsDelayFree = true;
buildInternalRegisterUses();
BuildDef(tree);
}
break;
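// Illustrative note: without the Atomics ISA (ARMv8.0), compare-exchange is emitted as a
// load-exclusive/store-exclusive retry loop, which is why the address, value and (non-contained)
// comparand are kept delay-free across the whole sequence; with ARMv8.1 atomics a single CAS-style
// instruction is used and only reuse as the early-clobbered target register has to be avoided.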
case GT_LOCKADD:
case GT_XORR:
case GT_XAND:
case GT_XADD:
case GT_XCHG:
{
assert(dstCount == ((tree->TypeGet() == TYP_VOID) ? 0 : 1));
srcCount = tree->gtGetOp2()->isContained() ? 1 : 2;
if (!compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
// GT_XCHG requires a single internal register; the others require two.
buildInternalIntRegisterDefForNode(tree);
if (tree->OperGet() != GT_XCHG)
{
buildInternalIntRegisterDefForNode(tree);
}
}
else if (tree->OperIs(GT_XAND))
{
// for ldclral we need an internal register.
buildInternalIntRegisterDefForNode(tree);
}
assert(!tree->gtGetOp1()->isContained());
RefPosition* op1Use = BuildUse(tree->gtGetOp1());
RefPosition* op2Use = nullptr;
if (!tree->gtGetOp2()->isContained())
{
op2Use = BuildUse(tree->gtGetOp2());
}
// For ARMv8 exclusives the lifetime of the addr and data must be extended because
// they may be used multiple times during retries
if (!compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
// Internals may not collide with target
if (dstCount == 1)
{
setDelayFree(op1Use);
if (op2Use != nullptr)
{
setDelayFree(op2Use);
}
setInternalRegsDelayFree = true;
}
buildInternalRegisterUses();
}
if (dstCount == 1)
{
BuildDef(tree);
}
}
break;
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
srcCount = BuildPutArgSplit(tree->AsPutArgSplit());
dstCount = tree->AsPutArgSplit()->gtNumRegs;
break;
#endif // FEATURE_ARG_SPLIT
case GT_PUTARG_STK:
srcCount = BuildPutArgStk(tree->AsPutArgStk());
break;
case GT_PUTARG_REG:
srcCount = BuildPutArgReg(tree->AsUnOp());
break;
case GT_CALL:
srcCount = BuildCall(tree->AsCall());
if (tree->AsCall()->HasMultiRegRetVal())
{
dstCount = tree->AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
break;
case GT_ADDR:
{
// For a GT_ADDR, the child node should not be evaluated into a register
GenTree* child = tree->gtGetOp1();
assert(!isCandidateLocalRef(child));
assert(child->isContained());
assert(dstCount == 1);
srcCount = 0;
BuildDef(tree);
}
break;
case GT_BLK:
// These should all be eliminated prior to Lowering.
assert(!"Non-store block node in Lowering");
srcCount = 0;
break;
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
srcCount = BuildBlockStore(tree->AsBlk());
break;
case GT_INIT_VAL:
// Always a passthrough of its child's value.
assert(!"INIT_VAL should always be contained");
srcCount = 0;
break;
case GT_LCLHEAP:
{
assert(dstCount == 1);
// Need a variable number of temp regs (see genLclHeap() in codegenarm64.cpp):
// Here '-' means don't care.
//
// Size? Init Memory? # temp regs
// 0 - 0
// const and <=UnrollLimit - 0
// const and <PageSize No 0
// >UnrollLimit Yes 0
// Non-const Yes 0
// Non-const No 2
//
GenTree* size = tree->gtGetOp1();
if (size->IsCnsIntOrI())
{
assert(size->isContained());
srcCount = 0;
size_t sizeVal = size->AsIntCon()->gtIconVal;
if (sizeVal != 0)
{
// Compute the amount of memory to properly STACK_ALIGN.
// Note: The Gentree node is not updated here as it is cheap to recompute stack aligned size.
// This should also help in debugging as we can examine the original size specified with
// localloc.
sizeVal = AlignUp(sizeVal, STACK_ALIGN);
if (sizeVal <= LCLHEAP_UNROLL_LIMIT)
{
// Need no internal registers
}
else if (!compiler->info.compInitMem)
{
// No need to initialize allocated stack space.
if (sizeVal < compiler->eeGetPageSize())
{
// Need no internal registers
}
else
{
// We need two registers: regCnt and RegTmp
buildInternalIntRegisterDefForNode(tree);
buildInternalIntRegisterDefForNode(tree);
}
}
}
}
else
{
srcCount = 1;
if (!compiler->info.compInitMem)
{
buildInternalIntRegisterDefForNode(tree);
buildInternalIntRegisterDefForNode(tree);
}
}
if (!size->isContained())
{
BuildUse(size);
}
buildInternalRegisterUses();
BuildDef(tree);
}
break;
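// Illustrative example (hypothetical sizes): a constant localloc of 72 bytes is aligned up to 80
// and, if that is within the unroll limit, needs no temps; with compInitMem == false, a
// non-constant size - or a constant one of at least a page - reserves the two internal registers
// (regCnt and a scratch) used for page probing.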
case GT_BOUNDS_CHECK:
{
GenTreeBoundsChk* node = tree->AsBoundsChk();
// Consumes arrLen & index - has no result
assert(dstCount == 0);
srcCount = BuildOperandUses(node->GetIndex());
srcCount += BuildOperandUses(node->GetArrayLength());
}
break;
case GT_ARR_ELEM:
// These must have been lowered to GT_ARR_INDEX
noway_assert(!"We should never see a GT_ARR_ELEM in lowering");
srcCount = 0;
assert(dstCount == 0);
break;
case GT_ARR_INDEX:
{
srcCount = 2;
assert(dstCount == 1);
buildInternalIntRegisterDefForNode(tree);
setInternalRegsDelayFree = true;
// For GT_ARR_INDEX, the lifetime of the arrObj must be extended because it is actually used multiple
// times while the result is being computed.
RefPosition* arrObjUse = BuildUse(tree->AsArrIndex()->ArrObj());
setDelayFree(arrObjUse);
BuildUse(tree->AsArrIndex()->IndexExpr());
buildInternalRegisterUses();
BuildDef(tree);
}
break;
case GT_ARR_OFFSET:
// This consumes the offset, if any, the arrObj and the effective index,
// and produces the flattened offset for this dimension.
srcCount = 2;
if (!tree->AsArrOffs()->gtOffset->isContained())
{
BuildUse(tree->AsArrOffs()->gtOffset);
srcCount++;
}
BuildUse(tree->AsArrOffs()->gtIndex);
BuildUse(tree->AsArrOffs()->gtArrObj);
assert(dstCount == 1);
buildInternalIntRegisterDefForNode(tree);
buildInternalRegisterUses();
BuildDef(tree);
break;
case GT_LEA:
{
GenTreeAddrMode* lea = tree->AsAddrMode();
GenTree* base = lea->Base();
GenTree* index = lea->Index();
int cns = lea->Offset();
// This LEA is instantiating an address, so we set up the srcCount here.
srcCount = 0;
if (base != nullptr)
{
srcCount++;
BuildUse(base);
}
if (index != nullptr)
{
srcCount++;
if (index->OperIs(GT_BFIZ) && index->isContained())
{
GenTreeCast* cast = index->gtGetOp1()->AsCast();
assert(cast->isContained() && (cns == 0));
BuildUse(cast->CastOp());
}
else
{
BuildUse(index);
}
}
assert(dstCount == 1);
// On ARM64 we may need a single internal register
// (even when both conditions are true, we still only need a single internal register)
if ((index != nullptr) && (cns != 0))
{
// ARM64 does not support both Index and offset so we need an internal register
buildInternalIntRegisterDefForNode(tree);
}
else if (!emitter::emitIns_valid_imm_for_add(cns, EA_8BYTE))
{
// This offset can't be contained in the add instruction, so we need an internal register
buildInternalIntRegisterDefForNode(tree);
}
buildInternalRegisterUses();
BuildDef(tree);
}
break;
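// Illustrative example (hypothetical operands): an address of the form [x1 + x2 + 0x10] cannot be
// produced by a single ARM64 add, so the internal register reserved above lets codegen split it
// (e.g. temp = x1 + 0x10, then dst = temp + x2); [x1 + x2] alone, or [x1 + imm] with an encodable
// immediate, needs no temp.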
case GT_STOREIND:
{
assert(dstCount == 0);
if (compiler->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(tree))
{
srcCount = BuildGCWriteBarrier(tree);
break;
}
srcCount = BuildIndir(tree->AsIndir());
if (!tree->gtGetOp2()->isContained())
{
BuildUse(tree->gtGetOp2());
srcCount++;
}
}
break;
case GT_NULLCHECK:
case GT_IND:
assert(dstCount == (tree->OperIs(GT_NULLCHECK) ? 0 : 1));
srcCount = BuildIndir(tree->AsIndir());
break;
case GT_CATCH_ARG:
srcCount = 0;
assert(dstCount == 1);
BuildDef(tree, RBM_EXCEPTION_OBJECT);
break;
case GT_CLS_VAR:
srcCount = 0;
// GT_CLS_VAR, by the time we reach the backend, must always
// be a pure use.
// It will produce a result of the type of the
// node, and use an internal register for the address.
assert(dstCount == 1);
assert((tree->gtFlags & (GTF_VAR_DEF | GTF_VAR_USEASG)) == 0);
buildInternalIntRegisterDefForNode(tree);
buildInternalRegisterUses();
BuildDef(tree);
break;
case GT_INDEX_ADDR:
assert(dstCount == 1);
srcCount = BuildBinaryUses(tree->AsOp());
buildInternalIntRegisterDefForNode(tree);
buildInternalRegisterUses();
BuildDef(tree);
break;
} // end switch (tree->OperGet())
if (tree->IsUnusedValue() && (dstCount != 0))
{
isLocalDefUse = true;
}
// We need to be sure that we've set srcCount and dstCount appropriately
assert((dstCount < 2) || tree->IsMultiRegNode());
assert(isLocalDefUse == (tree->IsValue() && tree->IsUnusedValue()));
assert(!tree->IsUnusedValue() || (dstCount != 0));
assert(dstCount == tree->GetRegisterDstCount(compiler));
return srcCount;
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// BuildSIMD: Set the NodeInfo for a GT_SIMD tree.
//
// Arguments:
// tree - The GT_SIMD node of interest
//
// Return Value:
// The number of sources consumed by this node.
//
int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
{
int srcCount = 0;
// Only SIMDIntrinsicInit can be contained
if (simdTree->isContained())
{
assert(simdTree->GetSIMDIntrinsicId() == SIMDIntrinsicInit);
}
int dstCount = simdTree->IsValue() ? 1 : 0;
assert(dstCount == 1);
bool buildUses = true;
switch (simdTree->GetSIMDIntrinsicId())
{
case SIMDIntrinsicInit:
case SIMDIntrinsicCast:
// No special handling required.
break;
case SIMDIntrinsicSub:
case SIMDIntrinsicBitwiseAnd:
case SIMDIntrinsicBitwiseOr:
case SIMDIntrinsicEqual:
// No special handling required.
break;
case SIMDIntrinsicInitN:
{
var_types baseType = simdTree->GetSimdBaseType();
srcCount = (short)(simdTree->GetSimdSize() / genTypeSize(baseType));
assert(simdTree->GetOperandCount() == static_cast<size_t>(srcCount));
if (varTypeIsFloating(simdTree->GetSimdBaseType()))
{
// Need an internal register to stitch together all the values into a single vector in a SIMD reg.
buildInternalFloatRegisterDefForNode(simdTree);
}
for (GenTree* operand : simdTree->Operands())
{
assert(operand->TypeIs(baseType));
assert(!operand->isContained());
BuildUse(operand);
}
buildUses = false;
break;
}
case SIMDIntrinsicInitArray:
// We have an array and an index, which may be contained.
break;
case SIMDIntrinsicInitArrayX:
case SIMDIntrinsicInitFixed:
case SIMDIntrinsicCopyToArray:
case SIMDIntrinsicCopyToArrayX:
case SIMDIntrinsicNone:
case SIMDIntrinsicHWAccel:
case SIMDIntrinsicInvalid:
assert(!"These intrinsics should not be seen during register allocation");
FALLTHROUGH;
default:
noway_assert(!"Unimplemented SIMD node type.");
unreached();
}
if (buildUses)
{
assert(srcCount == 0);
srcCount = BuildOperandUses(simdTree->Op(1));
if ((simdTree->GetOperandCount() == 2) && !simdTree->Op(2)->isContained())
{
srcCount += BuildOperandUses(simdTree->Op(2));
}
}
assert(internalCount <= MaxInternalCount);
buildInternalRegisterUses();
if (dstCount == 1)
{
BuildDef(simdTree);
}
else
{
assert(dstCount == 0);
}
return srcCount;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
#include "hwintrinsic.h"
//------------------------------------------------------------------------
// BuildHWIntrinsic: Set the NodeInfo for a GT_HWINTRINSIC tree.
//
// Arguments:
// tree - The GT_HWINTRINSIC node of interest
// pDstCount - OUT parameter - the number of registers defined for the given node
//
// Return Value:
// The number of sources consumed by this node.
//
int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree, int* pDstCount)
{
assert(pDstCount != nullptr);
const HWIntrinsic intrin(intrinsicTree);
int srcCount = 0;
int dstCount = 0;
if (HWIntrinsicInfo::IsMultiReg(intrin.id))
{
dstCount = intrinsicTree->GetMultiRegCount();
}
else if (intrinsicTree->IsValue())
{
dstCount = 1;
}
const bool hasImmediateOperand = HWIntrinsicInfo::HasImmediateOperand(intrin.id);
if (hasImmediateOperand && !HWIntrinsicInfo::NoJmpTableImm(intrin.id))
{
// We may need to allocate an additional general-purpose register when an intrinsic has a non-const immediate
// operand and the intrinsic does not have an alternative non-const fallback form.
// However, when the operand can take only two possible values - zero and one -
// the codegen can use cbnz to do a conditional branch, so such a register is not needed.
bool needBranchTargetReg = false;
int immLowerBound = 0;
int immUpperBound = 0;
if (intrin.category == HW_Category_SIMDByIndexedElement)
{
var_types indexedElementOpType;
if (intrin.numOperands == 3)
{
indexedElementOpType = intrin.op2->TypeGet();
}
else
{
assert(intrin.numOperands == 4);
indexedElementOpType = intrin.op3->TypeGet();
}
assert(varTypeIsSIMD(indexedElementOpType));
const unsigned int indexedElementSimdSize = genTypeSize(indexedElementOpType);
HWIntrinsicInfo::lookupImmBounds(intrin.id, indexedElementSimdSize, intrin.baseType, &immLowerBound,
&immUpperBound);
}
else
{
HWIntrinsicInfo::lookupImmBounds(intrin.id, intrinsicTree->GetSimdSize(), intrin.baseType, &immLowerBound,
&immUpperBound);
}
if ((immLowerBound != 0) || (immUpperBound != 1))
{
if ((intrin.category == HW_Category_SIMDByIndexedElement) ||
(intrin.category == HW_Category_ShiftLeftByImmediate) ||
(intrin.category == HW_Category_ShiftRightByImmediate))
{
switch (intrin.numOperands)
{
case 4:
needBranchTargetReg = !intrin.op4->isContainedIntOrIImmed();
break;
case 3:
needBranchTargetReg = !intrin.op3->isContainedIntOrIImmed();
break;
case 2:
needBranchTargetReg = !intrin.op2->isContainedIntOrIImmed();
break;
default:
unreached();
}
}
else
{
switch (intrin.id)
{
case NI_AdvSimd_DuplicateSelectedScalarToVector64:
case NI_AdvSimd_DuplicateSelectedScalarToVector128:
case NI_AdvSimd_Extract:
case NI_AdvSimd_Insert:
case NI_AdvSimd_InsertScalar:
case NI_AdvSimd_LoadAndInsertScalar:
case NI_AdvSimd_Arm64_DuplicateSelectedScalarToVector128:
needBranchTargetReg = !intrin.op2->isContainedIntOrIImmed();
break;
case NI_AdvSimd_ExtractVector64:
case NI_AdvSimd_ExtractVector128:
case NI_AdvSimd_StoreSelectedScalar:
needBranchTargetReg = !intrin.op3->isContainedIntOrIImmed();
break;
case NI_AdvSimd_Arm64_InsertSelectedScalar:
assert(intrin.op2->isContainedIntOrIImmed());
assert(intrin.op4->isContainedIntOrIImmed());
break;
default:
unreached();
}
}
}
if (needBranchTargetReg)
{
buildInternalIntRegisterDefForNode(intrinsicTree);
}
}
// Determine whether this is an RMW operation where op2+ must be marked delayFree so that it
// is not allocated the same register as the target.
const bool isRMW = intrinsicTree->isRMWHWIntrinsic(compiler);
bool tgtPrefOp1 = false;
if (intrin.op1 != nullptr)
{
bool simdRegToSimdRegMove = false;
if ((intrin.id == NI_Vector64_CreateScalarUnsafe) || (intrin.id == NI_Vector128_CreateScalarUnsafe))
{
simdRegToSimdRegMove = varTypeIsFloating(intrin.op1);
}
else if (intrin.id == NI_AdvSimd_Arm64_DuplicateToVector64)
{
simdRegToSimdRegMove = (intrin.op1->TypeGet() == TYP_DOUBLE);
}
else if ((intrin.id == NI_Vector64_ToScalar) || (intrin.id == NI_Vector128_ToScalar))
{
simdRegToSimdRegMove = varTypeIsFloating(intrinsicTree);
}
// If we have an RMW intrinsic or an intrinsic with simple move semantic between two SIMD registers,
// we want to preference op1Reg to the target if op1 is not contained.
if (isRMW || simdRegToSimdRegMove)
{
tgtPrefOp1 = !intrin.op1->isContained();
}
if (intrinsicTree->OperIsMemoryLoadOrStore())
{
srcCount += BuildAddrUses(intrin.op1);
}
else if (tgtPrefOp1)
{
tgtPrefUse = BuildUse(intrin.op1);
srcCount++;
}
else
{
srcCount += BuildOperandUses(intrin.op1);
}
}
if ((intrin.category == HW_Category_SIMDByIndexedElement) && (genTypeSize(intrin.baseType) == 2))
{
// Some "Advanced SIMD scalar x indexed element" and "Advanced SIMD vector x indexed element" instructions (e.g.
// "MLA (by element)") have encoding that restricts what registers that can be used for the indexed element when
// the element size is H (i.e. 2 bytes).
assert(intrin.op2 != nullptr);
if ((intrin.op4 != nullptr) || ((intrin.op3 != nullptr) && !hasImmediateOperand))
{
if (isRMW)
{
srcCount += BuildDelayFreeUses(intrin.op2, nullptr);
srcCount += BuildDelayFreeUses(intrin.op3, nullptr, RBM_ASIMD_INDEXED_H_ELEMENT_ALLOWED_REGS);
}
else
{
srcCount += BuildOperandUses(intrin.op2);
srcCount += BuildOperandUses(intrin.op3, RBM_ASIMD_INDEXED_H_ELEMENT_ALLOWED_REGS);
}
if (intrin.op4 != nullptr)
{
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op4));
srcCount += BuildOperandUses(intrin.op4);
}
}
else
{
assert(!isRMW);
srcCount += BuildOperandUses(intrin.op2, RBM_ASIMD_INDEXED_H_ELEMENT_ALLOWED_REGS);
if (intrin.op3 != nullptr)
{
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op3));
srcCount += BuildOperandUses(intrin.op3);
}
}
}
else if (intrin.op2 != nullptr)
{
// RMW intrinsic operands don't have to be delayFree when they can be assigned the same register as op1Reg
// (i.e. the register that corresponds to the read-modify-write operand) and one of them is the last use.
assert(intrin.op1 != nullptr);
bool forceOp2DelayFree = false;
if ((intrin.id == NI_Vector64_GetElement) || (intrin.id == NI_Vector128_GetElement))
{
if (!intrin.op2->IsCnsIntOrI() && (!intrin.op1->isContained() || intrin.op1->OperIsLocal()))
{
// If the index is not a constant and the object is not contained or is a local,
// we will need a general purpose register to calculate the address.
// The internal register must not clobber the input index.
// TODO-Cleanup: An internal register will never clobber a source; this code actually
// ensures that the index (op2) doesn't interfere with the target.
buildInternalIntRegisterDefForNode(intrinsicTree);
forceOp2DelayFree = true;
}
if (!intrin.op2->IsCnsIntOrI() && !intrin.op1->isContained())
{
// If the index is not a constant or op1 is in a register,
// we will use the SIMD temp location to store the vector.
var_types requiredSimdTempType = (intrin.id == NI_Vector64_GetElement) ? TYP_SIMD8 : TYP_SIMD16;
compiler->getSIMDInitTempVarNum(requiredSimdTempType);
}
}
if (forceOp2DelayFree)
{
srcCount += BuildDelayFreeUses(intrin.op2);
}
else
{
srcCount += isRMW ? BuildDelayFreeUses(intrin.op2, intrin.op1) : BuildOperandUses(intrin.op2);
}
if (intrin.op3 != nullptr)
{
srcCount += isRMW ? BuildDelayFreeUses(intrin.op3, intrin.op1) : BuildOperandUses(intrin.op3);
if (intrin.op4 != nullptr)
{
srcCount += isRMW ? BuildDelayFreeUses(intrin.op4, intrin.op1) : BuildOperandUses(intrin.op4);
}
}
}
buildInternalRegisterUses();
if ((dstCount == 1) || (dstCount == 2))
{
BuildDef(intrinsicTree);
if (dstCount == 2)
{
BuildDef(intrinsicTree, RBM_NONE, 1);
}
}
else
{
assert(dstCount == 0);
}
*pDstCount = dstCount;
return srcCount;
}
#endif
#endif // TARGET_ARM64
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Register Requirements for ARM64 XX
XX XX
XX This encapsulates all the logic for setting register requirements for XX
XX the ARM64 architecture. XX
XX XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARM64
#include "jit.h"
#include "sideeffects.h"
#include "lower.h"
//------------------------------------------------------------------------
// BuildNode: Build the RefPositions for for a node
//
// Arguments:
// treeNode - the node of interest
//
// Return Value:
// The number of sources consumed by this node.
//
// Notes:
// Preconditions:
// LSRA Has been initialized.
//
// Postconditions:
// RefPositions have been built for all the register defs and uses required
// for this node.
//
int LinearScan::BuildNode(GenTree* tree)
{
assert(!tree->isContained());
int srcCount;
int dstCount = 0;
regMaskTP dstCandidates = RBM_NONE;
regMaskTP killMask = RBM_NONE;
bool isLocalDefUse = false;
// Reset the build-related members of LinearScan.
clearBuildState();
// Set the default dstCount. This may be modified below.
if (tree->IsValue())
{
dstCount = 1;
if (tree->IsUnusedValue())
{
isLocalDefUse = true;
}
}
else
{
dstCount = 0;
}
switch (tree->OperGet())
{
default:
srcCount = BuildSimple(tree);
break;
case GT_LCL_VAR:
// We make a final determination about whether a GT_LCL_VAR is a candidate or contained
// after liveness. In either case we don't build any uses or defs. Otherwise, this is a
// load of a stack-based local into a register and we'll fall through to the general
// local case below.
if (checkContainedOrCandidateLclVar(tree->AsLclVar()))
{
return 0;
}
FALLTHROUGH;
case GT_LCL_FLD:
{
srcCount = 0;
#ifdef FEATURE_SIMD
// Need an additional register to read upper 4 bytes of Vector3.
if (tree->TypeGet() == TYP_SIMD12)
{
// We need an internal register different from targetReg in which 'tree' produces its result
// because both targetReg and internal reg will be in use at the same time.
buildInternalFloatRegisterDefForNode(tree, allSIMDRegs());
setInternalRegsDelayFree = true;
buildInternalRegisterUses();
}
#endif
BuildDef(tree);
}
break;
case GT_STORE_LCL_VAR:
if (tree->IsMultiRegLclVar() && isCandidateMultiRegLclVar(tree->AsLclVar()))
{
dstCount = compiler->lvaGetDesc(tree->AsLclVar())->lvFieldCnt;
}
FALLTHROUGH;
case GT_STORE_LCL_FLD:
srcCount = BuildStoreLoc(tree->AsLclVarCommon());
break;
case GT_FIELD_LIST:
// These should always be contained. We don't correctly allocate or
// generate code for a non-contained GT_FIELD_LIST.
noway_assert(!"Non-contained GT_FIELD_LIST");
srcCount = 0;
break;
case GT_ARGPLACE:
case GT_NO_OP:
case GT_START_NONGC:
srcCount = 0;
assert(dstCount == 0);
break;
case GT_PROF_HOOK:
srcCount = 0;
assert(dstCount == 0);
killMask = getKillSetForProfilerHook();
BuildDefsWithKills(tree, 0, RBM_NONE, killMask);
break;
case GT_START_PREEMPTGC:
// This kills GC refs in callee save regs
srcCount = 0;
assert(dstCount == 0);
BuildDefsWithKills(tree, 0, RBM_NONE, RBM_NONE);
break;
case GT_CNS_DBL:
{
GenTreeDblCon* dblConst = tree->AsDblCon();
double constValue = dblConst->AsDblCon()->gtDconVal;
if (emitter::emitIns_valid_imm_for_fmov(constValue))
{
// Directly encode constant to instructions.
}
else
{
// Reserve int to load constant from memory (IF_LARGELDC)
buildInternalIntRegisterDefForNode(tree);
buildInternalRegisterUses();
}
}
FALLTHROUGH;
case GT_CNS_INT:
{
srcCount = 0;
assert(dstCount == 1);
RefPosition* def = BuildDef(tree);
def->getInterval()->isConstant = true;
}
break;
case GT_BOX:
case GT_COMMA:
case GT_QMARK:
case GT_COLON:
srcCount = 0;
assert(dstCount == 0);
unreached();
break;
case GT_RETURN:
srcCount = BuildReturn(tree);
killMask = getKillSetForReturn();
BuildDefsWithKills(tree, 0, RBM_NONE, killMask);
break;
case GT_RETFILT:
assert(dstCount == 0);
if (tree->TypeGet() == TYP_VOID)
{
srcCount = 0;
}
else
{
assert(tree->TypeGet() == TYP_INT);
srcCount = 1;
BuildUse(tree->gtGetOp1(), RBM_INTRET);
}
break;
case GT_NOP:
// A GT_NOP is a passthrough if it is void or has a child, but it must be
// considered to produce a dummy value if it has a type but no child.
srcCount = 0;
if (tree->TypeGet() != TYP_VOID && tree->gtGetOp1() == nullptr)
{
assert(dstCount == 1);
BuildDef(tree);
}
else
{
assert(dstCount == 0);
}
break;
case GT_KEEPALIVE:
assert(dstCount == 0);
srcCount = BuildOperandUses(tree->gtGetOp1());
break;
case GT_JTRUE:
srcCount = 0;
assert(dstCount == 0);
break;
case GT_JMP:
srcCount = 0;
assert(dstCount == 0);
break;
case GT_SWITCH:
// This should never occur since switch nodes must not be visible at this
// point in the JIT.
srcCount = 0;
noway_assert(!"Switch must be lowered at this point");
break;
case GT_JMPTABLE:
srcCount = 0;
assert(dstCount == 1);
BuildDef(tree);
break;
case GT_SWITCH_TABLE:
buildInternalIntRegisterDefForNode(tree);
srcCount = BuildBinaryUses(tree->AsOp());
assert(dstCount == 0);
break;
case GT_ASG:
noway_assert(!"We should never hit any assignment operator in lowering");
srcCount = 0;
break;
case GT_ADD:
case GT_SUB:
if (varTypeIsFloating(tree->TypeGet()))
{
// overflow operations aren't supported on float/double types.
assert(!tree->gtOverflow());
// No implicit conversions at this stage as the expectation is that
// everything is made explicit by adding casts.
assert(tree->gtGetOp1()->TypeGet() == tree->gtGetOp2()->TypeGet());
}
FALLTHROUGH;
case GT_ADDEX:
case GT_AND:
case GT_AND_NOT:
case GT_OR:
case GT_XOR:
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROR:
srcCount = BuildBinaryUses(tree->AsOp());
assert(dstCount == 1);
BuildDef(tree);
break;
case GT_BFIZ:
assert(tree->gtGetOp1()->OperIs(GT_CAST));
srcCount = BuildOperandUses(tree->gtGetOp1()->gtGetOp1());
BuildDef(tree);
break;
case GT_RETURNTRAP:
// this just turns into a compare of its child with an int
// + a conditional call
BuildUse(tree->gtGetOp1());
srcCount = 1;
assert(dstCount == 0);
killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC);
BuildDefsWithKills(tree, 0, RBM_NONE, killMask);
break;
case GT_MOD:
case GT_UMOD:
NYI_IF(varTypeIsFloating(tree->TypeGet()), "FP Remainder in ARM64");
assert(!"Shouldn't see an integer typed GT_MOD node in ARM64");
srcCount = 0;
break;
case GT_MUL:
if (tree->gtOverflow())
{
// Need a register different from target reg to check for overflow.
buildInternalIntRegisterDefForNode(tree);
setInternalRegsDelayFree = true;
}
FALLTHROUGH;
case GT_DIV:
case GT_MULHI:
case GT_MUL_LONG:
case GT_UDIV:
{
srcCount = BuildBinaryUses(tree->AsOp());
buildInternalRegisterUses();
assert(dstCount == 1);
BuildDef(tree);
}
break;
case GT_INTRINSIC:
{
switch (tree->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Max:
case NI_System_Math_Min:
assert(varTypeIsFloating(tree->gtGetOp1()));
assert(varTypeIsFloating(tree->gtGetOp2()));
assert(tree->gtGetOp1()->TypeIs(tree->TypeGet()));
srcCount = BuildBinaryUses(tree->AsOp());
assert(dstCount == 1);
BuildDef(tree);
break;
case NI_System_Math_Abs:
case NI_System_Math_Ceiling:
case NI_System_Math_Floor:
case NI_System_Math_Truncate:
case NI_System_Math_Round:
case NI_System_Math_Sqrt:
assert(varTypeIsFloating(tree->gtGetOp1()));
assert(tree->gtGetOp1()->TypeIs(tree->TypeGet()));
BuildUse(tree->gtGetOp1());
srcCount = 1;
assert(dstCount == 1);
BuildDef(tree);
break;
default:
unreached();
}
}
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
srcCount = BuildSIMD(tree->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
srcCount = BuildHWIntrinsic(tree->AsHWIntrinsic(), &dstCount);
break;
#endif // FEATURE_HW_INTRINSICS
case GT_CAST:
assert(dstCount == 1);
srcCount = BuildCast(tree->AsCast());
break;
case GT_NEG:
case GT_NOT:
BuildUse(tree->gtGetOp1());
srcCount = 1;
assert(dstCount == 1);
BuildDef(tree);
break;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
case GT_JCMP:
srcCount = BuildCmp(tree);
break;
case GT_CKFINITE:
srcCount = 1;
assert(dstCount == 1);
buildInternalIntRegisterDefForNode(tree);
BuildUse(tree->gtGetOp1());
BuildDef(tree);
buildInternalRegisterUses();
break;
case GT_CMPXCHG:
{
GenTreeCmpXchg* cmpXchgNode = tree->AsCmpXchg();
srcCount = cmpXchgNode->gtOpComparand->isContained() ? 2 : 3;
assert(dstCount == 1);
if (!compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
// ARMv8 exclusives require a single internal register
buildInternalIntRegisterDefForNode(tree);
}
// For ARMv8 exclusives the lifetime of the addr and data must be extended because
// they may be used multiple times during retries
// For ARMv8.1 atomic cas the lifetime of the addr and data must be extended to prevent
// them being reused as the target register which must be destroyed early
RefPosition* locationUse = BuildUse(tree->AsCmpXchg()->gtOpLocation);
setDelayFree(locationUse);
RefPosition* valueUse = BuildUse(tree->AsCmpXchg()->gtOpValue);
setDelayFree(valueUse);
if (!cmpXchgNode->gtOpComparand->isContained())
{
RefPosition* comparandUse = BuildUse(tree->AsCmpXchg()->gtOpComparand);
// For ARMv8 exclusives the lifetime of the comparand must be extended because
// it may be used multiple times during retries
if (!compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
setDelayFree(comparandUse);
}
}
// Internals may not collide with target
setInternalRegsDelayFree = true;
buildInternalRegisterUses();
BuildDef(tree);
}
break;
case GT_LOCKADD:
case GT_XORR:
case GT_XAND:
case GT_XADD:
case GT_XCHG:
{
assert(dstCount == (tree->TypeGet() == TYP_VOID) ? 0 : 1);
srcCount = tree->gtGetOp2()->isContained() ? 1 : 2;
if (!compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
// GT_XCHG requires a single internal register; the others require two.
buildInternalIntRegisterDefForNode(tree);
if (tree->OperGet() != GT_XCHG)
{
buildInternalIntRegisterDefForNode(tree);
}
}
else if (tree->OperIs(GT_XAND))
{
// for ldclral we need an internal register.
buildInternalIntRegisterDefForNode(tree);
}
assert(!tree->gtGetOp1()->isContained());
RefPosition* op1Use = BuildUse(tree->gtGetOp1());
RefPosition* op2Use = nullptr;
if (!tree->gtGetOp2()->isContained())
{
op2Use = BuildUse(tree->gtGetOp2());
}
// For ARMv8 exclusives the lifetime of the addr and data must be extended because
// they may be used multiple times during retries
if (!compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
// Internals may not collide with target
if (dstCount == 1)
{
setDelayFree(op1Use);
if (op2Use != nullptr)
{
setDelayFree(op2Use);
}
setInternalRegsDelayFree = true;
}
buildInternalRegisterUses();
}
if (dstCount == 1)
{
BuildDef(tree);
}
}
break;
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
srcCount = BuildPutArgSplit(tree->AsPutArgSplit());
dstCount = tree->AsPutArgSplit()->gtNumRegs;
break;
#endif // FEATURE_ARG_SPLIT
case GT_PUTARG_STK:
srcCount = BuildPutArgStk(tree->AsPutArgStk());
break;
case GT_PUTARG_REG:
srcCount = BuildPutArgReg(tree->AsUnOp());
break;
case GT_CALL:
srcCount = BuildCall(tree->AsCall());
if (tree->AsCall()->HasMultiRegRetVal())
{
dstCount = tree->AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
break;
case GT_ADDR:
{
// For a GT_ADDR, the child node should not be evaluated into a register
GenTree* child = tree->gtGetOp1();
assert(!isCandidateLocalRef(child));
assert(child->isContained());
assert(dstCount == 1);
srcCount = 0;
BuildDef(tree);
}
break;
case GT_BLK:
// These should all be eliminated prior to Lowering.
assert(!"Non-store block node in Lowering");
srcCount = 0;
break;
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
srcCount = BuildBlockStore(tree->AsBlk());
break;
case GT_INIT_VAL:
// Always a passthrough of its child's value.
assert(!"INIT_VAL should always be contained");
srcCount = 0;
break;
case GT_LCLHEAP:
{
assert(dstCount == 1);
// Need a variable number of temp regs (see genLclHeap() in codegenarm64.cpp):
// Here '-' means don't care.
//
// Size? Init Memory? # temp regs
// 0 - 0
// const and <=UnrollLimit - 0
// const and <PageSize No 0
// >UnrollLimit Yes 0
// Non-const Yes 0
// Non-const No 2
//
GenTree* size = tree->gtGetOp1();
if (size->IsCnsIntOrI())
{
assert(size->isContained());
srcCount = 0;
size_t sizeVal = size->AsIntCon()->gtIconVal;
if (sizeVal != 0)
{
// Compute the amount of memory to properly STACK_ALIGN.
// Note: The Gentree node is not updated here as it is cheap to recompute stack aligned size.
// This should also help in debugging as we can examine the original size specified with
// localloc.
sizeVal = AlignUp(sizeVal, STACK_ALIGN);
if (sizeVal <= LCLHEAP_UNROLL_LIMIT)
{
// Need no internal registers
}
else if (!compiler->info.compInitMem)
{
// No need to initialize allocated stack space.
if (sizeVal < compiler->eeGetPageSize())
{
// Need no internal registers
}
else
{
// We need two registers: regCnt and RegTmp
buildInternalIntRegisterDefForNode(tree);
buildInternalIntRegisterDefForNode(tree);
}
}
}
}
else
{
srcCount = 1;
if (!compiler->info.compInitMem)
{
buildInternalIntRegisterDefForNode(tree);
buildInternalIntRegisterDefForNode(tree);
}
}
if (!size->isContained())
{
BuildUse(size);
}
buildInternalRegisterUses();
BuildDef(tree);
}
break;
case GT_BOUNDS_CHECK:
{
GenTreeBoundsChk* node = tree->AsBoundsChk();
// Consumes arrLen & index - has no result
assert(dstCount == 0);
srcCount = BuildOperandUses(node->GetIndex());
srcCount += BuildOperandUses(node->GetArrayLength());
}
break;
case GT_ARR_ELEM:
// These must have been lowered to GT_ARR_INDEX
noway_assert(!"We should never see a GT_ARR_ELEM in lowering");
srcCount = 0;
assert(dstCount == 0);
break;
case GT_ARR_INDEX:
{
srcCount = 2;
assert(dstCount == 1);
buildInternalIntRegisterDefForNode(tree);
setInternalRegsDelayFree = true;
// For GT_ARR_INDEX, the lifetime of the arrObj must be extended because it is actually used multiple
// times while the result is being computed.
RefPosition* arrObjUse = BuildUse(tree->AsArrIndex()->ArrObj());
setDelayFree(arrObjUse);
BuildUse(tree->AsArrIndex()->IndexExpr());
buildInternalRegisterUses();
BuildDef(tree);
}
break;
case GT_ARR_OFFSET:
// This consumes the offset, if any, the arrObj and the effective index,
// and produces the flattened offset for this dimension.
srcCount = 2;
if (!tree->AsArrOffs()->gtOffset->isContained())
{
BuildUse(tree->AsArrOffs()->gtOffset);
srcCount++;
}
BuildUse(tree->AsArrOffs()->gtIndex);
BuildUse(tree->AsArrOffs()->gtArrObj);
assert(dstCount == 1);
buildInternalIntRegisterDefForNode(tree);
buildInternalRegisterUses();
BuildDef(tree);
break;
case GT_LEA:
{
GenTreeAddrMode* lea = tree->AsAddrMode();
GenTree* base = lea->Base();
GenTree* index = lea->Index();
int cns = lea->Offset();
// This LEA is instantiating an address, so we set up the srcCount here.
srcCount = 0;
if (base != nullptr)
{
srcCount++;
BuildUse(base);
}
if (index != nullptr)
{
srcCount++;
if (index->OperIs(GT_BFIZ) && index->isContained())
{
GenTreeCast* cast = index->gtGetOp1()->AsCast();
assert(cast->isContained() && (cns == 0));
BuildUse(cast->CastOp());
}
else
{
BuildUse(index);
}
}
assert(dstCount == 1);
// On ARM64 we may need a single internal register
// (when both conditions are true, we still only need a single internal register)
if ((index != nullptr) && (cns != 0))
{
// ARM64 does not support both Index and offset so we need an internal register
buildInternalIntRegisterDefForNode(tree);
}
else if (!emitter::emitIns_valid_imm_for_add(cns, EA_8BYTE))
{
// This offset can't be contained in the add instruction, so we need an internal register
buildInternalIntRegisterDefForNode(tree);
}
buildInternalRegisterUses();
BuildDef(tree);
}
break;
case GT_STOREIND:
{
assert(dstCount == 0);
if (compiler->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(tree))
{
srcCount = BuildGCWriteBarrier(tree);
break;
}
srcCount = BuildIndir(tree->AsIndir());
if (!tree->gtGetOp2()->isContained())
{
BuildUse(tree->gtGetOp2());
srcCount++;
}
}
break;
case GT_NULLCHECK:
case GT_IND:
assert(dstCount == (tree->OperIs(GT_NULLCHECK) ? 0 : 1));
srcCount = BuildIndir(tree->AsIndir());
break;
case GT_CATCH_ARG:
srcCount = 0;
assert(dstCount == 1);
BuildDef(tree, RBM_EXCEPTION_OBJECT);
break;
case GT_CLS_VAR:
srcCount = 0;
// GT_CLS_VAR, by the time we reach the backend, must always
// be a pure use.
// It will produce a result of the type of the
// node, and use an internal register for the address.
assert(dstCount == 1);
assert((tree->gtFlags & (GTF_VAR_DEF | GTF_VAR_USEASG)) == 0);
buildInternalIntRegisterDefForNode(tree);
buildInternalRegisterUses();
BuildDef(tree);
break;
case GT_INDEX_ADDR:
assert(dstCount == 1);
srcCount = BuildBinaryUses(tree->AsOp());
buildInternalIntRegisterDefForNode(tree);
buildInternalRegisterUses();
BuildDef(tree);
break;
} // end switch (tree->OperGet())
if (tree->IsUnusedValue() && (dstCount != 0))
{
isLocalDefUse = true;
}
// We need to be sure that we've set srcCount and dstCount appropriately
assert((dstCount < 2) || tree->IsMultiRegNode());
assert(isLocalDefUse == (tree->IsValue() && tree->IsUnusedValue()));
assert(!tree->IsUnusedValue() || (dstCount != 0));
assert(dstCount == tree->GetRegisterDstCount(compiler));
return srcCount;
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// BuildSIMD: Set the NodeInfo for a GT_SIMD tree.
//
// Arguments:
// tree - The GT_SIMD node of interest
//
// Return Value:
// The number of sources consumed by this node.
//
int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
{
int srcCount = 0;
// Only SIMDIntrinsicInit can be contained
if (simdTree->isContained())
{
assert(simdTree->GetSIMDIntrinsicId() == SIMDIntrinsicInit);
}
int dstCount = simdTree->IsValue() ? 1 : 0;
assert(dstCount == 1);
bool buildUses = true;
switch (simdTree->GetSIMDIntrinsicId())
{
case SIMDIntrinsicInit:
case SIMDIntrinsicCast:
// No special handling required.
break;
case SIMDIntrinsicSub:
case SIMDIntrinsicBitwiseAnd:
case SIMDIntrinsicBitwiseOr:
case SIMDIntrinsicEqual:
// No special handling required.
break;
case SIMDIntrinsicInitN:
{
var_types baseType = simdTree->GetSimdBaseType();
srcCount = (short)(simdTree->GetSimdSize() / genTypeSize(baseType));
assert(simdTree->GetOperandCount() == static_cast<size_t>(srcCount));
if (varTypeIsFloating(simdTree->GetSimdBaseType()))
{
// Need an internal register to stitch together all the values into a single vector in a SIMD reg.
buildInternalFloatRegisterDefForNode(simdTree);
}
for (GenTree* operand : simdTree->Operands())
{
assert(operand->TypeIs(baseType));
assert(!operand->isContained());
BuildUse(operand);
}
buildUses = false;
break;
}
case SIMDIntrinsicInitArray:
// We have an array and an index, which may be contained.
break;
case SIMDIntrinsicInitArrayX:
case SIMDIntrinsicInitFixed:
case SIMDIntrinsicCopyToArray:
case SIMDIntrinsicCopyToArrayX:
case SIMDIntrinsicNone:
case SIMDIntrinsicHWAccel:
case SIMDIntrinsicInvalid:
assert(!"These intrinsics should not be seen during register allocation");
FALLTHROUGH;
default:
noway_assert(!"Unimplemented SIMD node type.");
unreached();
}
if (buildUses)
{
assert(srcCount == 0);
srcCount = BuildOperandUses(simdTree->Op(1));
if ((simdTree->GetOperandCount() == 2) && !simdTree->Op(2)->isContained())
{
srcCount += BuildOperandUses(simdTree->Op(2));
}
}
assert(internalCount <= MaxInternalCount);
buildInternalRegisterUses();
if (dstCount == 1)
{
BuildDef(simdTree);
}
else
{
assert(dstCount == 0);
}
return srcCount;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
#include "hwintrinsic.h"
//------------------------------------------------------------------------
// BuildHWIntrinsic: Set the NodeInfo for a GT_HWINTRINSIC tree.
//
// Arguments:
// tree - The GT_HWINTRINSIC node of interest
// pDstCount - OUT parameter - the number of registers defined for the given node
//
// Return Value:
// The number of sources consumed by this node.
//
int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree, int* pDstCount)
{
assert(pDstCount != nullptr);
const HWIntrinsic intrin(intrinsicTree);
int srcCount = 0;
int dstCount = 0;
if (HWIntrinsicInfo::IsMultiReg(intrin.id))
{
dstCount = intrinsicTree->GetMultiRegCount(compiler);
}
else if (intrinsicTree->IsValue())
{
dstCount = 1;
}
const bool hasImmediateOperand = HWIntrinsicInfo::HasImmediateOperand(intrin.id);
if (hasImmediateOperand && !HWIntrinsicInfo::NoJmpTableImm(intrin.id))
{
// We may need to allocate an additional general-purpose register when an intrinsic has a non-const immediate
// operand and the intrinsic does not have an alternative non-const fallback form.
// However, when the operand can take only two possible values - zero and one -
// the codegen can use cbnz to do a conditional branch, so such a register is not needed.
bool needBranchTargetReg = false;
int immLowerBound = 0;
int immUpperBound = 0;
if (intrin.category == HW_Category_SIMDByIndexedElement)
{
var_types indexedElementOpType;
if (intrin.numOperands == 3)
{
indexedElementOpType = intrin.op2->TypeGet();
}
else
{
assert(intrin.numOperands == 4);
indexedElementOpType = intrin.op3->TypeGet();
}
assert(varTypeIsSIMD(indexedElementOpType));
const unsigned int indexedElementSimdSize = genTypeSize(indexedElementOpType);
HWIntrinsicInfo::lookupImmBounds(intrin.id, indexedElementSimdSize, intrin.baseType, &immLowerBound,
&immUpperBound);
}
else
{
HWIntrinsicInfo::lookupImmBounds(intrin.id, intrinsicTree->GetSimdSize(), intrin.baseType, &immLowerBound,
&immUpperBound);
}
if ((immLowerBound != 0) || (immUpperBound != 1))
{
if ((intrin.category == HW_Category_SIMDByIndexedElement) ||
(intrin.category == HW_Category_ShiftLeftByImmediate) ||
(intrin.category == HW_Category_ShiftRightByImmediate))
{
switch (intrin.numOperands)
{
case 4:
needBranchTargetReg = !intrin.op4->isContainedIntOrIImmed();
break;
case 3:
needBranchTargetReg = !intrin.op3->isContainedIntOrIImmed();
break;
case 2:
needBranchTargetReg = !intrin.op2->isContainedIntOrIImmed();
break;
default:
unreached();
}
}
else
{
switch (intrin.id)
{
case NI_AdvSimd_DuplicateSelectedScalarToVector64:
case NI_AdvSimd_DuplicateSelectedScalarToVector128:
case NI_AdvSimd_Extract:
case NI_AdvSimd_Insert:
case NI_AdvSimd_InsertScalar:
case NI_AdvSimd_LoadAndInsertScalar:
case NI_AdvSimd_Arm64_DuplicateSelectedScalarToVector128:
needBranchTargetReg = !intrin.op2->isContainedIntOrIImmed();
break;
case NI_AdvSimd_ExtractVector64:
case NI_AdvSimd_ExtractVector128:
case NI_AdvSimd_StoreSelectedScalar:
needBranchTargetReg = !intrin.op3->isContainedIntOrIImmed();
break;
case NI_AdvSimd_Arm64_InsertSelectedScalar:
assert(intrin.op2->isContainedIntOrIImmed());
assert(intrin.op4->isContainedIntOrIImmed());
break;
default:
unreached();
}
}
}
if (needBranchTargetReg)
{
buildInternalIntRegisterDefForNode(intrinsicTree);
}
}
// Determine whether this is an RMW operation where op2+ must be marked delayFree so that it
// is not allocated the same register as the target.
const bool isRMW = intrinsicTree->isRMWHWIntrinsic(compiler);
bool tgtPrefOp1 = false;
if (intrin.op1 != nullptr)
{
bool simdRegToSimdRegMove = false;
if ((intrin.id == NI_Vector64_CreateScalarUnsafe) || (intrin.id == NI_Vector128_CreateScalarUnsafe))
{
simdRegToSimdRegMove = varTypeIsFloating(intrin.op1);
}
else if (intrin.id == NI_AdvSimd_Arm64_DuplicateToVector64)
{
simdRegToSimdRegMove = (intrin.op1->TypeGet() == TYP_DOUBLE);
}
else if ((intrin.id == NI_Vector64_ToScalar) || (intrin.id == NI_Vector128_ToScalar))
{
simdRegToSimdRegMove = varTypeIsFloating(intrinsicTree);
}
// If we have an RMW intrinsic or an intrinsic with simple move semantic between two SIMD registers,
// we want to preference op1Reg to the target if op1 is not contained.
if (isRMW || simdRegToSimdRegMove)
{
tgtPrefOp1 = !intrin.op1->isContained();
}
if (intrinsicTree->OperIsMemoryLoadOrStore())
{
srcCount += BuildAddrUses(intrin.op1);
}
else if (tgtPrefOp1)
{
tgtPrefUse = BuildUse(intrin.op1);
srcCount++;
}
else
{
srcCount += BuildOperandUses(intrin.op1);
}
}
if ((intrin.category == HW_Category_SIMDByIndexedElement) && (genTypeSize(intrin.baseType) == 2))
{
// Some "Advanced SIMD scalar x indexed element" and "Advanced SIMD vector x indexed element" instructions (e.g.
// "MLA (by element)") have encoding that restricts what registers that can be used for the indexed element when
// the element size is H (i.e. 2 bytes).
assert(intrin.op2 != nullptr);
if ((intrin.op4 != nullptr) || ((intrin.op3 != nullptr) && !hasImmediateOperand))
{
if (isRMW)
{
srcCount += BuildDelayFreeUses(intrin.op2, nullptr);
srcCount += BuildDelayFreeUses(intrin.op3, nullptr, RBM_ASIMD_INDEXED_H_ELEMENT_ALLOWED_REGS);
}
else
{
srcCount += BuildOperandUses(intrin.op2);
srcCount += BuildOperandUses(intrin.op3, RBM_ASIMD_INDEXED_H_ELEMENT_ALLOWED_REGS);
}
if (intrin.op4 != nullptr)
{
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op4));
srcCount += BuildOperandUses(intrin.op4);
}
}
else
{
assert(!isRMW);
srcCount += BuildOperandUses(intrin.op2, RBM_ASIMD_INDEXED_H_ELEMENT_ALLOWED_REGS);
if (intrin.op3 != nullptr)
{
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op3));
srcCount += BuildOperandUses(intrin.op3);
}
}
}
else if (intrin.op2 != nullptr)
{
// RMW intrinsic operands don't have to be delayFree when they can be assigned the same register as op1Reg
// (i.e. the register that corresponds to the read-modify-write operand) and one of them is the last use.
assert(intrin.op1 != nullptr);
bool forceOp2DelayFree = false;
if ((intrin.id == NI_Vector64_GetElement) || (intrin.id == NI_Vector128_GetElement))
{
if (!intrin.op2->IsCnsIntOrI() && (!intrin.op1->isContained() || intrin.op1->OperIsLocal()))
{
// If the index is not a constant and the object is not contained or is a local,
// we will need a general purpose register to calculate the address.
// The internal register must not clobber the input index.
// TODO-Cleanup: An internal register will never clobber a source; this code actually
// ensures that the index (op2) doesn't interfere with the target.
buildInternalIntRegisterDefForNode(intrinsicTree);
forceOp2DelayFree = true;
}
if (!intrin.op2->IsCnsIntOrI() && !intrin.op1->isContained())
{
// If the index is not a constant or op1 is in a register,
// we will use the SIMD temp location to store the vector.
var_types requiredSimdTempType = (intrin.id == NI_Vector64_GetElement) ? TYP_SIMD8 : TYP_SIMD16;
compiler->getSIMDInitTempVarNum(requiredSimdTempType);
}
}
if (forceOp2DelayFree)
{
srcCount += BuildDelayFreeUses(intrin.op2);
}
else
{
srcCount += isRMW ? BuildDelayFreeUses(intrin.op2, intrin.op1) : BuildOperandUses(intrin.op2);
}
if (intrin.op3 != nullptr)
{
srcCount += isRMW ? BuildDelayFreeUses(intrin.op3, intrin.op1) : BuildOperandUses(intrin.op3);
if (intrin.op4 != nullptr)
{
srcCount += isRMW ? BuildDelayFreeUses(intrin.op4, intrin.op1) : BuildOperandUses(intrin.op4);
}
}
}
buildInternalRegisterUses();
if ((dstCount == 1) || (dstCount == 2))
{
BuildDef(intrinsicTree);
if (dstCount == 2)
{
BuildDef(intrinsicTree, RBM_NONE, 1);
}
}
else
{
assert(dstCount == 0);
}
*pDstCount = dstCount;
return srcCount;
}
#endif
#endif // TARGET_ARM64
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
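To make the described fix concrete, here is a minimal sketch (not the actual diff; the helper name and the use of `GetRegNumByIdx`/`REG_NA` here are illustrative assumptions):

```cpp
// Hypothetical helper mirroring the fixed behavior: a multi-reg local var
// "has a register" if *any* of its field slots was enregistered, not just slot 0.
bool AnySlotEnregistered(GenTreeLclVar* lclVar, unsigned fieldCount)
{
    for (unsigned i = 0; i < fieldCount; i++)
    {
        if (lclVar->GetRegNumByIdx(i) != REG_NA)
        {
            // A non-first slot may hold a GC pointer; it must be reported to the
            // GC tracking sets for as long as the register lifetime lasts.
            return true;
        }
    }
    return false;
}
```

Checking only the first slot is what allowed later, enregistered GC-ref slots to drop out of the reported sets.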
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with a regNumber instead of a regMaskTP (see the sketch after this list)
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
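Sketch referenced from cleanup item 1 above (illustrative only; the call site is invented, but the point is that `gcMarkRegSetNpt` expects a register mask, not a register number):

```cpp
// Hypothetical call site: marking a register as no longer holding a GC pointer.
regNumber reg = node->GetRegNum();
// Bug pattern: a regNumber implicitly converts to an integer, which is then
// interpreted as a register *mask* naming the wrong register(s).
// gcInfo.gcMarkRegSetNpt(reg);          // wrong: register number used as a mask
gcInfo.gcMarkRegSetNpt(genRegMask(reg)); // right: build the single-register mask first
```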
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/lsrabuild.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Interval and RefPosition Building XX
XX XX
XX This contains the logic for constructing Intervals and RefPositions that XX
XX is common across architectures. See lsra{arch}.cpp for the architecture- XX
XX specific methods for building. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "lsra.h"
//------------------------------------------------------------------------
// RefInfoList
//------------------------------------------------------------------------
// removeListNode - retrieve the RefInfoListNode for the given GenTree node
//
// Notes:
// The BuildNode methods use this helper to retrieve the RefPositions for child nodes
// from the useList being constructed. Note that, if the user knows the order of the operands,
// it is expected that they should just retrieve them directly.
RefInfoListNode* RefInfoList::removeListNode(GenTree* node)
{
RefInfoListNode* prevListNode = nullptr;
for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next())
{
if (listNode->treeNode == node)
{
assert(listNode->ref->getMultiRegIdx() == 0);
return removeListNode(listNode, prevListNode);
}
prevListNode = listNode;
}
assert(!"removeListNode didn't find the node");
unreached();
}
//------------------------------------------------------------------------
// removeListNode - retrieve the RefInfoListNode for one reg of the given multireg GenTree node
//
// Notes:
// The BuildNode methods use this helper to retrieve the RefPositions for child nodes
// from the useList being constructed. Note that, if the user knows the order of the operands,
// it is expected that they should just retrieve them directly.
RefInfoListNode* RefInfoList::removeListNode(GenTree* node, unsigned multiRegIdx)
{
RefInfoListNode* prevListNode = nullptr;
for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next())
{
if ((listNode->treeNode == node) && (listNode->ref->getMultiRegIdx() == multiRegIdx))
{
return removeListNode(listNode, prevListNode);
}
prevListNode = listNode;
}
assert(!"removeListNode didn't find the node");
unreached();
}
//------------------------------------------------------------------------
// RefInfoListNodePool::RefInfoListNodePool:
// Creates a pool of `RefInfoListNode` values.
//
// Arguments:
// compiler - The compiler context.
// preallocate - The number of nodes to preallocate.
//
RefInfoListNodePool::RefInfoListNodePool(Compiler* compiler, unsigned preallocate) : m_compiler(compiler)
{
if (preallocate > 0)
{
RefInfoListNode* preallocatedNodes = compiler->getAllocator(CMK_LSRA).allocate<RefInfoListNode>(preallocate);
RefInfoListNode* head = preallocatedNodes;
head->m_next = nullptr;
for (unsigned i = 1; i < preallocate; i++)
{
RefInfoListNode* node = &preallocatedNodes[i];
node->m_next = head;
head = node;
}
m_freeList = head;
}
}
//------------------------------------------------------------------------
// RefInfoListNodePool::GetNode: Fetches an unused node from the
// pool.
//
// Arguments:
// r - The `RefPosition` for the `RefInfo` value.
// t - The IR node for the `RefInfo` value
//
// Returns:
// A pooled or newly-allocated `RefInfoListNode`, depending on the
// contents of the pool.
RefInfoListNode* RefInfoListNodePool::GetNode(RefPosition* r, GenTree* t)
{
RefInfoListNode* head = m_freeList;
if (head == nullptr)
{
head = m_compiler->getAllocator(CMK_LSRA).allocate<RefInfoListNode>(1);
}
else
{
m_freeList = head->m_next;
}
head->ref = r;
head->treeNode = t;
head->m_next = nullptr;
return head;
}
//------------------------------------------------------------------------
// RefInfoListNodePool::ReturnNode: Returns a node to the pool.
//
// Arguments:
// listNode - The node to return.
//
void RefInfoListNodePool::ReturnNode(RefInfoListNode* listNode)
{
listNode->m_next = m_freeList;
m_freeList = listNode;
}
//------------------------------------------------------------------------
// newInterval: Create a new Interval of the given RegisterType.
//
// Arguments:
// theRegisterType - The type of Interval to create.
//
// TODO-Cleanup: Consider adding an overload that takes a varDsc, and can appropriately
// set such fields as isStructField
//
Interval* LinearScan::newInterval(RegisterType theRegisterType)
{
intervals.emplace_back(theRegisterType, allRegs(theRegisterType));
Interval* newInt = &intervals.back();
#ifdef DEBUG
newInt->intervalIndex = static_cast<unsigned>(intervals.size() - 1);
#endif // DEBUG
DBEXEC(VERBOSE, newInt->dump());
return newInt;
}
//------------------------------------------------------------------------
// newRefPositionRaw: Create a new RefPosition
//
// Arguments:
// nodeLocation - The location of the reference.
// treeNode - The GenTree of the reference.
// refType - The type of reference
//
// Notes:
// This is used to create RefPositions for both RegRecords and Intervals,
// so it does only the common initialization.
//
RefPosition* LinearScan::newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType)
{
refPositions.emplace_back(curBBNum, nodeLocation, treeNode, refType);
RefPosition* newRP = &refPositions.back();
#ifdef DEBUG
newRP->rpNum = static_cast<unsigned>(refPositions.size() - 1);
#endif // DEBUG
return newRP;
}
//------------------------------------------------------------------------
// resolveConflictingDefAndUse: Resolve the situation where we have conflicting def and use
// register requirements on a single-def, single-use interval.
//
// Arguments:
// defRefPosition - The interval definition
// useRefPosition - The (sole) interval use
//
// Return Value:
// None.
//
// Assumptions:
// The two RefPositions are for the same interval, which is a tree-temp.
//
// Notes:
// We require some special handling for the case where the use is a "delayRegFree" case of a fixedReg.
// In that case, if we change the registerAssignment on the useRefPosition, we will lose the fact that,
// even if we assign a different register (and rely on codegen to do the copy), that fixedReg also needs
// to remain busy until the Def register has been allocated. In that case, we don't allow Case 1 or Case 4
// below.
// Here are the cases we consider (in this order):
// 1. If The defRefPosition specifies a single register, and there are no conflicting
// FixedReg uses of it between the def and use, we use that register, and the code generator
// will insert the copy. Note that it cannot be in use because there is a FixedRegRef for the def.
// 2. If the useRefPosition specifies a single register, and it is not in use, and there are no
// conflicting FixedReg uses of it between the def and use, we use that register, and the code generator
// will insert the copy.
// 3. If the defRefPosition specifies a single register (but there are conflicts, as determined
// in 1.), and there are no conflicts with the useRefPosition register (if it's a single register),
// we set the register requirements on the defRefPosition to the use registers, and the
// code generator will insert a copy on the def. We can't rely on the code generator to put a copy
// on the use if it has multiple possible candidates, as it won't know which one has been allocated.
// 4. If the useRefPosition specifies a single register, and there are no conflicts with the register
// on the defRefPosition, we leave the register requirements on the defRefPosition as-is, and set
// the useRefPosition to the def registers, for similar reasons to case #3.
// 5. If both the defRefPosition and the useRefPosition specify single registers, but both have conflicts,
// We set the candidates on defRefPosition to be all regs of the appropriate type, and since they are
// single registers, codegen can insert the copy.
// 6. Finally, if the RefPositions specify disjoint subsets of the registers (or the use is fixed but
// has a conflict), we must insert a copy. The copy will be inserted before the use if the
// use is not fixed (in the fixed case, the code generator will insert the use).
//
// TODO-CQ: We get bad register allocation in case #3 in the situation where no register is
// available for the lifetime. We end up allocating a register that must be spilled, and it probably
// won't be the register that is actually defined by the target instruction. So, we have to copy it
// and THEN spill it. In this case, we should be using the def requirement. But we need to change
// the interface to this method a bit to make that work (e.g. returning a candidate set to use, but
// leaving the registerAssignment as-is on the def, so that if we find that we need to spill anyway
// we can use the fixed-reg on the def.
//
void LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition)
{
assert(!interval->isLocalVar);
RefPosition* useRefPosition = defRefPosition->nextRefPosition;
regMaskTP defRegAssignment = defRefPosition->registerAssignment;
regMaskTP useRegAssignment = useRefPosition->registerAssignment;
RegRecord* defRegRecord = nullptr;
RegRecord* useRegRecord = nullptr;
regNumber defReg = REG_NA;
regNumber useReg = REG_NA;
bool defRegConflict = ((defRegAssignment & useRegAssignment) == RBM_NONE);
bool useRegConflict = defRegConflict;
// If the useRefPosition is a "delayRegFree", we can't change the registerAssignment
// on it, or we will fail to ensure that the fixedReg is busy at the time the target
// (of the node that uses this interval) is allocated.
bool canChangeUseAssignment = !useRefPosition->isFixedRegRef || !useRefPosition->delayRegFree;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CONFLICT));
if (!canChangeUseAssignment)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_FIXED_DELAY_USE));
}
if (defRefPosition->isFixedRegRef && !defRegConflict)
{
defReg = defRefPosition->assignedReg();
defRegRecord = getRegisterRecord(defReg);
if (canChangeUseAssignment)
{
RefPosition* currFixedRegRefPosition = defRegRecord->recentRefPosition;
assert(currFixedRegRefPosition != nullptr &&
currFixedRegRefPosition->nodeLocation == defRefPosition->nodeLocation);
if (currFixedRegRefPosition->nextRefPosition == nullptr ||
currFixedRegRefPosition->nextRefPosition->nodeLocation > useRefPosition->getRefEndLocation())
{
// This is case #1. Use the defRegAssignment
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE1));
useRefPosition->registerAssignment = defRegAssignment;
return;
}
else
{
defRegConflict = true;
}
}
}
if (useRefPosition->isFixedRegRef && !useRegConflict)
{
useReg = useRefPosition->assignedReg();
useRegRecord = getRegisterRecord(useReg);
// We know that useRefPosition is a fixed use, so the nextRefPosition must not be null.
RefPosition* nextFixedRegRefPosition = useRegRecord->getNextRefPosition();
assert(nextFixedRegRefPosition != nullptr &&
nextFixedRegRefPosition->nodeLocation <= useRefPosition->nodeLocation);
// First, check to see if there are any conflicting FixedReg references between the def and use.
if (nextFixedRegRefPosition->nodeLocation == useRefPosition->nodeLocation)
{
// OK, no conflicting FixedReg references.
// Now, check to see whether it is currently in use.
if (useRegRecord->assignedInterval != nullptr)
{
RefPosition* possiblyConflictingRef = useRegRecord->assignedInterval->recentRefPosition;
LsraLocation possiblyConflictingRefLocation = possiblyConflictingRef->getRefEndLocation();
if (possiblyConflictingRefLocation >= defRefPosition->nodeLocation)
{
useRegConflict = true;
}
}
if (!useRegConflict)
{
// This is case #2. Use the useRegAssignment
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE2, interval));
defRefPosition->registerAssignment = useRegAssignment;
return;
}
}
else
{
useRegConflict = true;
}
}
if (defRegRecord != nullptr && !useRegConflict)
{
// This is case #3.
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE3, interval));
defRefPosition->registerAssignment = useRegAssignment;
return;
}
if (useRegRecord != nullptr && !defRegConflict && canChangeUseAssignment)
{
// This is case #4.
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE4, interval));
useRefPosition->registerAssignment = defRegAssignment;
return;
}
if (defRegRecord != nullptr && useRegRecord != nullptr)
{
// This is case #5.
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE5, interval));
RegisterType regType = interval->registerType;
assert((getRegisterType(interval, defRefPosition) == regType) &&
(getRegisterType(interval, useRefPosition) == regType));
regMaskTP candidates = allRegs(regType);
defRefPosition->registerAssignment = candidates;
defRefPosition->isFixedRegRef = false;
return;
}
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE6, interval));
return;
}
//------------------------------------------------------------------------
// applyCalleeSaveHeuristics: Set register preferences for an interval based on the given RefPosition
//
// Arguments:
// rp - The RefPosition of interest
//
// Notes:
// This is slightly more general than its name implies, and updates preferences not just
// for callee-save registers.
//
void LinearScan::applyCalleeSaveHeuristics(RefPosition* rp)
{
#ifdef TARGET_AMD64
if (compiler->opts.compDbgEnC)
{
// We only use RSI and RDI for EnC code, so we don't want to favor callee-save regs.
return;
}
#endif // TARGET_AMD64
Interval* theInterval = rp->getInterval();
#ifdef DEBUG
if (!doReverseCallerCallee())
#endif // DEBUG
{
// Set preferences so that this register set will be preferred for earlier refs
theInterval->mergeRegisterPreferences(rp->registerAssignment);
}
}
//------------------------------------------------------------------------
// checkConflictingDefUse: Ensure that we have consistent def/use on SDSU temps.
//
// Arguments:
// useRP - The use RefPosition of a tree temp (SDSU Interval)
//
// Notes:
// There are a couple of cases where this may over-constrain allocation:
// 1. In the case of a non-commutative rmw def (in which the rmw source must be delay-free), or
// 2. In the case where the defining node requires a temp distinct from the target (also a
// delay-free case).
// In those cases, if we propagate a single-register restriction from the consumer to the producer
// the delayed uses will not see a fixed reference in the PhysReg at that position, and may
// incorrectly allocate that register.
// TODO-CQ: This means that we may often require a copy at the use of this node's result.
// This case could be moved to BuildRefPositionsForNode, at the point where the def RefPosition is
// created, causing a RefTypeFixedReg to be added at that location. This, however, results in
// more PhysReg RefPositions (a throughput impact), and a large number of diffs that require
// further analysis to determine benefit.
// See Issue #11274.
//
void LinearScan::checkConflictingDefUse(RefPosition* useRP)
{
assert(useRP->refType == RefTypeUse);
Interval* theInterval = useRP->getInterval();
assert(!theInterval->isLocalVar);
RefPosition* defRP = theInterval->firstRefPosition;
// All defs must have a valid treeNode, but we check it below to be conservative.
assert(defRP->treeNode != nullptr);
regMaskTP prevAssignment = defRP->registerAssignment;
regMaskTP newAssignment = (prevAssignment & useRP->registerAssignment);
if (newAssignment != RBM_NONE)
{
if (!isSingleRegister(newAssignment) || !theInterval->hasInterferingUses)
{
defRP->registerAssignment = newAssignment;
}
}
else
{
theInterval->hasConflictingDefUse = true;
}
}
//------------------------------------------------------------------------
// associateRefPosWithInterval: Update the Interval based on the given RefPosition.
//
// Arguments:
// rp - The RefPosition of interest
//
// Notes:
// This is called at the time when 'rp' has just been created, so it becomes
// the nextRefPosition of the recentRefPosition, and both the recentRefPosition
// and lastRefPosition of its referent.
//
void LinearScan::associateRefPosWithInterval(RefPosition* rp)
{
Referenceable* theReferent = rp->referent;
if (theReferent != nullptr)
{
// All RefPositions except the dummy ones at the beginning of blocks
if (rp->isIntervalRef())
{
Interval* theInterval = rp->getInterval();
applyCalleeSaveHeuristics(rp);
if (theInterval->isLocalVar)
{
if (RefTypeIsUse(rp->refType))
{
RefPosition* const prevRP = theInterval->recentRefPosition;
if ((prevRP != nullptr) && (prevRP->bbNum == rp->bbNum))
{
prevRP->lastUse = false;
}
}
rp->lastUse = (rp->refType != RefTypeExpUse) && (rp->refType != RefTypeParamDef) &&
(rp->refType != RefTypeZeroInit) && !extendLifetimes();
}
else if (rp->refType == RefTypeUse)
{
checkConflictingDefUse(rp);
rp->lastUse = true;
}
}
RefPosition* prevRP = theReferent->recentRefPosition;
if (prevRP != nullptr)
{
prevRP->nextRefPosition = rp;
}
else
{
theReferent->firstRefPosition = rp;
}
theReferent->recentRefPosition = rp;
theReferent->lastRefPosition = rp;
}
else
{
assert((rp->refType == RefTypeBB) || (rp->refType == RefTypeKillGCRefs));
}
}
//---------------------------------------------------------------------------
// newRefPosition: allocate and initialize a new RefPosition.
//
// Arguments:
// reg - reg number that identifies RegRecord to be associated
// with this RefPosition
// theLocation - LSRA location of RefPosition
// theRefType - RefPosition type
// theTreeNode - GenTree node for which this RefPosition is created
// mask - Set of valid registers for this RefPosition
// multiRegIdx - register position if this RefPosition corresponds to a
// multi-reg call node.
//
// Return Value:
// a new RefPosition
//
RefPosition* LinearScan::newRefPosition(
regNumber reg, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask)
{
RefPosition* newRP = newRefPositionRaw(theLocation, theTreeNode, theRefType);
RegRecord* regRecord = getRegisterRecord(reg);
newRP->setReg(regRecord);
newRP->registerAssignment = mask;
newRP->setMultiRegIdx(0);
newRP->setRegOptional(false);
// We can't have two RefPositions on a RegRecord at the same location, unless they are different types.
assert((regRecord->lastRefPosition == nullptr) || (regRecord->lastRefPosition->nodeLocation < theLocation) ||
(regRecord->lastRefPosition->refType != theRefType));
associateRefPosWithInterval(newRP);
DBEXEC(VERBOSE, newRP->dump(this));
return newRP;
}
//---------------------------------------------------------------------------
// newRefPosition: allocate and initialize a new RefPosition.
//
// Arguments:
// theInterval - interval to which RefPosition is associated with.
// theLocation - LSRA location of RefPosition
// theRefType - RefPosition type
// theTreeNode - GenTree node for which this RefPosition is created
// mask - Set of valid registers for this RefPosition
// multiRegIdx - register position if this RefPosition corresponds to a
// multi-reg call node.
//
// Return Value:
// a new RefPosition
//
RefPosition* LinearScan::newRefPosition(Interval* theInterval,
LsraLocation theLocation,
RefType theRefType,
GenTree* theTreeNode,
regMaskTP mask,
unsigned multiRegIdx /* = 0 */)
{
if (theInterval != nullptr)
{
if (mask == RBM_NONE)
{
mask = allRegs(theInterval->registerType);
}
}
else
{
assert(theRefType == RefTypeBB || theRefType == RefTypeKillGCRefs);
}
#ifdef DEBUG
if (theInterval != nullptr && regType(theInterval->registerType) == FloatRegisterType)
{
// In the case we're using floating point registers we must make sure
// this flag was set previously in the compiler since this will mandate
// whether LSRA will take into consideration FP reg killsets.
assert(compiler->compFloatingPointUsed || ((mask & RBM_FLT_CALLEE_SAVED) == 0));
}
#endif // DEBUG
// If this reference is constrained to a single register (and it's not a dummy
// or Kill reftype already), add a RefTypeFixedReg at this location so that its
// availability can be more accurately determined
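// For example (illustrative): on xarch the use of a non-contained shift count is constrained to
// RCX, so a RefTypeUse with mask == RBM_RCX gets a companion RefTypeFixedReg on RCX at the same
// location, making RCX's unavailability visible to other Intervals at that point.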
bool isFixedRegister = isSingleRegister(mask);
bool insertFixedRef = false;
if (isFixedRegister)
{
// Insert a RefTypeFixedReg for any normal def or use (not ParamDef or BB),
// but not an internal use (it will already have a FixedRef for the def).
if ((theRefType == RefTypeDef) || ((theRefType == RefTypeUse) && !theInterval->isInternal))
{
insertFixedRef = true;
}
}
if (insertFixedRef)
{
regNumber physicalReg = genRegNumFromMask(mask);
RefPosition* pos = newRefPosition(physicalReg, theLocation, RefTypeFixedReg, nullptr, mask);
assert(theInterval != nullptr);
assert((allRegs(theInterval->registerType) & mask) != 0);
}
RefPosition* newRP = newRefPositionRaw(theLocation, theTreeNode, theRefType);
newRP->setInterval(theInterval);
// Spill info
newRP->isFixedRegRef = isFixedRegister;
#ifndef TARGET_AMD64
// We don't need this for AMD64 because the PInvoke method epilog code is explicit
// at register allocation time.
if (theInterval != nullptr && theInterval->isLocalVar && compiler->compMethodRequiresPInvokeFrame() &&
theInterval->varNum == compiler->genReturnLocal)
{
mask &= ~(RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME);
noway_assert(mask != RBM_NONE);
}
#endif // !TARGET_AMD64
newRP->registerAssignment = mask;
newRP->setMultiRegIdx(multiRegIdx);
newRP->setRegOptional(false);
associateRefPosWithInterval(newRP);
if (RefTypeIsDef(newRP->refType))
{
assert(theInterval != nullptr);
theInterval->isSingleDef = theInterval->firstRefPosition == newRP;
}
DBEXEC(VERBOSE, newRP->dump(this));
return newRP;
}
//---------------------------------------------------------------------------
// newUseRefPosition: allocate and initialize a RefTypeUse RefPosition at currentLoc.
//
// Arguments:
// theInterval - interval to which RefPosition is associated with.
// theTreeNode - GenTree node for which this RefPosition is created
// mask - Set of valid registers for this RefPosition
// multiRegIdx - register position if this RefPosition corresponds to a
// multi-reg call node.
//
// Return Value:
// a new RefPosition
//
// Notes:
// If the caller knows that 'theTreeNode' is NOT a candidate local, newRefPosition
// can/should be called directly.
//
RefPosition* LinearScan::newUseRefPosition(Interval* theInterval,
GenTree* theTreeNode,
regMaskTP mask,
unsigned multiRegIdx)
{
GenTree* treeNode = isCandidateLocalRef(theTreeNode) ? theTreeNode : nullptr;
RefPosition* pos = newRefPosition(theInterval, currentLoc, RefTypeUse, treeNode, mask, multiRegIdx);
if (theTreeNode->IsRegOptional())
{
pos->setRegOptional(true);
}
return pos;
}
//------------------------------------------------------------------------
// IsContainableMemoryOp: Checks whether this is a memory op that can be contained.
//
// Arguments:
// node - the node of interest.
//
// Return value:
// True if this will definitely be a memory reference that could be contained.
//
// Notes:
// This differs from the isMemoryOp() method on GenTree because it checks for
// the case of doNotEnregister local. This won't include locals that
// for some other reason do not become register candidates, nor those that get
// spilled.
// Also, because we usually call this before we redo dataflow, any new lclVars
// introduced after the last dataflow analysis will not yet be marked lvTracked,
// so we don't use that.
//
bool LinearScan::isContainableMemoryOp(GenTree* node)
{
if (node->isMemoryOp())
{
return true;
}
if (node->IsLocal())
{
if (!enregisterLocalVars)
{
return true;
}
const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclVar());
return varDsc->lvDoNotEnregister;
}
return false;
}
//------------------------------------------------------------------------
// addRefsForPhysRegMask: Adds RefPositions of the given type for all the registers in 'mask'.
//
// Arguments:
// mask - the mask (set) of registers.
// currentLoc - the location at which they should be added
// refType - the type of refposition
// isLastUse - true IFF this is a last use of the register
//
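// Notes:
// As an illustrative example, the CORINFO_HELP_ASSIGN_BYREF helper kills RSI and RDI (which are
// otherwise callee-saved on Windows x64); for a killMask containing those bits, the loop below
// creates one RefTypeKill RefPosition per set bit, each constrained to exactly that physical
// register at 'currentLoc'.
//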
void LinearScan::addRefsForPhysRegMask(regMaskTP mask, LsraLocation currentLoc, RefType refType, bool isLastUse)
{
if (refType == RefTypeKill)
{
// The mask identifies a set of registers that will be used during
// codegen. Mark these as modified here, so when we do final frame
// layout, we'll know about all these registers. This is especially
// important if mask contains callee-saved registers, which affect the
// frame size since we need to save/restore them. In the case where we
// have a copyBlk with GC pointers, we may need to call the
// CORINFO_HELP_ASSIGN_BYREF helper, which kills callee-saved RSI and
// RDI, if LSRA doesn't assign RSI/RDI, they wouldn't get marked as
// modified until codegen, which is too late.
compiler->codeGen->regSet.rsSetRegsModified(mask DEBUGARG(true));
}
for (regNumber reg = REG_FIRST; mask; reg = REG_NEXT(reg), mask >>= 1)
{
if (mask & 1)
{
// This assumes that these are all "special" RefTypes that
// don't need to be recorded on the tree (hence treeNode is nullptr)
RefPosition* pos = newRefPosition(reg, currentLoc, refType, nullptr,
genRegMask(reg)); // This MUST occupy the physical register (obviously)
if (isLastUse)
{
pos->lastUse = true;
}
}
}
}
//------------------------------------------------------------------------
// getKillSetForStoreInd: Determine the liveness kill set for a GT_STOREIND node.
// If the GT_STOREIND will generate a write barrier, determine the specific kill
// set required by the case-specific, platform-specific write barrier. If no
// write barrier is required, the kill set will be RBM_NONE.
//
// Arguments:
// tree - the GT_STOREIND node
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForStoreInd(GenTreeStoreInd* tree)
{
assert(tree->OperIs(GT_STOREIND));
regMaskTP killMask = RBM_NONE;
GenTree* data = tree->Data();
GCInfo::WriteBarrierForm writeBarrierForm = compiler->codeGen->gcInfo.gcIsWriteBarrierCandidate(tree, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
{
if (compiler->codeGen->genUseOptimizedWriteBarriers(writeBarrierForm))
{
// We can't determine the exact helper to be used at this point, because it depends on
// the allocated register for the `data` operand. However, all the (x86) optimized
// helpers have the same kill set: EDX. And note that currently, only x86 can return
// `true` for genUseOptimizedWriteBarriers().
killMask = RBM_CALLEE_TRASH_NOGC;
}
else
{
// Figure out which helper we're going to use, and then get the kill set for that helper.
CorInfoHelpFunc helper =
compiler->codeGen->genWriteBarrierHelperForWriteBarrierForm(tree, writeBarrierForm);
killMask = compiler->compHelperCallKillSet(helper);
}
}
return killMask;
}
//------------------------------------------------------------------------
// getKillSetForShiftRotate: Determine the liveness kill set for a shift or rotate node.
//
// Arguments:
// shiftNode - the shift or rotate node
//
// Return Value: a register mask of the registers killed
//
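// Notes:
// Illustrative example: a GT_LSH whose shift amount is a non-contained lclVar is emitted as a
// variable-count shift ("shl reg, cl"), which requires the count in CL, so RCX is reported as
// killed; a contained constant shift amount is encoded as an immediate and kills nothing.
//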
regMaskTP LinearScan::getKillSetForShiftRotate(GenTreeOp* shiftNode)
{
regMaskTP killMask = RBM_NONE;
#ifdef TARGET_XARCH
assert(shiftNode->OperIsShiftOrRotate());
GenTree* shiftBy = shiftNode->gtGetOp2();
if (!shiftBy->isContained())
{
killMask = RBM_RCX;
}
#endif // TARGET_XARCH
return killMask;
}
//------------------------------------------------------------------------
// getKillSetForMul: Determine the liveness kill set for a multiply node.
//
// Arguments:
// tree - the multiply node
//
// Return Value: a register mask of the registers killed
//
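// Notes:
// Illustrative example: GT_MULHI (and an unsigned overflow-checked GT_MUL) is emitted using the
// one-operand mul/imul form, which writes its wide result into RDX:RAX, so both registers are
// reported as killed; an ordinary GT_MUL uses the two/three-operand imul and kills nothing extra.
//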
regMaskTP LinearScan::getKillSetForMul(GenTreeOp* mulNode)
{
regMaskTP killMask = RBM_NONE;
#ifdef TARGET_XARCH
assert(mulNode->OperIsMul());
if (!mulNode->OperIs(GT_MUL) || (((mulNode->gtFlags & GTF_UNSIGNED) != 0) && mulNode->gtOverflowEx()))
{
killMask = RBM_RAX | RBM_RDX;
}
#endif // TARGET_XARCH
return killMask;
}
//------------------------------------------------------------------------
// getKillSetForModDiv: Determine the liveness kill set for a mod or div node.
//
// Arguments:
// tree - the mod or div node as a GenTreeOp
//
// Return Value: a register mask of the registers killed
//
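// Notes:
// Illustrative example: an integer GT_DIV/GT_MOD is emitted as idiv, which takes its dividend in
// RDX:RAX and produces the quotient in RAX and the remainder in RDX, so both are reported as
// killed; a floating-point divide uses SSE instructions and kills nothing extra.
//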
regMaskTP LinearScan::getKillSetForModDiv(GenTreeOp* node)
{
regMaskTP killMask = RBM_NONE;
#ifdef TARGET_XARCH
assert(node->OperIs(GT_MOD, GT_DIV, GT_UMOD, GT_UDIV));
if (!varTypeIsFloating(node->TypeGet()))
{
// Both RAX and RDX are killed by the operation
killMask = RBM_RAX | RBM_RDX;
}
#endif // TARGET_XARCH
return killMask;
}
//------------------------------------------------------------------------
// getKillSetForCall: Determine the liveness kill set for a call node.
//
// Arguments:
// tree - the GenTreeCall node
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForCall(GenTreeCall* call)
{
regMaskTP killMask = RBM_CALLEE_TRASH;
#ifdef TARGET_X86
if (compiler->compFloatingPointUsed)
{
if (call->TypeGet() == TYP_DOUBLE)
{
needDoubleTmpForFPCall = true;
}
else if (call->TypeGet() == TYP_FLOAT)
{
needFloatTmpForFPCall = true;
}
}
#endif // TARGET_X86
if (call->IsHelperCall())
{
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
killMask = compiler->compHelperCallKillSet(helpFunc);
}
// if there is no FP used, we can ignore the FP kills
if (!compiler->compFloatingPointUsed)
{
killMask &= ~RBM_FLT_CALLEE_TRASH;
}
#ifdef TARGET_ARM
if (call->IsVirtualStub())
{
killMask |= compiler->virtualStubParamInfo->GetRegMask();
}
#else // !TARGET_ARM
// Verify that the special virtual stub call registers are in the kill mask.
// We don't just add them unconditionally to the killMask because for most architectures
// they are already in the RBM_CALLEE_TRASH set,
// and we don't want to introduce extra checks and calls in this hot function.
assert(!call->IsVirtualStub() ||
((killMask & compiler->virtualStubParamInfo->GetRegMask()) == compiler->virtualStubParamInfo->GetRegMask()));
#endif // !TARGET_ARM
return killMask;
}
//------------------------------------------------------------------------
// getKillSetForBlockStore: Determine the liveness kill set for a block store node.
//
// Arguments:
// tree - the block store node as a GenTreeBlk
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForBlockStore(GenTreeBlk* blkNode)
{
assert(blkNode->OperIsStore());
regMaskTP killMask = RBM_NONE;
if ((blkNode->OperGet() == GT_STORE_OBJ) && blkNode->OperIsCopyBlkOp())
{
assert(blkNode->AsObj()->GetLayout()->HasGCPtr());
killMask = compiler->compHelperCallKillSet(CORINFO_HELP_ASSIGN_BYREF);
}
else
{
bool isCopyBlk = varTypeIsStruct(blkNode->Data());
switch (blkNode->gtBlkOpKind)
{
#ifndef TARGET_X86
case GenTreeBlk::BlkOpKindHelper:
if (isCopyBlk)
{
killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMCPY);
}
else
{
killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMSET);
}
break;
#endif
#ifdef TARGET_XARCH
case GenTreeBlk::BlkOpKindRepInstr:
if (isCopyBlk)
{
// rep movs kills RCX, RDI and RSI
killMask = RBM_RCX | RBM_RDI | RBM_RSI;
}
else
{
// rep stos kills RCX and RDI.
// (Note that the Data() node, if not constant, will be assigned to
// RCX, but it's fine that this kills it, as the value is not available
// after this node in any case.)
killMask = RBM_RDI | RBM_RCX;
}
break;
#endif
case GenTreeBlk::BlkOpKindUnroll:
case GenTreeBlk::BlkOpKindInvalid:
// for these 'gtBlkOpKind' kinds, we leave 'killMask' = RBM_NONE
break;
}
}
return killMask;
}
#ifdef FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// getKillSetForHWIntrinsic: Determine the liveness kill set for a GT_HWINTRINSIC node.
// Some hardware intrinsics implicitly write specific physical registers; if the intrinsic
// has no such implicit register effects, the kill set will be RBM_NONE.
//
// Arguments:
// node - the GT_HWINTRINSIC node
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForHWIntrinsic(GenTreeHWIntrinsic* node)
{
regMaskTP killMask = RBM_NONE;
#ifdef TARGET_XARCH
switch (node->GetHWIntrinsicId())
{
case NI_SSE2_MaskMove:
// maskmovdqu uses edi as the implicit address register.
// Although it is set as the srcCandidate on the address, if there is also a fixed
// assignment for the definition of the address, resolveConflictingDefAndUse() may
// change the register assignment on the def or use of a tree temp (SDSU) when there
// is a conflict, and the FixedRef on edi won't be sufficient to ensure that another
// Interval will not be allocated there.
// Issue #17674 tracks this.
killMask = RBM_EDI;
break;
default:
// Leave killMask as RBM_NONE
break;
}
#endif // TARGET_XARCH
return killMask;
}
#endif // FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// getKillSetForReturn: Determine the liveness kill set for a return node.
//
// Arguments:
// NONE (this kill set is independent of the details of the specific return.)
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForReturn()
{
return compiler->compIsProfilerHookNeeded() ? compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_LEAVE)
: RBM_NONE;
}
//------------------------------------------------------------------------
// getKillSetForProfilerHook: Determine the liveness kill set for a profiler hook.
//
// Arguments:
// NONE (this kill set is independent of the details of the specific node.)
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForProfilerHook()
{
return compiler->compIsProfilerHookNeeded() ? compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_TAILCALL)
: RBM_NONE;
}
#ifdef DEBUG
//------------------------------------------------------------------------
// getKillSetForNode: Return the registers killed by the given tree node.
//
// Arguments:
// tree - the tree for which the kill set is needed.
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForNode(GenTree* tree)
{
regMaskTP killMask = RBM_NONE;
switch (tree->OperGet())
{
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
#ifdef TARGET_X86
case GT_LSH_HI:
case GT_RSH_LO:
#endif
killMask = getKillSetForShiftRotate(tree->AsOp());
break;
case GT_MUL:
case GT_MULHI:
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
case GT_MUL_LONG:
#endif
killMask = getKillSetForMul(tree->AsOp());
break;
case GT_MOD:
case GT_DIV:
case GT_UMOD:
case GT_UDIV:
killMask = getKillSetForModDiv(tree->AsOp());
break;
case GT_STORE_OBJ:
case GT_STORE_BLK:
case GT_STORE_DYN_BLK:
killMask = getKillSetForBlockStore(tree->AsBlk());
break;
case GT_RETURNTRAP:
killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC);
break;
case GT_CALL:
killMask = getKillSetForCall(tree->AsCall());
break;
case GT_STOREIND:
killMask = getKillSetForStoreInd(tree->AsStoreInd());
break;
#if defined(PROFILING_SUPPORTED)
// If this method requires profiler ELT hook then mark these nodes as killing
// callee trash registers (excluding RAX and XMM0). The reason for this is that
// profiler callback would trash these registers. See vm\amd64\asmhelpers.asm for
// more details.
case GT_RETURN:
killMask = getKillSetForReturn();
break;
case GT_PROF_HOOK:
killMask = getKillSetForProfilerHook();
break;
#endif // PROFILING_SUPPORTED
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
killMask = getKillSetForHWIntrinsic(tree->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
default:
// for all other 'tree->OperGet()' kinds, leave 'killMask' = RBM_NONE
break;
}
return killMask;
}
#endif // DEBUG
//------------------------------------------------------------------------
// buildKillPositionsForNode:
// Given a tree node, add RefPositions for all the registers this node kills.
//
// Arguments:
// tree - the tree for which kill positions should be generated
// currentLoc - the location at which the kills should be added
// killMask - The mask of registers killed by this node
//
// Return Value:
// true - kills were inserted
// false - no kills were inserted
//
// Notes:
// The return value is needed because if we have any kills, we need to make sure that
// all defs are located AFTER the kills. On the other hand, if there aren't kills,
// the multiple defs for a regPair are in different locations.
// If we generate any kills, we will mark all currentLiveVars as being preferenced
// to avoid the killed registers. This is somewhat conservative.
//
// This method can add kills even if killMask is RBM_NONE, if this tree is one of the
// special cases that signal that we can't permit callee save registers to hold GC refs.
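//
// For example (illustrative), at a GT_CALL whose killMask is RBM_CALLEE_TRASH, each currently
// live register-candidate lclVar is marked preferCalleeSave, and most have their register
// preferences narrowed away from the killed registers (write-thru EH vars are handled more
// conservatively), so they are more likely to survive the call in a callee-saved register
// rather than being spilled and reloaded.
//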
bool LinearScan::buildKillPositionsForNode(GenTree* tree, LsraLocation currentLoc, regMaskTP killMask)
{
bool insertedKills = false;
if (killMask != RBM_NONE)
{
addRefsForPhysRegMask(killMask, currentLoc, RefTypeKill, true);
// TODO-CQ: It appears to be valuable for both fp and int registers to avoid killing the callee
// save regs on infrequently executed paths. However, it results in a large number of asmDiffs,
// many of which appear to be regressions (because there is more spill on the infrequently path),
// but are not really because the frequent path becomes smaller. Validating these diffs will need
// to be done before making this change.
// Also note that we avoid setting callee-save preferences for floating point. This may need
// revisiting, and note that it doesn't currently apply to SIMD types, only float or double.
// if (!blockSequence[curBBSeqNum]->isRunRarely())
if (enregisterLocalVars)
{
VarSetOps::Iter iter(compiler, currentLiveVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()))
{
if (!VarSetOps::IsMember(compiler, largeVectorCalleeSaveCandidateVars, varIndex))
{
continue;
}
}
else
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (varTypeIsFloating(varDsc) &&
!VarSetOps::IsMember(compiler, fpCalleeSaveCandidateVars, varIndex))
{
continue;
}
Interval* interval = getIntervalForLocalVar(varIndex);
const bool isCallKill = ((killMask == RBM_INT_CALLEE_TRASH) || (killMask == RBM_CALLEE_TRASH));
if (isCallKill)
{
interval->preferCalleeSave = true;
}
// We are more conservative about allocating callee-saves registers to write-thru vars, since
// a call only requires reloading after (not spilling before). So we record (above) the fact
// that we'd prefer a callee-save register, but we don't update the preferences at this point.
// See the "heuristics for writeThru intervals" in 'buildIntervals()'.
if (!interval->isWriteThru || !isCallKill)
{
regMaskTP newPreferences = allRegs(interval->registerType) & (~killMask);
if (newPreferences != RBM_NONE)
{
interval->updateRegisterPreferences(newPreferences);
}
else
{
// If there are no callee-saved registers, the call could kill all the registers.
// This is a valid state, so in that case assert should not trigger. The RA will spill in order
// to free a register later.
assert(compiler->opts.compDbgEnC || (calleeSaveRegs(varDsc->lvType)) == RBM_NONE);
}
}
}
}
insertedKills = true;
}
if (compiler->killGCRefs(tree))
{
RefPosition* pos =
newRefPosition((Interval*)nullptr, currentLoc, RefTypeKillGCRefs, tree, (allRegs(TYP_REF) & ~RBM_ARG_REGS));
insertedKills = true;
}
return insertedKills;
}
//------------------------------------------------------------------------
// LinearScan::isCandidateMultiRegLclVar: Check whether a MultiReg node should
// remain a candidate MultiReg
//
// Arguments:
// lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR of interest
//
// Return Value:
// true iff it remains a MultiReg lclVar.
//
// Notes:
// When identifying candidates, the register allocator will only retain
// promoted fields of a multi-reg local as candidates if all of its fields
// are candidates. This is because of the added complexity of dealing with a
// def or use of a multi-reg lclVar when only some of the fields have liveness
// info.
// At the time we determine whether a multi-reg lclVar can still be handled
// as such, we've already completed Lowering, so during the build phase of
// LSRA we have to reset the GTF_VAR_MULTIREG flag if necessary as we visit
// each node.
//
bool LinearScan::isCandidateMultiRegLclVar(GenTreeLclVar* lclNode)
{
assert(compiler->lvaEnregMultiRegVars && lclNode->IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
assert(varDsc->lvPromoted);
bool isMultiReg = (compiler->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT);
if (!isMultiReg)
{
lclNode->ClearMultiReg();
}
#ifdef DEBUG
for (unsigned int i = 0; i < varDsc->lvFieldCnt; i++)
{
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i);
assert(isCandidateVar(fieldVarDsc) == isMultiReg);
}
#endif // DEBUG
return isMultiReg;
}
//------------------------------------------------------------------------
// checkContainedOrCandidateLclVar: Check whether a GT_LCL_VAR node is a
// candidate or contained.
//
// Arguments:
// lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR of interest
//
// Return Value:
// true if the node remains a candidate or is contained
// false otherwise (i.e. if it will define a register)
//
// Notes:
// We handle candidate variables differently from non-candidate ones.
// If it is a candidate, we will simply add a use of it at its parent/consumer.
// Otherwise, for a use we need to actually add the appropriate references for loading
// or storing the variable.
//
// A candidate lclVar won't actually get used until the appropriate ancestor node
// is processed, unless this is marked "isLocalDefUse" because it is a stack-based argument
// to a call or an orphaned dead node.
//
// Also, because we do containment analysis before we redo dataflow and identify register
// candidates, the containment analysis only uses !lvDoNotEnregister to estimate register
// candidates.
// If there is a lclVar that is estimated during Lowering to be register candidate but turns
// out not to be, if a use was marked regOptional it should now be marked contained instead.
//
bool LinearScan::checkContainedOrCandidateLclVar(GenTreeLclVar* lclNode)
{
bool isCandidate;
bool makeContained = false;
// We shouldn't be calling this if this node was already contained.
assert(!lclNode->isContained());
// If we have a multireg local, verify that its fields are still register candidates.
if (lclNode->IsMultiReg())
{
// Multi-reg uses must support containment, but if we have an actual multi-reg local
// we don't want it to be RegOptional in fixed-use cases, so that we can ensure proper
// liveness modeling (e.g. if one field is in a register required by another field, in
// a RegOptional case we won't handle the conflict properly if we decide not to allocate).
isCandidate = isCandidateMultiRegLclVar(lclNode);
if (isCandidate)
{
assert(!lclNode->IsRegOptional());
}
else
{
makeContained = true;
}
}
else
{
isCandidate = compiler->lvaGetDesc(lclNode)->lvLRACandidate;
makeContained = !isCandidate && lclNode->IsRegOptional();
}
if (makeContained)
{
lclNode->ClearRegOptional();
lclNode->SetContained();
return true;
}
return isCandidate;
}
//----------------------------------------------------------------------------
// defineNewInternalTemp: Defines a ref position for an internal temp.
//
// Arguments:
// tree - Gentree node requiring an internal register
// regType - Register type
// currentLoc - Location of the temp Def position
// regMask - register mask of candidates for temp
//
RefPosition* LinearScan::defineNewInternalTemp(GenTree* tree, RegisterType regType, regMaskTP regMask)
{
Interval* current = newInterval(regType);
current->isInternal = true;
RefPosition* newDef = newRefPosition(current, currentLoc, RefTypeDef, tree, regMask, 0);
assert(internalCount < MaxInternalCount);
internalDefs[internalCount++] = newDef;
return newDef;
}
//------------------------------------------------------------------------
// buildInternalRegisterDefForNode - Create an Interval for an internal int register, and a def RefPosition
//
// Arguments:
// tree - Gentree node that needs internal registers
// internalCands - The mask of valid registers
//
// Returns:
// The def RefPosition created for this internal temp.
//
RefPosition* LinearScan::buildInternalIntRegisterDefForNode(GenTree* tree, regMaskTP internalCands)
{
// The candidate set should contain only integer registers.
assert((internalCands & ~allRegs(TYP_INT)) == RBM_NONE);
RefPosition* defRefPosition = defineNewInternalTemp(tree, IntRegisterType, internalCands);
return defRefPosition;
}
//------------------------------------------------------------------------
// buildInternalFloatRegisterDefForNode - Create an Interval for an internal fp register, and a def RefPosition
//
// Arguments:
// tree - Gentree node that needs internal registers
// internalCands - The mask of valid registers
//
// Returns:
// The def RefPosition created for this internal temp.
//
RefPosition* LinearScan::buildInternalFloatRegisterDefForNode(GenTree* tree, regMaskTP internalCands)
{
// The candidate set should contain only float registers.
assert((internalCands & ~allRegs(TYP_FLOAT)) == RBM_NONE);
RefPosition* defRefPosition = defineNewInternalTemp(tree, FloatRegisterType, internalCands);
return defRefPosition;
}
//------------------------------------------------------------------------
// buildInternalRegisterUses - adds use positions for internal
// registers required for tree node.
//
// Notes:
// During the BuildNode process, calls to buildInternalIntRegisterDefForNode and
// buildInternalFloatRegisterDefForNode put new RefPositions in the 'internalDefs'
// array, and increment 'internalCount'. This method must be called to add corresponding
// uses. It then resets the 'internalCount' for the handling of the next node.
//
// If the internal registers must differ from the target register, 'setInternalRegsDelayFree'
// must be set to true, so that the uses may be marked 'delayRegFree'.
// Note that if a node has both float and int temps, generally the target will either be
// int *or* float, and it is not really necessary to set this on the other type, but it does
// no harm as it won't restrict the register selection.
//
void LinearScan::buildInternalRegisterUses()
{
assert(internalCount <= MaxInternalCount);
for (int i = 0; i < internalCount; i++)
{
RefPosition* def = internalDefs[i];
regMaskTP mask = def->registerAssignment;
RefPosition* use = newRefPosition(def->getInterval(), currentLoc, RefTypeUse, def->treeNode, mask, 0);
if (setInternalRegsDelayFree)
{
use->delayRegFree = true;
pendingDelayFree = true;
}
}
// internalCount = 0;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
//------------------------------------------------------------------------
// makeUpperVectorInterval - Create an Interval for saving and restoring
// the upper half of a large vector.
//
// Arguments:
// varIndex - The tracked index for a large vector lclVar.
//
void LinearScan::makeUpperVectorInterval(unsigned varIndex)
{
Interval* lclVarInterval = getIntervalForLocalVar(varIndex);
assert(Compiler::varTypeNeedsPartialCalleeSave(lclVarInterval->registerType));
Interval* newInt = newInterval(LargeVectorSaveType);
newInt->relatedInterval = lclVarInterval;
newInt->isUpperVector = true;
}
//------------------------------------------------------------------------
// getUpperVectorInterval - Get the Interval for saving and restoring
// the upper half of a large vector.
//
// Arguments:
// varIndex - The tracked index for a large vector lclVar.
//
Interval* LinearScan::getUpperVectorInterval(unsigned varIndex)
{
// TODO-Throughput: Consider creating a map from varIndex to upperVector interval.
for (Interval& interval : intervals)
{
if (interval.isLocalVar)
{
continue;
}
noway_assert(interval.isUpperVector);
if (interval.relatedInterval->getVarIndex(compiler) == varIndex)
{
return &interval;
}
}
unreached();
}
//------------------------------------------------------------------------
// buildUpperVectorSaveRefPositions - Create special RefPositions for saving
// the upper half of a set of large vectors.
//
// Arguments:
// tree - The current node being handled
// currentLoc - The location of the current node
// fpCalleeKillSet - The set of registers killed by this node.
//
// Notes: This is called by BuildDefsWithKills for any node that kills registers in the
// RBM_FLT_CALLEE_TRASH set. We actually need to find any calls that kill the upper-half
// of the callee-save vector registers.
// But we will use as a proxy any node that kills floating point registers.
// (Note that some calls are masquerading as other nodes at this point so we can't just check for calls.)
//
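// Illustrative example (assuming Windows x64, where only the lower 128 bits of the callee-saved
// vector registers survive a call): a TYP_SIMD32 lclVar that is live across a helper call gets a
// RefTypeUpperVectorSave on its upperVector Interval here, and a matching
// RefTypeUpperVectorRestore is created when the variable is next used (see
// buildUpperVectorRestoreRefPosition below).
//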
void LinearScan::buildUpperVectorSaveRefPositions(GenTree* tree, LsraLocation currentLoc, regMaskTP fpCalleeKillSet)
{
if ((tree != nullptr) && tree->IsCall())
{
if (tree->AsCall()->IsNoReturn())
{
// No point in having vector save/restore if the call will not return.
return;
}
}
if (enregisterLocalVars && !VarSetOps::IsEmpty(compiler, largeVectorVars))
{
// We assume that the kill set includes at least some callee-trash registers, but
// that it doesn't include any callee-save registers.
assert((fpCalleeKillSet & RBM_FLT_CALLEE_TRASH) != RBM_NONE);
assert((fpCalleeKillSet & RBM_FLT_CALLEE_SAVED) == RBM_NONE);
// We only need to save the upper half of any large vector vars that are currently live.
VARSET_TP liveLargeVectors(VarSetOps::Intersection(compiler, currentLiveVars, largeVectorVars));
VarSetOps::Iter iter(compiler, liveLargeVectors);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
Interval* varInterval = getIntervalForLocalVar(varIndex);
if (!varInterval->isPartiallySpilled)
{
Interval* upperVectorInterval = getUpperVectorInterval(varIndex);
RefPosition* pos =
newRefPosition(upperVectorInterval, currentLoc, RefTypeUpperVectorSave, tree, RBM_FLT_CALLEE_SAVED);
varInterval->isPartiallySpilled = true;
#ifdef TARGET_XARCH
pos->regOptional = true;
#endif
}
}
}
// For any non-lclVar intervals that are live at this point (i.e. in the DefList), we will also create
// a RefTypeUpperVectorSave. For now these will all be spilled at this point, as we don't currently
// have a mechanism to communicate any non-lclVar intervals that need to be restored.
// TODO-CQ: We could consider adding such a mechanism, but it's unclear whether this rare
// case of a large vector temp live across a call is worth the added complexity.
for (RefInfoListNode *listNode = defList.Begin(), *end = defList.End(); listNode != end;
listNode = listNode->Next())
{
const GenTree* defNode = listNode->treeNode;
var_types regType = defNode->TypeGet();
if (regType == TYP_STRUCT)
{
assert(defNode->OperIs(GT_LCL_VAR, GT_CALL));
if (defNode->OperIs(GT_LCL_VAR))
{
const GenTreeLclVar* lcl = defNode->AsLclVar();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
regType = varDsc->GetRegisterType();
}
else
{
const GenTreeCall* call = defNode->AsCall();
const CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
Compiler::structPassingKind howToReturnStruct;
regType = compiler->getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
if (howToReturnStruct == Compiler::SPK_ByValueAsHfa)
{
regType = compiler->GetHfaType(retClsHnd);
}
#if defined(TARGET_ARM64)
else if (howToReturnStruct == Compiler::SPK_ByValue)
{
// TODO-Cleanup: add a new Compiler::SPK for this case.
// This is the case when 16-byte struct is returned as [x0, x1].
// We don't need a partial callee save.
regType = TYP_LONG;
}
#endif // TARGET_ARM64
}
assert((regType != TYP_STRUCT) && (regType != TYP_UNDEF));
}
if (Compiler::varTypeNeedsPartialCalleeSave(regType))
{
// In the rare case where such an interval is live across nested calls, we don't need to insert another.
if (listNode->ref->getInterval()->recentRefPosition->refType != RefTypeUpperVectorSave)
{
RefPosition* pos = newRefPosition(listNode->ref->getInterval(), currentLoc, RefTypeUpperVectorSave,
tree, RBM_FLT_CALLEE_SAVED);
}
}
}
}
//------------------------------------------------------------------------
// buildUpperVectorRestoreRefPosition - Create a RefPosition for restoring
// the upper half of a large vector.
//
// Arguments:
// lclVarInterval - A lclVarInterval that is live at 'currentLoc'
// currentLoc - The current location for which we're building RefPositions
// node - The node, if any, that the restore would be inserted before.
// If null, the restore will be inserted at the end of the block.
//
void LinearScan::buildUpperVectorRestoreRefPosition(Interval* lclVarInterval, LsraLocation currentLoc, GenTree* node)
{
if (lclVarInterval->isPartiallySpilled)
{
unsigned varIndex = lclVarInterval->getVarIndex(compiler);
Interval* upperVectorInterval = getUpperVectorInterval(varIndex);
RefPosition* pos = newRefPosition(upperVectorInterval, currentLoc, RefTypeUpperVectorRestore, node, RBM_NONE);
lclVarInterval->isPartiallySpilled = false;
#ifdef TARGET_XARCH
pos->regOptional = true;
#endif
}
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#ifdef DEBUG
//------------------------------------------------------------------------
// ComputeOperandDstCount: computes the number of registers defined by a
// node.
//
// For most nodes, this is simple:
// - Nodes that do not produce values (e.g. stores and other void-typed
// nodes) and nodes that immediately use the registers they define
// produce no registers
// - Nodes that are marked as defining N registers define N registers.
//
// For contained nodes, however, things are more complicated: for purposes
// of bookkeeping, a contained node is treated as producing the transitive
// closure of the registers produced by its sources.
//
// Arguments:
// operand - The operand for which to compute a register count.
//
// Returns:
// The number of registers defined by `operand`.
//
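// Illustrative example: for a contained GT_LEA whose base and index are each non-contained
// register-producing nodes, this returns 2 (the transitive closure of its sources); for a
// contained constant it returns 0.
//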
int LinearScan::ComputeOperandDstCount(GenTree* operand)
{
// GT_ARGPLACE is the only non-LIR node that is currently in the trees at this stage, though
// note that it is not in the linear order.
if (operand->OperIs(GT_ARGPLACE))
{
return 0;
}
if (operand->isContained())
{
int dstCount = 0;
for (GenTree* op : operand->Operands())
{
dstCount += ComputeOperandDstCount(op);
}
return dstCount;
}
if (operand->IsUnusedValue())
{
// Operands that define an unused value do not produce any registers.
return 0;
}
if (operand->IsValue())
{
// Operands that are values and are not contained consume all of their operands
// and produce one or more registers.
return operand->GetRegisterDstCount(compiler);
}
else
{
// This must be one of the operand types that are neither contained nor produce a value.
// Stores and void-typed operands may be encountered when processing call nodes, which contain
// pointers to argument setup stores.
assert(operand->OperIsStore() || operand->OperIsBlkOp() || operand->OperIsPutArgStk() ||
operand->OperIsCompare() || operand->OperIs(GT_CMP) || operand->TypeGet() == TYP_VOID);
return 0;
}
}
//------------------------------------------------------------------------
// ComputeAvailableSrcCount: computes the number of registers available as
// sources for a node.
//
// This is simply the sum of the number of registers produced by each
// operand to the node.
//
// Arguments:
// node - The node for which to compute a source count.
//
// Return Value:
// The number of registers available as sources for `node`.
//
int LinearScan::ComputeAvailableSrcCount(GenTree* node)
{
int numSources = 0;
for (GenTree* operand : node->Operands())
{
numSources += ComputeOperandDstCount(operand);
}
return numSources;
}
#endif // DEBUG
//------------------------------------------------------------------------
// buildRefPositionsForNode: The main entry point for building the RefPositions
// and "tree temp" Intervals for a given node.
//
// Arguments:
// tree - The node for which we are building RefPositions
// currentLoc - The LsraLocation of the given node
//
void LinearScan::buildRefPositionsForNode(GenTree* tree, LsraLocation currentLoc)
{
// The LIR traversal doesn't visit GT_ARGPLACE nodes.
// GT_CLS_VAR nodes should have been eliminated by rationalizer.
assert(tree->OperGet() != GT_ARGPLACE);
assert(tree->OperGet() != GT_CLS_VAR);
// The set of internal temporary registers used by this node are stored in the
// gtRsvdRegs register mask. Clear it out.
tree->gtRsvdRegs = RBM_NONE;
#ifdef DEBUG
if (VERBOSE)
{
dumpDefList();
compiler->gtDispTree(tree, nullptr, nullptr, true);
}
#endif // DEBUG
if (tree->isContained())
{
#ifdef TARGET_XARCH
// On XArch we can have contained candidate lclVars if they are part of a RMW
// address computation. In this case we need to check whether it is a last use.
if (tree->IsLocal() && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
LclVarDsc* const varDsc = compiler->lvaGetDesc(tree->AsLclVarCommon());
if (isCandidateVar(varDsc))
{
assert(varDsc->lvTracked);
unsigned varIndex = varDsc->lvVarIndex;
VarSetOps::RemoveElemD(compiler, currentLiveVars, varIndex);
}
}
#else // TARGET_XARCH
assert(!isCandidateLocalRef(tree));
#endif // TARGET_XARCH
JITDUMP("Contained\n");
return;
}
#ifdef DEBUG
// If we are constraining the registers for allocation, we will modify all the RefPositions
// we've built for this node after we've created them. In order to do that, we'll remember
// the last RefPosition prior to those created for this node.
RefPositionIterator refPositionMark = refPositions.backPosition();
int oldDefListCount = defList.Count();
#endif // DEBUG
int consume = BuildNode(tree);
#ifdef DEBUG
int newDefListCount = defList.Count();
// Currently produce is unused, but we need to strengthen an assert to check if produce is
// as expected. See https://github.com/dotnet/runtime/issues/8678
int produce = newDefListCount - oldDefListCount;
assert((consume == 0) || (ComputeAvailableSrcCount(tree) == consume));
// If we are constraining registers, modify all the RefPositions we've just built to specify the
// minimum reg count required.
if ((getStressLimitRegs() != LSRA_LIMIT_NONE) || (getSelectionHeuristics() != LSRA_SELECT_DEFAULT))
{
// The number of registers required for a tree node is the sum of
// { RefTypeUses } + { RefTypeDef for the node itself } + specialPutArgCount
// This is the minimum set of registers that needs to be ensured in the candidate set of ref positions created.
//
// First, we count them.
unsigned minRegCount = 0;
RefPositionIterator iter = refPositionMark;
for (iter++; iter != refPositions.end(); iter++)
{
RefPosition* newRefPosition = &(*iter);
if (newRefPosition->isIntervalRef())
{
if ((newRefPosition->refType == RefTypeUse) ||
((newRefPosition->refType == RefTypeDef) && !newRefPosition->getInterval()->isInternal))
{
minRegCount++;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
else if (newRefPosition->refType == RefTypeUpperVectorSave)
{
minRegCount++;
}
#endif
if (newRefPosition->getInterval()->isSpecialPutArg)
{
minRegCount++;
}
}
}
if (tree->OperIsPutArgSplit())
{
// While we have attempted to account for any "specialPutArg" defs above, we're only looking at RefPositions
// created for this node. We must be defining at least one register in the PutArgSplit, so conservatively
// add one less than the maximum number of register args to 'minRegCount'.
minRegCount += MAX_REG_ARG - 1;
}
for (refPositionMark++; refPositionMark != refPositions.end(); refPositionMark++)
{
RefPosition* newRefPosition = &(*refPositionMark);
unsigned minRegCountForRef = minRegCount;
if (RefTypeIsUse(newRefPosition->refType) && newRefPosition->delayRegFree)
{
// If delayRegFree, then Use will interfere with the destination of the consuming node.
// Therefore, we also need to add the kill set of the consuming node to minRegCount.
//
// For example consider the following IR on x86, where v01 and v02
// are method args coming in ecx and edx respectively.
// GT_DIV(v01, v02)
//
// For GT_DIV, the minRegCount will be 3 without adding kill set of GT_DIV node.
//
// Assume further JitStressRegs=2, which would constrain candidates to callee trashable
// regs { eax, ecx, edx } on use positions of v01 and v02. LSRA allocates ecx for v01.
// The use position of v02 cannot be allocated a reg since it is marked delay-reg free and
// {eax,edx} are getting killed before the def of GT_DIV. For this reason, minRegCount for
// the use position of v02 also needs to take into account the kill set of its consuming node.
regMaskTP killMask = getKillSetForNode(tree);
if (killMask != RBM_NONE)
{
minRegCountForRef += genCountBits(killMask);
}
}
else if ((newRefPosition->refType) == RefTypeDef && (newRefPosition->getInterval()->isSpecialPutArg))
{
minRegCountForRef++;
}
newRefPosition->minRegCandidateCount = minRegCountForRef;
if (newRefPosition->IsActualRef() && doReverseCallerCallee())
{
Interval* interval = newRefPosition->getInterval();
regMaskTP oldAssignment = newRefPosition->registerAssignment;
regMaskTP calleeSaveMask = calleeSaveRegs(interval->registerType);
newRefPosition->registerAssignment =
getConstrainedRegMask(oldAssignment, calleeSaveMask, minRegCountForRef);
if ((newRefPosition->registerAssignment != oldAssignment) && (newRefPosition->refType == RefTypeUse) &&
!interval->isLocalVar)
{
checkConflictingDefUse(newRefPosition);
}
}
}
}
#endif // DEBUG
JITDUMP("\n");
}
static const regNumber lsraRegOrder[] = {REG_VAR_ORDER};
const unsigned lsraRegOrderSize = ArrLen(lsraRegOrder);
static const regNumber lsraRegOrderFlt[] = {REG_VAR_ORDER_FLT};
const unsigned lsraRegOrderFltSize = ArrLen(lsraRegOrderFlt);
//------------------------------------------------------------------------
// buildPhysRegRecords: Make an interval for each physical register
//
void LinearScan::buildPhysRegRecords()
{
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* curr = &physRegs[reg];
curr->init(reg);
}
for (unsigned int i = 0; i < lsraRegOrderSize; i++)
{
regNumber reg = lsraRegOrder[i];
RegRecord* curr = &physRegs[reg];
curr->regOrder = (unsigned char)i;
}
for (unsigned int i = 0; i < lsraRegOrderFltSize; i++)
{
regNumber reg = lsraRegOrderFlt[i];
RegRecord* curr = &physRegs[reg];
curr->regOrder = (unsigned char)i;
}
}
//------------------------------------------------------------------------
// insertZeroInitRefPositions: Handle lclVars that are live-in to the first block
//
// Notes:
// Prior to calling this method, 'currentLiveVars' must be set to the set of register
// candidate variables that are liveIn to the first block.
// For each register candidate that is live-in to the first block:
// - If it is a GC ref, or if compInitMem is set, a ZeroInit RefPosition will be created.
// - Otherwise, it will be marked as spilled, since it will not be assigned a register
// on entry and will be loaded from memory on the undefined path.
// Note that, when the compInitMem option is not set, we may encounter these on
// paths that are protected by the same condition as an earlier def. However, since
// we don't do the analysis to determine this - and couldn't rely on always identifying
// such cases even if we tried - we must conservatively treat the undefined path as
// being possible. This is a relatively rare case, so the introduced conservatism is
// not expected to warrant the analysis required to determine the best placement of
// an initialization.
//
void LinearScan::insertZeroInitRefPositions()
{
assert(enregisterLocalVars);
#ifdef DEBUG
VARSET_TP expectedLiveVars(VarSetOps::Intersection(compiler, registerCandidateVars, compiler->fgFirstBB->bbLiveIn));
assert(VarSetOps::Equal(compiler, currentLiveVars, expectedLiveVars));
#endif // DEBUG
// insert defs for this, then a block boundary
VarSetOps::Iter iter(compiler, currentLiveVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
if (!varDsc->lvIsParam && isCandidateVar(varDsc))
{
JITDUMP("V%02u was live in to first block:", compiler->lvaTrackedIndexToLclNum(varIndex));
Interval* interval = getIntervalForLocalVar(varIndex);
if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet()))
{
varDsc->lvMustInit = true;
// OSR will handle init of locals and promoted fields thereof
if (compiler->lvaIsOSRLocal(compiler->lvaTrackedIndexToLclNum(varIndex)))
{
JITDUMP(" will be initialized by OSR\n");
// setIntervalAsSpilled(interval);
varDsc->lvMustInit = false;
}
JITDUMP(" creating ZeroInit\n");
RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeZeroInit, nullptr /* theTreeNode */,
allRegs(interval->registerType));
pos->setRegOptional(true);
}
else
{
setIntervalAsSpilled(interval);
JITDUMP(" marking as spilled\n");
}
}
}
// We must also insert zero-inits for any finallyVars if they are refs or if compInitMem is true.
if (compiler->lvaEnregEHVars)
{
VarSetOps::Iter iter(compiler, finallyVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
if (!varDsc->lvIsParam && isCandidateVar(varDsc))
{
JITDUMP("V%02u is a finally var:", compiler->lvaTrackedIndexToLclNum(varIndex));
Interval* interval = getIntervalForLocalVar(varIndex);
if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet()))
{
if (interval->recentRefPosition == nullptr)
{
JITDUMP(" creating ZeroInit\n");
RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeZeroInit,
nullptr /* theTreeNode */, allRegs(interval->registerType));
pos->setRegOptional(true);
varDsc->lvMustInit = true;
}
else
{
// We must only generate one entry RefPosition for each Interval. Since this is not
// a parameter, it can't be RefTypeParamDef, so it must be RefTypeZeroInit, which
// we must have generated for the live-in case above.
assert(interval->recentRefPosition->refType == RefTypeZeroInit);
JITDUMP(" already ZeroInited\n");
}
}
}
}
}
}
#if defined(UNIX_AMD64_ABI)
//------------------------------------------------------------------------
// unixAmd64UpdateRegStateForArg: Sets the register state for an argument of type STRUCT for System V systems.
//
// Arguments:
// argDsc - the LclVarDsc for the argument of interest
//
// Notes:
// See Compiler::raUpdateRegStateForArg(RegState *regState, LclVarDsc *argDsc) in regalloc.cpp
// for how state for argument is updated for unix non-structs and Windows AMD64 structs.
//
void LinearScan::unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc)
{
assert(varTypeIsStruct(argDsc));
RegState* intRegState = &compiler->codeGen->intRegState;
RegState* floatRegState = &compiler->codeGen->floatRegState;
if ((argDsc->GetArgReg() != REG_STK) && (argDsc->GetArgReg() != REG_NA))
{
if (genRegMask(argDsc->GetArgReg()) & (RBM_ALLFLOAT))
{
assert(genRegMask(argDsc->GetArgReg()) & (RBM_FLTARG_REGS));
floatRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetArgReg());
}
else
{
assert(genRegMask(argDsc->GetArgReg()) & (RBM_ARG_REGS));
intRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetArgReg());
}
}
if ((argDsc->GetOtherArgReg() != REG_STK) && (argDsc->GetOtherArgReg() != REG_NA))
{
if (genRegMask(argDsc->GetOtherArgReg()) & (RBM_ALLFLOAT))
{
assert(genRegMask(argDsc->GetOtherArgReg()) & (RBM_FLTARG_REGS));
floatRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetOtherArgReg());
}
else
{
assert(genRegMask(argDsc->GetOtherArgReg()) & (RBM_ARG_REGS));
intRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetOtherArgReg());
}
}
}
#endif // defined(UNIX_AMD64_ABI)
//------------------------------------------------------------------------
// updateRegStateForArg: Updates rsCalleeRegArgMaskLiveIn for the appropriate
// regState (either compiler->intRegState or compiler->floatRegState),
// with the lvArgReg on "argDsc"
//
// Arguments:
// argDsc - the argument for which the state is to be updated.
//
// Return Value: None
//
// Assumptions:
// The argument is live on entry to the function
// (or is untracked and therefore assumed live)
//
// Notes:
// This relies on a method in regAlloc.cpp that is shared between LSRA
// and regAlloc. It is further abstracted here because regState is updated
// separately for tracked and untracked variables in LSRA.
//
void LinearScan::updateRegStateForArg(LclVarDsc* argDsc)
{
#if defined(UNIX_AMD64_ABI)
// For System V AMD64 calls the argDsc can have 2 registers (for structs.)
// Handle them here.
if (varTypeIsStruct(argDsc))
{
unixAmd64UpdateRegStateForArg(argDsc);
}
else
#endif // defined(UNIX_AMD64_ABI)
{
RegState* intRegState = &compiler->codeGen->intRegState;
RegState* floatRegState = &compiler->codeGen->floatRegState;
bool isFloat = emitter::isFloatReg(argDsc->GetArgReg());
if (argDsc->lvIsHfaRegArg())
{
isFloat = true;
}
if (isFloat)
{
JITDUMP("Float arg V%02u in reg %s\n", compiler->lvaGetLclNum(argDsc), getRegName(argDsc->GetArgReg()));
compiler->raUpdateRegStateForArg(floatRegState, argDsc);
}
else
{
JITDUMP("Int arg V%02u in reg %s\n", compiler->lvaGetLclNum(argDsc), getRegName(argDsc->GetArgReg()));
#if FEATURE_MULTIREG_ARGS
if (argDsc->GetOtherArgReg() != REG_NA)
{
JITDUMP("(second half) in reg %s\n", getRegName(argDsc->GetOtherArgReg()));
}
#endif // FEATURE_MULTIREG_ARGS
compiler->raUpdateRegStateForArg(intRegState, argDsc);
}
}
}
//------------------------------------------------------------------------
// buildIntervals: The main entry point for building the data structures over
// which we will do register allocation.
//
void LinearScan::buildIntervals()
{
BasicBlock* block;
JITDUMP("\nbuildIntervals ========\n");
// Build (empty) records for all of the physical registers
buildPhysRegRecords();
#ifdef DEBUG
if (VERBOSE)
{
printf("\n-----------------\n");
printf("LIVENESS:\n");
printf("-----------------\n");
for (BasicBlock* const block : compiler->Blocks())
{
printf(FMT_BB " use def in out\n", block->bbNum);
dumpConvertedVarSet(compiler, block->bbVarUse);
printf("\n");
dumpConvertedVarSet(compiler, block->bbVarDef);
printf("\n");
dumpConvertedVarSet(compiler, block->bbLiveIn);
printf("\n");
dumpConvertedVarSet(compiler, block->bbLiveOut);
printf("\n");
}
}
#endif // DEBUG
#if DOUBLE_ALIGN
// We will determine whether we should double align the frame during
// identifyCandidates(), but we initially assume that we will not.
doDoubleAlign = false;
#endif
identifyCandidates();
// Figure out if we're going to use a frame pointer. We need to do this before building
// the ref positions, because those objects will embed the frame register in various register masks
// if the frame pointer is not reserved. If we decide to have a frame pointer, setFrameType() will
// remove the frame pointer from the masks.
setFrameType();
DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_PRE));
// second part:
JITDUMP("\nbuildIntervals second part ========\n");
currentLoc = 0;
// TODO-Cleanup: This duplicates prior behavior where entry (ParamDef) RefPositions were
// being assigned the bbNum of the last block traversed in the 2nd phase of Lowering.
// Previously, the block sequencing was done for the (formerly separate) Build pass,
// and the curBBNum was left as the last block sequenced. This block was then used to set the
// weight for the entry (ParamDef) RefPositions. It would be logical to set this to the
// normalized entry weight (compiler->fgCalledCount), but that results in a net regression.
if (!blockSequencingDone)
{
setBlockSequence();
}
// Next, create ParamDef RefPositions for all the tracked parameters, in order of their varIndex.
// Assign these RefPositions to the (nonexistent) BB0.
curBBNum = 0;
RegState* intRegState = &compiler->codeGen->intRegState;
RegState* floatRegState = &compiler->codeGen->floatRegState;
intRegState->rsCalleeRegArgMaskLiveIn = RBM_NONE;
floatRegState->rsCalleeRegArgMaskLiveIn = RBM_NONE;
for (unsigned int varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
{
LclVarDsc* argDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
if (!argDsc->lvIsParam)
{
continue;
}
// Only reserve a register if the argument is actually used.
// Is it dead on entry? If compJmpOpUsed is true, then the arguments
// have to be kept alive, so we have to consider it as live on entry.
// Use lvRefCnt instead of checking bbLiveIn because if it's volatile we
// won't have done dataflow on it, but it needs to be marked as live-in so
// it will get saved in the prolog.
if (!compiler->compJmpOpUsed && argDsc->lvRefCnt() == 0 && !compiler->opts.compDbgCode)
{
continue;
}
if (argDsc->lvIsRegArg)
{
updateRegStateForArg(argDsc);
}
if (isCandidateVar(argDsc))
{
Interval* interval = getIntervalForLocalVar(varIndex);
const var_types regType = argDsc->GetRegisterType();
regMaskTP mask = allRegs(regType);
if (argDsc->lvIsRegArg)
{
// Set this interval as currently assigned to that register
regNumber inArgReg = argDsc->GetArgReg();
assert(inArgReg < REG_COUNT);
mask = genRegMask(inArgReg);
assignPhysReg(inArgReg, interval);
INDEBUG(registersToDump |= getRegMask(inArgReg, interval->registerType));
}
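// Illustrative example (assuming the Windows x64 ABI): the first integer arg arrives in RCX, so
// its ParamDef below is constrained to that single register (mask == RBM_RCX) and the interval
// starts out assigned to RCX; a stack-passed arg instead keeps the full allRegs(regType)
// candidate set.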
RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeParamDef, nullptr, mask);
pos->setRegOptional(true);
}
else if (varTypeIsStruct(argDsc->lvType))
{
for (unsigned fieldVarNum = argDsc->lvFieldLclStart;
fieldVarNum < argDsc->lvFieldLclStart + argDsc->lvFieldCnt; ++fieldVarNum)
{
const LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldVarNum);
if (fieldVarDsc->lvLRACandidate)
{
assert(fieldVarDsc->lvTracked);
Interval* interval = getIntervalForLocalVar(fieldVarDsc->lvVarIndex);
RefPosition* pos =
newRefPosition(interval, MinLocation, RefTypeParamDef, nullptr, allRegs(TypeGet(fieldVarDsc)));
pos->setRegOptional(true);
}
}
}
else
{
// We can overwrite the register (i.e. codegen saves it on entry)
assert(argDsc->lvRefCnt() == 0 || !argDsc->lvIsRegArg || argDsc->lvDoNotEnregister ||
!argDsc->lvLRACandidate || (varTypeIsFloating(argDsc->TypeGet()) && compiler->opts.compDbgCode));
}
}
// Now set up the reg state for the non-tracked args
// (We do this here because we want to generate the ParamDef RefPositions in tracked
// order, so that loop doesn't hit the non-tracked args)
for (unsigned argNum = 0; argNum < compiler->info.compArgsCount; argNum++)
{
LclVarDsc* argDsc = compiler->lvaGetDesc(argNum);
if (argDsc->lvPromotedStruct())
{
for (unsigned fieldVarNum = argDsc->lvFieldLclStart;
fieldVarNum < argDsc->lvFieldLclStart + argDsc->lvFieldCnt; ++fieldVarNum)
{
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldVarNum);
noway_assert(fieldVarDsc->lvIsParam);
if (!fieldVarDsc->lvTracked && fieldVarDsc->lvIsRegArg)
{
updateRegStateForArg(fieldVarDsc);
}
}
}
else
{
noway_assert(argDsc->lvIsParam);
if (!argDsc->lvTracked && argDsc->lvIsRegArg)
{
updateRegStateForArg(argDsc);
}
}
}
// If there is a secret stub param, it is also live in
if (compiler->info.compPublishStubParam)
{
intRegState->rsCalleeRegArgMaskLiveIn |= RBM_SECRET_STUB_PARAM;
}
BasicBlock* predBlock = nullptr;
BasicBlock* prevBlock = nullptr;
// Initialize currentLiveVars to the empty set. We will set it to the current
// live-in at the entry to each block (this will include the incoming args on
// the first block).
VarSetOps::AssignNoCopy(compiler, currentLiveVars, VarSetOps::MakeEmpty(compiler));
for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
{
JITDUMP("\nNEW BLOCK " FMT_BB "\n", block->bbNum);
bool predBlockIsAllocated = false;
predBlock = findPredBlockForLiveIn(block, prevBlock DEBUGARG(&predBlockIsAllocated));
if (predBlock != nullptr)
{
JITDUMP("\n\nSetting " FMT_BB " as the predecessor for determining incoming variable registers of " FMT_BB
"\n",
predBlock->bbNum, block->bbNum);
assert(predBlock->bbNum <= bbNumMaxBeforeResolution);
blockInfo[block->bbNum].predBBNum = predBlock->bbNum;
}
if (enregisterLocalVars)
{
VarSetOps::AssignNoCopy(compiler, currentLiveVars,
VarSetOps::Intersection(compiler, registerCandidateVars, block->bbLiveIn));
if (block == compiler->fgFirstBB)
{
insertZeroInitRefPositions();
// The first real location is at 1; 0 is for the entry.
currentLoc = 1;
}
// For blocks that don't have EHBoundaryIn, we need DummyDefs for cases where "predBlock" isn't
// really a predecessor.
            // Note that it's possible to have uses of uninitialized variables, in which case even the first
// block may require DummyDefs, which we are not currently adding - this means that these variables
// will always be considered to be in memory on entry (and reloaded when the use is encountered).
// TODO-CQ: Consider how best to tune this. Currently, if we create DummyDefs for uninitialized
// variables (which may actually be initialized along the dynamically executed paths, but not
// on all static paths), we wind up with excessive liveranges for some of these variables.
if (!blockInfo[block->bbNum].hasEHBoundaryIn)
{
// Any lclVars live-in on a non-EH boundary edge are resolution candidates.
VarSetOps::UnionD(compiler, resolutionCandidateVars, currentLiveVars);
if (block != compiler->fgFirstBB)
{
VARSET_TP newLiveIn(VarSetOps::MakeCopy(compiler, currentLiveVars));
if (predBlock != nullptr)
{
// Compute set difference: newLiveIn = currentLiveVars - predBlock->bbLiveOut
VarSetOps::DiffD(compiler, newLiveIn, predBlock->bbLiveOut);
}
// Don't create dummy defs for EH vars; we'll load them from the stack as/when needed.
VarSetOps::DiffD(compiler, newLiveIn, exceptVars);
// Create dummy def RefPositions
if (!VarSetOps::IsEmpty(compiler, newLiveIn))
{
// If we are using locations from a predecessor, we should never require DummyDefs.
assert(!predBlockIsAllocated);
JITDUMP("Creating dummy definitions\n");
VarSetOps::Iter iter(compiler, newLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
// Add a dummyDef for any candidate vars that are in the "newLiveIn" set.
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
assert(isCandidateVar(varDsc));
Interval* interval = getIntervalForLocalVar(varIndex);
RefPosition* pos = newRefPosition(interval, currentLoc, RefTypeDummyDef, nullptr,
allRegs(interval->registerType));
pos->setRegOptional(true);
}
JITDUMP("Finished creating dummy definitions\n\n");
}
}
}
}
// Add a dummy RefPosition to mark the block boundary.
// Note that we do this AFTER adding the exposed uses above, because the
// register positions for those exposed uses need to be recorded at
// this point.
RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE);
currentLoc += 2;
JITDUMP("\n");
if (firstColdLoc == MaxLocation)
{
if (block->isRunRarely())
{
firstColdLoc = currentLoc;
JITDUMP("firstColdLoc = %d\n", firstColdLoc);
}
}
else
{
// TODO: We'd like to assert the following but we don't currently ensure that only
// "RunRarely" blocks are contiguous.
// (The funclets will generally be last, but we don't follow layout order, so we
// don't have to preserve that in the block sequence.)
// assert(block->isRunRarely());
}
// For frame poisoning we generate code into scratch BB right after prolog since
// otherwise the prolog might become too large. In this case we will put the poison immediate
// into the scratch register, so it will be killed here.
if (compiler->compShouldPoisonFrame() && compiler->fgFirstBBisScratch() && block == compiler->fgFirstBB)
{
regMaskTP killed;
#if defined(TARGET_XARCH)
// Poisoning uses EAX for small vars and rep stosd that kills edi, ecx and eax for large vars.
killed = RBM_EDI | RBM_ECX | RBM_EAX;
#else
// Poisoning uses REG_SCRATCH for small vars and memset helper for big vars.
killed = genRegMask(REG_SCRATCH) | compiler->compHelperCallKillSet(CORINFO_HELP_MEMSET);
#endif
addRefsForPhysRegMask(killed, currentLoc + 1, RefTypeKill, true);
currentLoc += 2;
}
LIR::Range& blockRange = LIR::AsRange(block);
for (GenTree* node : blockRange)
{
// We increment the location of each tree node by 2 so that the node definition, if any,
// is at a new location and doesn't interfere with the uses.
// For multi-reg local stores, the 'BuildMultiRegStoreLoc' method will further increment the
// location by 2 for each destination register beyond the first.
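            // (As a sketch of the numbering: a node at location L consumes its sources at L and
            // produces its def, if any, at L+1; the next node is then at L+2.)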
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
node->gtSeqNum = currentLoc;
// In DEBUG, we want to set the gtRegTag to GT_REGTAG_REG, so that subsequent dumps will show the register
// value.
// Although this looks like a no-op it sets the tag.
node->SetRegNum(node->GetRegNum());
#endif
buildRefPositionsForNode(node, currentLoc);
#ifdef DEBUG
if (currentLoc > maxNodeLocation)
{
maxNodeLocation = currentLoc;
}
#endif // DEBUG
currentLoc += 2;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// At the end of each block, create upperVectorRestores for any largeVectorVars that may be
// partiallySpilled (during the build phase all intervals will be marked isPartiallySpilled if
        // they *may* be partially spilled at any point).
if (enregisterLocalVars)
{
VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars);
unsigned largeVectorVarIndex = 0;
while (largeVectorVarsIter.NextElem(&largeVectorVarIndex))
{
Interval* lclVarInterval = getIntervalForLocalVar(largeVectorVarIndex);
buildUpperVectorRestoreRefPosition(lclVarInterval, currentLoc, nullptr);
}
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Note: the visited set is cleared in LinearScan::doLinearScan()
markBlockVisited(block);
if (!defList.IsEmpty())
{
INDEBUG(dumpDefList());
assert(!"Expected empty defList at end of block");
}
if (enregisterLocalVars)
{
// Insert exposed uses for a lclVar that is live-out of 'block' but not live-in to the
// next block, or any unvisited successors.
// This will address lclVars that are live on a backedge, as well as those that are kept
// live at a GT_JMP.
//
            // Blocks ending with "jmp method" are marked as BBF_HAS_JMP,
// and jmp call is represented using GT_JMP node which is a leaf node.
// Liveness phase keeps all the arguments of the method live till the end of
// block by adding them to liveout set of the block containing GT_JMP.
//
// The target of a GT_JMP implicitly uses all the current method arguments, however
// there are no actual references to them. This can cause LSRA to assert, because
// the variables are live but it sees no references. In order to correctly model the
// liveness of these arguments, we add dummy exposed uses, in the same manner as for
// backward branches. This will happen automatically via expUseSet.
//
// Note that a block ending with GT_JMP has no successors and hence the variables
// for which dummy use ref positions are added are arguments of the method.
VARSET_TP expUseSet(VarSetOps::MakeCopy(compiler, block->bbLiveOut));
VarSetOps::IntersectionD(compiler, expUseSet, registerCandidateVars);
BasicBlock* nextBlock = getNextBlock();
if (nextBlock != nullptr)
{
VarSetOps::DiffD(compiler, expUseSet, nextBlock->bbLiveIn);
}
for (BasicBlock* succ : block->GetAllSuccs(compiler))
{
if (VarSetOps::IsEmpty(compiler, expUseSet))
{
break;
}
if (isBlockVisited(succ))
{
continue;
}
VarSetOps::DiffD(compiler, expUseSet, succ->bbLiveIn);
}
if (!VarSetOps::IsEmpty(compiler, expUseSet))
{
JITDUMP("Exposed uses:");
VarSetOps::Iter iter(compiler, expUseSet);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(isCandidateVar(varDsc));
Interval* interval = getIntervalForLocalVar(varIndex);
RefPosition* pos =
newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
pos->setRegOptional(true);
JITDUMP(" V%02u", varNum);
}
JITDUMP("\n");
}
// Clear the "last use" flag on any vars that are live-out from this block.
VARSET_TP bbLiveDefs(VarSetOps::Intersection(compiler, registerCandidateVars, block->bbLiveOut));
VarSetOps::Iter iter(compiler, bbLiveDefs);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
LclVarDsc* const varDsc = compiler->lvaGetDesc(varNum);
assert(isCandidateVar(varDsc));
RefPosition* const lastRP = getIntervalForLocalVar(varIndex)->lastRefPosition;
// We should be able to assert that lastRP is non-null if it is live-out, but sometimes liveness
// lies.
if ((lastRP != nullptr) && (lastRP->bbNum == block->bbNum))
{
lastRP->lastUse = false;
}
}
#ifdef DEBUG
checkLastUses(block);
if (VERBOSE)
{
printf("use: ");
dumpConvertedVarSet(compiler, block->bbVarUse);
printf("\ndef: ");
dumpConvertedVarSet(compiler, block->bbVarDef);
printf("\n");
}
#endif // DEBUG
}
prevBlock = block;
}
if (enregisterLocalVars)
{
if (compiler->lvaKeepAliveAndReportThis())
{
// If we need to KeepAliveAndReportThis, add a dummy exposed use of it at the end
unsigned keepAliveVarNum = compiler->info.compThisArg;
assert(compiler->info.compIsStatic == false);
const LclVarDsc* varDsc = compiler->lvaGetDesc(keepAliveVarNum);
if (isCandidateVar(varDsc))
{
JITDUMP("Adding exposed use of this, for lvaKeepAliveAndReportThis\n");
Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex);
RefPosition* pos =
newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
pos->setRegOptional(true);
}
}
// Adjust heuristics for writeThru intervals.
if (compiler->compHndBBtabCount > 0)
{
VarSetOps::Iter iter(compiler, exceptVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
Interval* interval = getIntervalForLocalVar(varIndex);
assert(interval->isWriteThru);
weight_t weight = varDsc->lvRefCntWtd();
// We'd like to only allocate registers for EH vars that have enough uses
// to compensate for the additional registers being live (and for the possibility
// that we may have to insert an additional copy).
// However, we don't currently have that information available. Instead, we'll
// aggressively assume that these vars are defined once, at their first RefPosition.
//
RefPosition* firstRefPosition = interval->firstRefPosition;
// Incoming reg args are given an initial weight of 2 * BB_UNITY_WEIGHT
// (see lvaComputeRefCounts(); this may be reviewed/changed in future).
//
weight_t initialWeight = (firstRefPosition->refType == RefTypeParamDef)
? (2 * BB_UNITY_WEIGHT)
: blockInfo[firstRefPosition->bbNum].weight;
weight -= initialWeight;
// If the remaining weight is less than the initial weight, we'd like to allocate it only
// opportunistically, but we don't currently have a mechanism to do so.
// For now, we'll just avoid using callee-save registers if the weight is too low.
if (interval->preferCalleeSave)
{
// The benefit of a callee-save register isn't as high as it would be for a normal arg.
// We'll have at least the cost of saving & restoring the callee-save register,
// so we won't break even until we have at least 4 * BB_UNITY_WEIGHT.
// Given that we also don't have a good way to tell whether the variable is live
// across a call in the non-EH code, we'll be extra conservative about this.
// Note that for writeThru intervals we don't update the preferences to be only callee-save.
unsigned calleeSaveCount =
(varTypeUsesFloatReg(interval->registerType)) ? CNT_CALLEE_SAVED_FLOAT : CNT_CALLEE_ENREG;
if ((weight <= (BB_UNITY_WEIGHT * 7)) || varDsc->lvVarIndex >= calleeSaveCount)
{
// If this is relatively low weight, don't prefer callee-save at all.
interval->preferCalleeSave = false;
}
else
{
// In other cases, we'll add in the callee-save regs to the preferences, but not clear
                    // the non-callee-save regs. We also handle this case specially in tryAllocateFreeReg().
interval->registerPreferences |= calleeSaveRegs(interval->registerType);
}
}
}
}
#ifdef DEBUG
if (getLsraExtendLifeTimes())
{
for (unsigned lclNum = 0; lclNum < compiler->lvaCount; lclNum++)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
if (varDsc->lvLRACandidate)
{
JITDUMP("Adding exposed use of V%02u for LsraExtendLifetimes\n", lclNum);
Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex);
RefPosition* pos =
newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
pos->setRegOptional(true);
}
}
}
#endif // DEBUG
}
// If the last block has successors, create a RefTypeBB to record
// what's live
if (prevBlock->NumSucc(compiler) > 0)
{
RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE);
}
#ifdef DEBUG
// Make sure we don't have any blocks that were not visited
for (BasicBlock* const block : compiler->Blocks())
{
assert(isBlockVisited(block));
}
if (VERBOSE)
{
lsraDumpIntervals("BEFORE VALIDATING INTERVALS");
dumpRefPositions("BEFORE VALIDATING INTERVALS");
}
validateIntervals();
#endif // DEBUG
}
#ifdef DEBUG
//------------------------------------------------------------------------
// validateIntervals: A DEBUG-only method that checks that:
// - the lclVar RefPositions do not reflect uses of undefined values
//      - A singleDef interval has only its first RefPosition as a RefTypeDef.
//
// TODO-Cleanup: If an undefined use is encountered, it merely prints a message
// but should probably assert instead.
//
void LinearScan::validateIntervals()
{
if (enregisterLocalVars)
{
for (unsigned i = 0; i < compiler->lvaTrackedCount; i++)
{
if (!compiler->lvaGetDescByTrackedIndex(i)->lvLRACandidate)
{
continue;
}
Interval* interval = getIntervalForLocalVar(i);
bool defined = false;
unsigned lastUseBBNum = 0;
JITDUMP("-----------------\n");
for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
{
if (VERBOSE)
{
ref->dump(this);
}
RefType refType = ref->refType;
if (!defined && RefTypeIsUse(refType) && (lastUseBBNum == ref->bbNum))
{
if (!ref->lastUse)
{
if (compiler->info.compMethodName != nullptr)
{
JITDUMP("%s: ", compiler->info.compMethodName);
}
JITDUMP("LocalVar V%02u: undefined use at %u\n", interval->varNum, ref->nodeLocation);
assert(false);
}
}
                // For single-def intervals, only the first RefPosition should be a RefTypeDef
if (interval->isSingleDef && RefTypeIsDef(refType))
{
assert(ref == interval->firstRefPosition);
}
// Note that there can be multiple last uses if they are on disjoint paths,
// so we can't really check the lastUse flag
if (ref->lastUse)
{
defined = false;
lastUseBBNum = ref->bbNum;
}
if (RefTypeIsDef(refType))
{
defined = true;
}
}
}
}
}
#endif // DEBUG
#if defined(TARGET_XARCH) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// setTgtPref: Set a preference relationship between the given Interval
// and a Use RefPosition.
//
// Arguments:
// interval - An interval whose defining instruction has tgtPrefUse as a use
// tgtPrefUse - The use RefPosition
//
// Notes:
// This is called when we would like tgtPrefUse and this def to get the same register.
//    This is only desirable if the use is a last use, which it is if it is a non-local,
//    *or* if it is marked as a last use (i.e. GTF_VAR_DEATH is set).
// Note that we don't yet have valid lastUse information in the RefPositions that we're building
// (every RefPosition is set as a lastUse until we encounter a new use), so we have to rely on the treeNode.
//    This may be called for multiple uses, in which case 'interval' will get preferenced to at most
//    the first one (if it didn't already have a 'relatedInterval').
//
void setTgtPref(Interval* interval, RefPosition* tgtPrefUse)
{
if (tgtPrefUse != nullptr)
{
Interval* useInterval = tgtPrefUse->getInterval();
if (!useInterval->isLocalVar || (tgtPrefUse->treeNode == nullptr) ||
((tgtPrefUse->treeNode->gtFlags & GTF_VAR_DEATH) != 0))
{
// Set the use interval as related to the interval we're defining.
useInterval->assignRelatedIntervalIfUnassigned(interval);
}
}
}
#endif // TARGET_XARCH || FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// BuildDef: Build a RefTypeDef RefPosition for the given node
//
// Arguments:
// tree - The node that defines a register
// dstCandidates - The candidate registers for the definition
// multiRegIdx - The index of the definition, defaults to zero.
// Only non-zero for multi-reg nodes.
//
// Return Value:
// The newly created RefPosition.
//
// Notes:
// Adds the RefInfo for the definition to the defList.
//
RefPosition* LinearScan::BuildDef(GenTree* tree, regMaskTP dstCandidates, int multiRegIdx)
{
assert(!tree->isContained());
if (dstCandidates != RBM_NONE)
{
assert((tree->GetRegNum() == REG_NA) || (dstCandidates == genRegMask(tree->GetRegByIndex(multiRegIdx))));
}
RegisterType type;
if (!tree->IsMultiRegNode())
{
type = getDefType(tree);
}
else
{
type = tree->GetRegTypeByIndex(multiRegIdx);
}
if (varTypeUsesFloatReg(type))
{
compiler->compFloatingPointUsed = true;
}
Interval* interval = newInterval(type);
if (tree->GetRegNum() != REG_NA)
{
if (!tree->IsMultiRegNode() || (multiRegIdx == 0))
{
assert((dstCandidates == RBM_NONE) || (dstCandidates == genRegMask(tree->GetRegNum())));
dstCandidates = genRegMask(tree->GetRegNum());
}
else
{
assert(isSingleRegister(dstCandidates));
}
}
#ifdef TARGET_X86
else if (varTypeIsByte(tree))
{
if (dstCandidates == RBM_NONE)
{
dstCandidates = allRegs(TYP_INT);
}
dstCandidates &= ~RBM_NON_BYTE_REGS;
assert(dstCandidates != RBM_NONE);
}
#endif // TARGET_X86
if (pendingDelayFree)
{
interval->hasInterferingUses = true;
// pendingDelayFree = false;
}
RefPosition* defRefPosition =
newRefPosition(interval, currentLoc + 1, RefTypeDef, tree, dstCandidates, multiRegIdx);
if (tree->IsUnusedValue())
{
defRefPosition->isLocalDefUse = true;
defRefPosition->lastUse = true;
}
else
{
RefInfoListNode* refInfo = listNodePool.GetNode(defRefPosition, tree);
defList.Append(refInfo);
}
#if defined(TARGET_XARCH) || defined(FEATURE_HW_INTRINSICS)
setTgtPref(interval, tgtPrefUse);
setTgtPref(interval, tgtPrefUse2);
#endif // TARGET_XARCH || FEATURE_HW_INTRINSICS
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
assert(!interval->isPartiallySpilled);
#endif
return defRefPosition;
}
//------------------------------------------------------------------------
// BuildDef: Build one or more RefTypeDef RefPositions for the given node
//
// Arguments:
// tree - The node that defines a register
// dstCount - The number of registers defined by the node
// dstCandidates - the candidate registers for the definition
//
// Notes:
// Adds the RefInfo for the definitions to the defList.
//
void LinearScan::BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates)
{
bool fixedReg = false;
if ((dstCount > 1) && (dstCandidates != RBM_NONE) && ((int)genCountBits(dstCandidates) == dstCount))
{
fixedReg = true;
}
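    // When 'fixedReg' is true there is exactly one candidate register per def, so each def below
    // is given a single-register mask, peeled from 'dstCandidates' in order (or queried from the
    // return type descriptor for a multi-reg call).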
const ReturnTypeDesc* retTypeDesc = nullptr;
if (tree->IsMultiRegCall())
{
retTypeDesc = tree->AsCall()->GetReturnTypeDesc();
}
for (int i = 0; i < dstCount; i++)
{
regMaskTP thisDstCandidates;
if (fixedReg)
{
// In case of multi-reg call node, we have to query the ith position return register.
// For all other cases of multi-reg definitions, the registers must be in sequential order.
if (retTypeDesc != nullptr)
{
thisDstCandidates = genRegMask(tree->AsCall()->GetReturnTypeDesc()->GetABIReturnReg(i));
assert((dstCandidates & thisDstCandidates) != RBM_NONE);
}
else
{
thisDstCandidates = genFindLowestBit(dstCandidates);
}
dstCandidates &= ~thisDstCandidates;
}
else
{
thisDstCandidates = dstCandidates;
}
BuildDef(tree, thisDstCandidates, i);
}
}
//------------------------------------------------------------------------
// BuildDef: Build one or more RefTypeDef RefPositions for the given node,
// as well as kills as specified by the given mask.
//
// Arguments:
// tree - The node that defines a register
// dstCount - The number of registers defined by the node
// dstCandidates - The candidate registers for the definition
// killMask - The mask of registers killed by this node
//
// Notes:
// Adds the RefInfo for the definitions to the defList.
// The def and kill functionality is folded into a single method so that the
// save and restores of upper vector registers can be bracketed around the def.
//
void LinearScan::BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask)
{
assert(killMask == getKillSetForNode(tree));
// Call this even when killMask is RBM_NONE, as we have to check for some special cases
buildKillPositionsForNode(tree, currentLoc + 1, killMask);
if (killMask != RBM_NONE)
{
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Build RefPositions to account for the fact that, even in a callee-save register, the upper half of any large
// vector will be killed by a call.
// We actually need to find any calls that kill the upper-half of the callee-save vector registers.
// But we will use as a proxy any node that kills floating point registers.
// (Note that some calls are masquerading as other nodes at this point so we can't just check for calls.)
// We call this unconditionally for such nodes, as we will create RefPositions for any large vector tree temps
// even if 'enregisterLocalVars' is false, or 'liveLargeVectors' is empty, though currently the allocation
// phase will fully (rather than partially) spill those, so we don't need to build the UpperVectorRestore
// RefPositions in that case.
// This must be done after the kills, so that we know which large vectors are still live.
//
if ((killMask & RBM_FLT_CALLEE_TRASH) != RBM_NONE)
{
buildUpperVectorSaveRefPositions(tree, currentLoc + 1, killMask);
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
}
// Now, create the Def(s)
BuildDefs(tree, dstCount, dstCandidates);
}
//------------------------------------------------------------------------
// BuildUse: Remove the RefInfoListNode for the given multi-reg index of the given node from
// the defList, and build a use RefPosition for the associated Interval.
//
// Arguments:
// operand - The node of interest
// candidates - The register candidates for the use
// multiRegIdx - The index of the multireg def/use
//
// Return Value:
// The newly created use RefPosition
//
// Notes:
// The node must not be contained, and must have been processed by buildRefPositionsForNode().
//
RefPosition* LinearScan::BuildUse(GenTree* operand, regMaskTP candidates, int multiRegIdx)
{
assert(!operand->isContained());
Interval* interval;
bool regOptional = operand->IsRegOptional();
if (isCandidateLocalRef(operand))
{
interval = getIntervalForLocalVarNode(operand->AsLclVarCommon());
// We have only approximate last-use information at this point. This is because the
// execution order doesn't actually reflect the true order in which the localVars
// are referenced - but the order of the RefPositions will, so we recompute it after
// RefPositions are built.
// Use the old value for setting currentLiveVars - note that we do this with the
// not-quite-correct setting of lastUse. However, this is OK because
// 1) this is only for preferencing, which doesn't require strict correctness, and
// 2) the cases where these out-of-order uses occur should not overlap a kill.
// TODO-Throughput: clean this up once we have the execution order correct. At that point
// we can update currentLiveVars at the same place that we create the RefPosition.
if ((operand->gtFlags & GTF_VAR_DEATH) != 0)
{
unsigned varIndex = interval->getVarIndex(compiler);
VarSetOps::RemoveElemD(compiler, currentLiveVars, varIndex);
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
buildUpperVectorRestoreRefPosition(interval, currentLoc, operand);
#endif
}
else if (operand->IsMultiRegLclVar())
{
assert(compiler->lvaEnregMultiRegVars);
LclVarDsc* varDsc = compiler->lvaGetDesc(operand->AsLclVar());
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + multiRegIdx);
interval = getIntervalForLocalVar(fieldVarDsc->lvVarIndex);
if (operand->AsLclVar()->IsLastUse(multiRegIdx))
{
VarSetOps::RemoveElemD(compiler, currentLiveVars, fieldVarDsc->lvVarIndex);
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
buildUpperVectorRestoreRefPosition(interval, currentLoc, operand);
#endif
}
else
{
RefInfoListNode* refInfo = defList.removeListNode(operand, multiRegIdx);
RefPosition* defRefPos = refInfo->ref;
assert(defRefPos->multiRegIdx == multiRegIdx);
interval = defRefPos->getInterval();
listNodePool.ReturnNode(refInfo);
operand = nullptr;
}
RefPosition* useRefPos = newRefPosition(interval, currentLoc, RefTypeUse, operand, candidates, multiRegIdx);
useRefPos->setRegOptional(regOptional);
return useRefPos;
}
//------------------------------------------------------------------------
// BuildIndirUses: Build Use RefPositions for an indirection that might be contained
//
// Arguments:
// indirTree - The indirection node of interest
//
// Return Value:
// The number of source registers used by the *parent* of this node.
//
// Notes:
// This method may only be used if the candidates are the same for all sources.
//
int LinearScan::BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates)
{
GenTree* const addr = indirTree->gtOp1;
return BuildAddrUses(addr, candidates);
}
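//------------------------------------------------------------------------
// BuildAddrUses: Build Use RefPositions for an address computation that might be contained.
//
// Arguments:
//    addr       - The address node of interest
//    candidates - The register candidates for the use(s)
//
// Return Value:
//    The number of source registers used by the address.
//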
int LinearScan::BuildAddrUses(GenTree* addr, regMaskTP candidates)
{
if (!addr->isContained())
{
BuildUse(addr, candidates);
return 1;
}
if (!addr->OperIs(GT_LEA))
{
return 0;
}
GenTreeAddrMode* const addrMode = addr->AsAddrMode();
unsigned srcCount = 0;
if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained())
{
BuildUse(addrMode->Base(), candidates);
srcCount++;
}
if (addrMode->Index() != nullptr)
{
if (!addrMode->Index()->isContained())
{
BuildUse(addrMode->Index(), candidates);
srcCount++;
}
#ifdef TARGET_ARM64
else if (addrMode->Index()->OperIs(GT_BFIZ))
{
GenTreeCast* cast = addrMode->Index()->gtGetOp1()->AsCast();
assert(cast->isContained());
BuildUse(cast->CastOp(), candidates);
srcCount++;
}
#endif
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildOperandUses: Build Use RefPositions for an operand that might be contained.
//
// Arguments:
// node - The node of interest
//
// Return Value:
// The number of source registers used by the *parent* of this node.
//
int LinearScan::BuildOperandUses(GenTree* node, regMaskTP candidates)
{
if (!node->isContained())
{
BuildUse(node, candidates);
return 1;
}
#ifdef TARGET_ARM64
    // This must be checked before the OperIsHWIntrinsic case below: a contained vector zero
    // node requires no registers.
if (node->IsVectorZero())
{
return 0;
}
#endif
#if !defined(TARGET_64BIT)
if (node->OperIs(GT_LONG))
{
return BuildBinaryUses(node->AsOp(), candidates);
}
#endif // !defined(TARGET_64BIT)
if (node->OperIsIndir())
{
return BuildIndirUses(node->AsIndir(), candidates);
}
if (node->OperIs(GT_LEA))
{
return BuildAddrUses(node, candidates);
}
#ifdef FEATURE_HW_INTRINSICS
if (node->OperIsHWIntrinsic())
{
if (node->AsHWIntrinsic()->OperIsMemoryLoad())
{
return BuildAddrUses(node->AsHWIntrinsic()->Op(1));
}
assert(node->AsHWIntrinsic()->GetOperandCount() == 1);
BuildUse(node->AsHWIntrinsic()->Op(1), candidates);
return 1;
}
#endif // FEATURE_HW_INTRINSICS
#ifdef TARGET_ARM64
if (node->OperIs(GT_MUL))
{
// Can be contained for MultiplyAdd on arm64
return BuildBinaryUses(node->AsOp(), candidates);
}
if (node->OperIs(GT_NEG, GT_CAST, GT_LSH))
{
// GT_NEG can be contained for MultiplyAdd on arm64
// GT_CAST and GT_LSH for ADD with sign/zero extension
return BuildOperandUses(node->gtGetOp1(), candidates);
}
#endif
return 0;
}
//------------------------------------------------------------------------
// setDelayFree: Mark a RefPosition as delayRegFree, and set pendingDelayFree
//
// Arguments:
// use - The use RefPosition to mark
//
void LinearScan::setDelayFree(RefPosition* use)
{
use->delayRegFree = true;
pendingDelayFree = true;
}
//------------------------------------------------------------------------
// BuildDelayFreeUses: Build Use RefPositions for an operand that might be contained,
// and which may need to be marked delayRegFree
//
// Arguments:
// node - The node of interest
// rmwNode - The node that has RMW semantics (if applicable)
// candidates - The set of candidates for the uses
//
// Return Value:
// The number of source registers used by the *parent* of this node.
//
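// Notes:
//    A use marked delayRegFree keeps its register busy through the consuming node's def location,
//    so the def is not assigned the same register; see the comments below for the case where the
//    use may safely share a register with the RMW operand.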
int LinearScan::BuildDelayFreeUses(GenTree* node, GenTree* rmwNode, regMaskTP candidates)
{
RefPosition* use = nullptr;
Interval* rmwInterval = nullptr;
bool rmwIsLastUse = false;
GenTree* addr = nullptr;
if ((rmwNode != nullptr) && isCandidateLocalRef(rmwNode))
{
rmwInterval = getIntervalForLocalVarNode(rmwNode->AsLclVar());
// Note: we don't handle multi-reg vars here. It's not clear that there are any cases
// where we'd encounter a multi-reg var in an RMW context.
assert(!rmwNode->AsLclVar()->IsMultiReg());
rmwIsLastUse = rmwNode->AsLclVar()->IsLastUse(0);
}
if (!node->isContained())
{
use = BuildUse(node, candidates);
}
#ifdef TARGET_ARM64
    // This must be checked before the OperIsHWIntrinsic case below: a contained vector zero
    // node requires no registers.
else if (node->IsVectorZero())
{
return 0;
}
#endif
#ifdef FEATURE_HW_INTRINSICS
else if (node->OperIsHWIntrinsic())
{
assert(node->AsHWIntrinsic()->GetOperandCount() == 1);
use = BuildUse(node->AsHWIntrinsic()->Op(1), candidates);
}
#endif
else if (!node->OperIsIndir())
{
return 0;
}
else
{
GenTreeIndir* indirTree = node->AsIndir();
addr = indirTree->gtOp1;
if (!addr->isContained())
{
use = BuildUse(addr, candidates);
}
else if (!addr->OperIs(GT_LEA))
{
return 0;
}
}
if (use != nullptr)
{
// If node != rmwNode, then definitely node should be marked as "delayFree".
// However, if node == rmwNode, then we can mark node as "delayFree" only if
        // neither node nor rmwNode is a last use. If either of them is a last use,
// we can safely reuse the rmwNode as destination.
if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse))
{
setDelayFree(use);
}
return 1;
}
// If we reach here we have a contained LEA in 'addr'.
GenTreeAddrMode* const addrMode = addr->AsAddrMode();
unsigned srcCount = 0;
if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained())
{
use = BuildUse(addrMode->Base(), candidates);
if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse))
{
setDelayFree(use);
}
srcCount++;
}
if ((addrMode->Index() != nullptr) && !addrMode->Index()->isContained())
{
use = BuildUse(addrMode->Index(), candidates);
if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse))
{
setDelayFree(use);
}
srcCount++;
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildBinaryUses: Get the RefInfoListNodes for the operands of the
// given node, and build uses for them.
//
// Arguments:
// node - a GenTreeOp
//
// Return Value:
// The number of actual register operands.
//
// Notes:
// The operands must already have been processed by buildRefPositionsForNode, and their
// RefInfoListNodes placed in the defList.
//
int LinearScan::BuildBinaryUses(GenTreeOp* node, regMaskTP candidates)
{
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2IfPresent();
#ifdef TARGET_XARCH
if (node->OperIsBinary() && isRMWRegOper(node))
{
assert(op2 != nullptr);
return BuildRMWUses(node, op1, op2, candidates);
}
#endif // TARGET_XARCH
int srcCount = 0;
if (op1 != nullptr)
{
srcCount += BuildOperandUses(op1, candidates);
}
if (op2 != nullptr)
{
srcCount += BuildOperandUses(op2, candidates);
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildStoreLocDef: Build a definition RefPosition for a local store
//
// Arguments:
//    storeLoc     - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
//    varDsc       - the LclVarDsc of the lclVar (or promoted field) being defined
//    singleUseRef - the RefPosition for the use of the source, if there is a single use; otherwise nullptr
//    index        - the index of this def for a multi-reg local (0 for a single-reg local)
//
// Notes:
// This takes an index to enable building multiple defs for a multi-reg local.
//
void LinearScan::BuildStoreLocDef(GenTreeLclVarCommon* storeLoc,
LclVarDsc* varDsc,
RefPosition* singleUseRef,
int index)
{
assert(varDsc->lvTracked);
unsigned varIndex = varDsc->lvVarIndex;
Interval* varDefInterval = getIntervalForLocalVar(varIndex);
if (!storeLoc->IsLastUse(index))
{
VarSetOps::AddElemD(compiler, currentLiveVars, varIndex);
}
if (singleUseRef != nullptr)
{
Interval* srcInterval = singleUseRef->getInterval();
if (srcInterval->relatedInterval == nullptr)
{
// Preference the source to the dest, unless this is a non-last-use localVar.
// Note that the last-use info is not correct, but it is a better approximation than preferencing
// the source to the dest, if the source's lifetime extends beyond the dest.
if (!srcInterval->isLocalVar || (singleUseRef->treeNode->gtFlags & GTF_VAR_DEATH) != 0)
{
srcInterval->assignRelatedInterval(varDefInterval);
}
}
else if (!srcInterval->isLocalVar)
{
// Preference the source to dest, if src is not a local var.
srcInterval->assignRelatedInterval(varDefInterval);
}
}
regMaskTP defCandidates = RBM_NONE;
var_types type = varDsc->GetRegisterType();
#ifdef TARGET_X86
if (varTypeIsByte(type))
{
defCandidates = allByteRegs();
}
else
{
defCandidates = allRegs(type);
}
#else
defCandidates = allRegs(type);
#endif // TARGET_X86
RefPosition* def = newRefPosition(varDefInterval, currentLoc + 1, RefTypeDef, storeLoc, defCandidates, index);
if (varDefInterval->isWriteThru)
{
// We always make write-thru defs reg-optional, as we can store them if they don't
// get a register.
def->regOptional = true;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (Compiler::varTypeNeedsPartialCalleeSave(varDefInterval->registerType))
{
varDefInterval->isPartiallySpilled = false;
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
}
//------------------------------------------------------------------------
// BuildMultiRegStoreLoc: Set register requirements for a store of a lclVar
//
// Arguments:
// storeLoc - the multireg local store (GT_STORE_LCL_VAR)
//
// Returns:
// The number of source registers read.
//
int LinearScan::BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc)
{
GenTree* op1 = storeLoc->gtGetOp1();
unsigned int dstCount = storeLoc->GetFieldCount(compiler);
unsigned int srcCount = dstCount;
LclVarDsc* varDsc = compiler->lvaGetDesc(storeLoc);
assert(compiler->lvaEnregMultiRegVars);
assert(storeLoc->OperGet() == GT_STORE_LCL_VAR);
bool isMultiRegSrc = op1->IsMultiRegNode();
// The source must be:
    // - a multi-reg source,
    // - an enregisterable SIMD type, or
    // - an in-memory local.
//
if (isMultiRegSrc)
{
assert(op1->GetMultiRegCount() == srcCount);
}
else if (varTypeIsEnregisterable(op1))
{
// Create a delay free use, as we'll have to use it to create each field
RefPosition* use = BuildUse(op1, RBM_NONE);
setDelayFree(use);
srcCount = 1;
}
else
{
// Otherwise we must have an in-memory struct lclVar.
// We will just load directly into the register allocated for this lclVar,
// so we don't need to build any uses.
assert(op1->OperIs(GT_LCL_VAR) && op1->isContained() && op1->TypeIs(TYP_STRUCT));
srcCount = 0;
}
// For multi-reg local stores of multi-reg sources, the code generator will read each source
// register, and then move it, if needed, to the destination register. These nodes have
// 2*N locations where N is the number of registers, so that the liveness can
// be reflected accordingly.
//
for (unsigned int i = 0; i < dstCount; ++i)
{
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i);
RefPosition* singleUseRef = nullptr;
if (isMultiRegSrc)
{
regMaskTP srcCandidates = RBM_NONE;
#ifdef TARGET_X86
var_types type = fieldVarDsc->TypeGet();
if (varTypeIsByte(type))
{
srcCandidates = allByteRegs();
}
#endif // TARGET_X86
singleUseRef = BuildUse(op1, srcCandidates, i);
}
assert(isCandidateVar(fieldVarDsc));
BuildStoreLocDef(storeLoc, fieldVarDsc, singleUseRef, i);
if (isMultiRegSrc && (i < (dstCount - 1)))
{
currentLoc += 2;
}
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildStoreLoc: Set register requirements for a store of a lclVar
//
// Arguments:
// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
//
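// Return Value:
//    The number of source registers used.
//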
// Notes:
// This involves:
// - Setting the appropriate candidates.
// - Handling of contained immediates.
// - Requesting an internal register for SIMD12 stores.
//
int LinearScan::BuildStoreLoc(GenTreeLclVarCommon* storeLoc)
{
GenTree* op1 = storeLoc->gtGetOp1();
int srcCount;
RefPosition* singleUseRef = nullptr;
LclVarDsc* varDsc = compiler->lvaGetDesc(storeLoc);
if (storeLoc->IsMultiRegLclVar())
{
return BuildMultiRegStoreLoc(storeLoc->AsLclVar());
}
// First, define internal registers.
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(storeLoc) && !op1->IsCnsIntOrI() && (storeLoc->TypeGet() == TYP_SIMD12))
{
// Need an additional register to extract upper 4 bytes of Vector3,
// it has to be float for x86.
buildInternalFloatRegisterDefForNode(storeLoc, allSIMDRegs());
}
#endif // FEATURE_SIMD
// Second, use source registers.
if (op1->IsMultiRegNode() && (op1->GetMultiRegCount() > 1))
{
// This is the case where the source produces multiple registers.
// This must be a store lclvar.
assert(storeLoc->OperGet() == GT_STORE_LCL_VAR);
srcCount = op1->GetMultiRegCount();
for (int i = 0; i < srcCount; ++i)
{
BuildUse(op1, RBM_NONE, i);
}
#if defined(FEATURE_SIMD) && defined(TARGET_X86)
if (TargetOS::IsWindows && !compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
if (varTypeIsSIMD(storeLoc) && op1->IsCall())
{
// Need an additional register to create a SIMD8 from EAX/EDX without SSE4.1.
buildInternalFloatRegisterDefForNode(storeLoc, allSIMDRegs());
if (isCandidateVar(varDsc))
{
// This internal register must be different from the target register.
setInternalRegsDelayFree = true;
}
}
}
#endif // FEATURE_SIMD && TARGET_X86
}
else if (op1->isContained() && op1->OperIs(GT_BITCAST))
{
GenTree* bitCastSrc = op1->gtGetOp1();
RegisterType registerType = bitCastSrc->TypeGet();
singleUseRef = BuildUse(bitCastSrc, allRegs(registerType));
Interval* srcInterval = singleUseRef->getInterval();
assert(srcInterval->registerType == registerType);
srcCount = 1;
}
#ifndef TARGET_64BIT
else if (varTypeIsLong(op1))
{
// GT_MUL_LONG is handled by the IsMultiRegNode case above.
assert(op1->OperIs(GT_LONG));
assert(op1->isContained() && !op1->gtGetOp1()->isContained() && !op1->gtGetOp2()->isContained());
srcCount = BuildBinaryUses(op1->AsOp());
assert(srcCount == 2);
}
#endif // !TARGET_64BIT
else if (op1->isContained())
{
#ifdef TARGET_XARCH
if (varTypeIsSIMD(storeLoc))
{
// This is the zero-init case, and we need a register to hold the zero.
// (On Arm64 we can just store REG_ZR.)
assert(op1->IsSIMDZero());
singleUseRef = BuildUse(op1->gtGetOp1());
srcCount = 1;
}
else
#endif
{
srcCount = 0;
}
}
else
{
srcCount = 1;
regMaskTP srcCandidates = RBM_NONE;
#ifdef TARGET_X86
var_types type = varDsc->GetRegisterType(storeLoc);
if (varTypeIsByte(type))
{
srcCandidates = allByteRegs();
}
#endif // TARGET_X86
singleUseRef = BuildUse(op1, srcCandidates);
}
// Third, use internal registers.
#ifdef TARGET_ARM
if (storeLoc->OperIs(GT_STORE_LCL_FLD) && storeLoc->AsLclFld()->IsOffsetMisaligned())
{
buildInternalIntRegisterDefForNode(storeLoc); // to generate address.
buildInternalIntRegisterDefForNode(storeLoc); // to move float into an int reg.
if (storeLoc->TypeIs(TYP_DOUBLE))
{
buildInternalIntRegisterDefForNode(storeLoc); // to move the second half into an int reg.
}
}
#endif // TARGET_ARM
#if defined(FEATURE_SIMD) || defined(TARGET_ARM)
buildInternalRegisterUses();
#endif // FEATURE_SIMD || TARGET_ARM
// Fourth, define destination registers.
// Add the lclVar to currentLiveVars (if it will remain live)
if (isCandidateVar(varDsc))
{
BuildStoreLocDef(storeLoc, varDsc, singleUseRef, 0);
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildSimple: Builds use RefPositions for trees requiring no special handling
//
// Arguments:
// tree - The node of interest
//
// Return Value:
// The number of use RefPositions created
//
int LinearScan::BuildSimple(GenTree* tree)
{
unsigned kind = tree->OperKind();
int srcCount = 0;
if ((kind & GTK_LEAF) == 0)
{
assert((kind & GTK_SMPOP) != 0);
srcCount = BuildBinaryUses(tree->AsOp());
}
if (tree->IsValue())
{
BuildDef(tree);
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildReturn: Set the NodeInfo for a GT_RETURN.
//
// Arguments:
// tree - The node of interest
//
// Return Value:
// The number of sources consumed by this node.
//
int LinearScan::BuildReturn(GenTree* tree)
{
GenTree* op1 = tree->gtGetOp1();
#if !defined(TARGET_64BIT)
if (tree->TypeGet() == TYP_LONG)
{
assert((op1->OperGet() == GT_LONG) && op1->isContained());
GenTree* loVal = op1->gtGetOp1();
GenTree* hiVal = op1->gtGetOp2();
BuildUse(loVal, RBM_LNGRET_LO);
BuildUse(hiVal, RBM_LNGRET_HI);
return 2;
}
else
#endif // !defined(TARGET_64BIT)
if ((tree->TypeGet() != TYP_VOID) && !op1->isContained())
{
regMaskTP useCandidates = RBM_NONE;
#if FEATURE_MULTIREG_RET
#ifdef TARGET_ARM64
if (varTypeIsSIMD(tree) && !op1->IsMultiRegLclVar())
{
useCandidates = allSIMDRegs();
if (op1->OperGet() == GT_LCL_VAR)
{
assert(op1->TypeGet() != TYP_SIMD32);
useCandidates = RBM_DOUBLERET;
}
BuildUse(op1, useCandidates);
return 1;
}
#endif // TARGET_ARM64
if (varTypeIsStruct(tree))
{
// op1 has to be either a lclvar or a multi-reg returning call
if ((op1->OperGet() == GT_LCL_VAR) && !op1->IsMultiRegLclVar())
{
BuildUse(op1, useCandidates);
}
else
{
noway_assert(op1->IsMultiRegCall() || op1->IsMultiRegLclVar());
int srcCount;
ReturnTypeDesc nonCallRetTypeDesc;
const ReturnTypeDesc* pRetTypeDesc;
if (op1->OperIs(GT_CALL))
{
pRetTypeDesc = op1->AsCall()->GetReturnTypeDesc();
}
else
{
assert(compiler->lvaEnregMultiRegVars);
LclVarDsc* varDsc = compiler->lvaGetDesc(op1->AsLclVar());
nonCallRetTypeDesc.InitializeStructReturnType(compiler, varDsc->GetStructHnd(),
compiler->info.compCallConv);
pRetTypeDesc = &nonCallRetTypeDesc;
assert(compiler->lvaGetDesc(op1->AsLclVar())->lvFieldCnt == nonCallRetTypeDesc.GetReturnRegCount());
}
srcCount = pRetTypeDesc->GetReturnRegCount();
// For any source that's coming from a different register file, we need to ensure that
// we reserve the specific ABI register we need.
bool hasMismatchedRegTypes = false;
if (op1->IsMultiRegLclVar())
{
for (int i = 0; i < srcCount; i++)
{
RegisterType srcType = regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i));
RegisterType dstType = regType(pRetTypeDesc->GetReturnRegType(i));
if (srcType != dstType)
{
hasMismatchedRegTypes = true;
regMaskTP dstRegMask = genRegMask(pRetTypeDesc->GetABIReturnReg(i));
if (varTypeUsesFloatReg(dstType))
{
buildInternalFloatRegisterDefForNode(tree, dstRegMask);
}
else
{
buildInternalIntRegisterDefForNode(tree, dstRegMask);
}
}
}
}
for (int i = 0; i < srcCount; i++)
{
// We will build uses of the type of the operand registers/fields, and the codegen
// for return will move as needed.
if (!hasMismatchedRegTypes || (regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)) ==
regType(pRetTypeDesc->GetReturnRegType(i))))
{
BuildUse(op1, genRegMask(pRetTypeDesc->GetABIReturnReg(i)), i);
}
else
{
BuildUse(op1, RBM_NONE, i);
}
}
if (hasMismatchedRegTypes)
{
buildInternalRegisterUses();
}
return srcCount;
}
}
else
#endif // FEATURE_MULTIREG_RET
{
// Non-struct type return - determine useCandidates
switch (tree->TypeGet())
{
case TYP_VOID:
useCandidates = RBM_NONE;
break;
case TYP_FLOAT:
useCandidates = RBM_FLOATRET;
break;
case TYP_DOUBLE:
// We ONLY want the valid double register in the RBM_DOUBLERET mask.
useCandidates = (RBM_DOUBLERET & RBM_ALLDOUBLE);
break;
case TYP_LONG:
useCandidates = RBM_LNGRET;
break;
default:
useCandidates = RBM_INTRET;
break;
}
BuildUse(op1, useCandidates);
return 1;
}
}
// No kills or defs.
return 0;
}
//------------------------------------------------------------------------
// supportsSpecialPutArg: Determine if we can support specialPutArgs
//
// Return Value:
// True iff specialPutArg intervals can be supported.
//
// Notes:
// See below.
//
bool LinearScan::supportsSpecialPutArg()
{
#if defined(DEBUG) && defined(TARGET_X86)
// On x86, `LSRA_LIMIT_CALLER` is too restrictive to allow the use of special put args: this stress mode
// leaves only three registers allocatable--eax, ecx, and edx--of which the latter two are also used for the
    // first two integral arguments to a call. This can leave us with too few registers to successfully allocate in
// situations like the following:
//
// t1026 = lclVar ref V52 tmp35 u:3 REG NA <l:$3a1, c:$98d>
//
// /--* t1026 ref
// t1352 = * putarg_reg ref REG NA
//
// t342 = lclVar int V14 loc6 u:4 REG NA $50c
//
// t343 = const int 1 REG NA $41
//
// /--* t342 int
// +--* t343 int
// t344 = * + int REG NA $495
//
// t345 = lclVar int V04 arg4 u:2 REG NA $100
//
// /--* t344 int
// +--* t345 int
// t346 = * % int REG NA $496
//
// /--* t346 int
// t1353 = * putarg_reg int REG NA
//
// t1354 = lclVar ref V52 tmp35 (last use) REG NA
//
// /--* t1354 ref
// t1355 = * lea(b+0) byref REG NA
//
// Here, the first `putarg_reg` would normally be considered a special put arg, which would remove `ecx` from the
// set of allocatable registers, leaving only `eax` and `edx`. The allocator will then fail to allocate a register
// for the def of `t345` if arg4 is not a register candidate: the corresponding ref position will be constrained to
// { `ecx`, `ebx`, `esi`, `edi` }, which `LSRA_LIMIT_CALLER` will further constrain to `ecx`, which will not be
// available due to the special put arg.
return getStressLimitRegs() != LSRA_LIMIT_CALLER;
#else
return true;
#endif
}
//------------------------------------------------------------------------
// BuildPutArgReg: Set the NodeInfo for a PUTARG_REG.
//
// Arguments:
// node - The PUTARG_REG node.
//
// Return Value:
//    The number of source registers used.
//
int LinearScan::BuildPutArgReg(GenTreeUnOp* node)
{
assert(node != nullptr);
assert(node->OperIsPutArgReg());
regNumber argReg = node->GetRegNum();
assert(argReg != REG_NA);
bool isSpecialPutArg = false;
int srcCount = 1;
GenTree* op1 = node->gtGetOp1();
// First, handle the GT_OBJ case, which loads into the arg register
// (so we don't set the use to prefer that register for the source address).
if (op1->OperIs(GT_OBJ))
{
GenTreeObj* obj = op1->AsObj();
GenTree* addr = obj->Addr();
unsigned size = obj->GetLayout()->GetSize();
assert(size <= MAX_PASS_SINGLEREG_BYTES);
if (addr->OperIsLocalAddr())
{
// We don't need a source register.
assert(addr->isContained());
srcCount = 0;
}
else if (!isPow2(size))
{
// We'll need an internal register to do the odd-size load.
// This can only happen with integer registers.
assert(genIsValidIntReg(argReg));
buildInternalIntRegisterDefForNode(node);
BuildUse(addr);
buildInternalRegisterUses();
}
return srcCount;
}
// To avoid redundant moves, have the argument operand computed in the
// register in which the argument is passed to the call.
regMaskTP argMask = genRegMask(argReg);
RefPosition* use = BuildUse(op1, argMask);
if (supportsSpecialPutArg() && isCandidateLocalRef(op1) && ((op1->gtFlags & GTF_VAR_DEATH) == 0))
{
// This is the case for a "pass-through" copy of a lclVar. In the case where it is a non-last-use,
// we don't want the def of the copy to kill the lclVar register, if it is assigned the same register
// (which is actually what we hope will happen).
JITDUMP("Setting putarg_reg as a pass-through of a non-last use lclVar\n");
// Preference the destination to the interval of the first register defined by the first operand.
assert(use->getInterval()->isLocalVar);
isSpecialPutArg = true;
}
#ifdef TARGET_ARM
// If type of node is `long` then it is actually `double`.
// The actual `long` types must have been transformed as a field list with two fields.
if (node->TypeGet() == TYP_LONG)
{
srcCount++;
regMaskTP argMaskHi = genRegMask(REG_NEXT(argReg));
assert(genRegArgNext(argReg) == REG_NEXT(argReg));
use = BuildUse(op1, argMaskHi, 1);
BuildDef(node, argMask, 0);
BuildDef(node, argMaskHi, 1);
}
else
#endif // TARGET_ARM
{
RefPosition* def = BuildDef(node, argMask);
if (isSpecialPutArg)
{
def->getInterval()->isSpecialPutArg = true;
def->getInterval()->assignRelatedInterval(use->getInterval());
}
}
return srcCount;
}
//------------------------------------------------------------------------
// HandleFloatVarArgs: Handle additional register requirements for a varargs call
//
// Arguments:
// call - The call node of interest
// argNode - The current argument
//
// Return Value:
// None.
//
// Notes:
// In the case of a varargs call, the ABI dictates that if we have floating point args,
// we must pass the enregistered arguments in both the integer and floating point registers.
// Since the integer register is not associated with the arg node, we will reserve it as
// an internal register on the call so that it is not used during the evaluation of the call node
// (e.g. for the target).
void LinearScan::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs)
{
if (compFeatureVarArg() && call->IsVarargs() && varTypeIsFloating(argNode))
{
*callHasFloatRegArgs = true;
// We'll have to return the internal def and then later create a use for it.
regNumber argReg = argNode->GetRegNum();
regNumber targetReg = compiler->getCallArgIntRegister(argReg);
buildInternalIntRegisterDefForNode(call, genRegMask(targetReg));
}
}
//------------------------------------------------------------------------
// BuildGCWriteBarrier: Handle additional register requirements for a GC write barrier
//
// Arguments:
// tree - The STORE_IND for which a write barrier is required
//
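// Return Value:
//    The number of source registers used (the address and the value).
//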
int LinearScan::BuildGCWriteBarrier(GenTree* tree)
{
GenTree* addr = tree->gtGetOp1();
GenTree* src = tree->gtGetOp2();
// In the case where we are doing a helper assignment, even if the dst
// is an indir through an lea, we need to actually instantiate the
// lea in a register
assert(!addr->isContained() && !src->isContained());
regMaskTP addrCandidates = RBM_ARG_0;
regMaskTP srcCandidates = RBM_ARG_1;
#if defined(TARGET_ARM64)
// the 'addr' goes into x14 (REG_WRITE_BARRIER_DST)
// the 'src' goes into x15 (REG_WRITE_BARRIER_SRC)
//
addrCandidates = RBM_WRITE_BARRIER_DST;
srcCandidates = RBM_WRITE_BARRIER_SRC;
#elif defined(TARGET_X86) && NOGC_WRITE_BARRIERS
bool useOptimizedWriteBarrierHelper = compiler->codeGen->genUseOptimizedWriteBarriers(tree, src);
if (useOptimizedWriteBarrierHelper)
{
// Special write barrier:
// op1 (addr) goes into REG_WRITE_BARRIER (rdx) and
// op2 (src) goes into any int register.
addrCandidates = RBM_WRITE_BARRIER;
srcCandidates = RBM_WRITE_BARRIER_SRC;
}
#endif // defined(TARGET_X86) && NOGC_WRITE_BARRIERS
BuildUse(addr, addrCandidates);
BuildUse(src, srcCandidates);
regMaskTP killMask = getKillSetForStoreInd(tree->AsStoreInd());
buildKillPositionsForNode(tree, currentLoc + 1, killMask);
return 2;
}
//------------------------------------------------------------------------
// BuildCmp: Set the register requirements for a compare.
//
// Arguments:
// tree - The node of interest
//
// Return Value:
//    The number of source registers used.
//
int LinearScan::BuildCmp(GenTree* tree)
{
assert(tree->OperIsCompare() || tree->OperIs(GT_CMP) || tree->OperIs(GT_JCMP));
regMaskTP dstCandidates = RBM_NONE;
regMaskTP op1Candidates = RBM_NONE;
regMaskTP op2Candidates = RBM_NONE;
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
#ifdef TARGET_X86
// If the compare is used by a jump, we just need to set the condition codes. If not, then we need
// to store the result into the low byte of a register, which requires the dst be a byteable register.
if (tree->TypeGet() != TYP_VOID)
{
dstCandidates = allByteRegs();
}
bool needByteRegs = false;
if (varTypeIsByte(tree))
{
if (!varTypeIsFloating(op1))
{
needByteRegs = true;
}
}
// Example1: GT_EQ(int, op1 of type ubyte, op2 of type ubyte) - in this case codegen uses
// ubyte as the result of comparison and if the result needs to be materialized into a reg
// simply zero extend it to TYP_INT size. Here is an example of generated code:
// cmp dl, byte ptr[addr mode]
// movzx edx, dl
else if (varTypeIsByte(op1) && varTypeIsByte(op2))
{
needByteRegs = true;
}
// Example2: GT_EQ(int, op1 of type ubyte, op2 is GT_CNS_INT) - in this case codegen uses
// ubyte as the result of the comparison and if the result needs to be materialized into a reg
// simply zero extend it to TYP_INT size.
else if (varTypeIsByte(op1) && op2->IsCnsIntOrI())
{
needByteRegs = true;
}
// Example3: GT_EQ(int, op1 is GT_CNS_INT, op2 of type ubyte) - in this case codegen uses
// ubyte as the result of the comparison and if the result needs to be materialized into a reg
// simply zero extend it to TYP_INT size.
else if (op1->IsCnsIntOrI() && varTypeIsByte(op2))
{
needByteRegs = true;
}
if (needByteRegs)
{
if (!op1->isContained())
{
op1Candidates = allByteRegs();
}
if (!op2->isContained())
{
op2Candidates = allByteRegs();
}
}
#endif // TARGET_X86
int srcCount = BuildOperandUses(op1, op1Candidates);
srcCount += BuildOperandUses(op2, op2Candidates);
if (tree->TypeGet() != TYP_VOID)
{
BuildDef(tree, dstCandidates);
}
return srcCount;
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Interval and RefPosition Building XX
XX XX
XX This contains the logic for constructing Intervals and RefPositions that XX
XX is common across architectures. See lsra{arch}.cpp for the architecture- XX
XX specific methods for building. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "lsra.h"
//------------------------------------------------------------------------
// RefInfoList
//------------------------------------------------------------------------
// removeListNode - retrieve the RefInfoListNode for the given GenTree node
//
// Notes:
// The BuildNode methods use this helper to retrieve the RefPositions for child nodes
//     from the defList being constructed. Note that, if the user knows the order of the operands,
// it is expected that they should just retrieve them directly.
RefInfoListNode* RefInfoList::removeListNode(GenTree* node)
{
RefInfoListNode* prevListNode = nullptr;
for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next())
{
if (listNode->treeNode == node)
{
assert(listNode->ref->getMultiRegIdx() == 0);
return removeListNode(listNode, prevListNode);
}
prevListNode = listNode;
}
assert(!"removeListNode didn't find the node");
unreached();
}
//------------------------------------------------------------------------
// removeListNode - retrieve the RefInfoListNode for one reg of the given multireg GenTree node
//
// Notes:
// The BuildNode methods use this helper to retrieve the RefPositions for child nodes
//     from the defList being constructed. Note that, if the user knows the order of the operands,
// it is expected that they should just retrieve them directly.
RefInfoListNode* RefInfoList::removeListNode(GenTree* node, unsigned multiRegIdx)
{
RefInfoListNode* prevListNode = nullptr;
for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next())
{
if ((listNode->treeNode == node) && (listNode->ref->getMultiRegIdx() == multiRegIdx))
{
return removeListNode(listNode, prevListNode);
}
prevListNode = listNode;
}
assert(!"removeListNode didn't find the node");
unreached();
}
//------------------------------------------------------------------------
// RefInfoListNodePool::RefInfoListNodePool:
// Creates a pool of `RefInfoListNode` values.
//
// Arguments:
// compiler - The compiler context.
// preallocate - The number of nodes to preallocate.
//
RefInfoListNodePool::RefInfoListNodePool(Compiler* compiler, unsigned preallocate) : m_compiler(compiler)
{
if (preallocate > 0)
{
RefInfoListNode* preallocatedNodes = compiler->getAllocator(CMK_LSRA).allocate<RefInfoListNode>(preallocate);
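        // Chain the preallocated nodes into a singly-linked free list; the last node allocated
        // ends up at the head of the list.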
RefInfoListNode* head = preallocatedNodes;
head->m_next = nullptr;
for (unsigned i = 1; i < preallocate; i++)
{
RefInfoListNode* node = &preallocatedNodes[i];
node->m_next = head;
head = node;
}
m_freeList = head;
}
}
//------------------------------------------------------------------------
// RefInfoListNodePool::GetNode: Fetches an unused node from the
// pool.
//
// Arguments:
// r - The `RefPosition` for the `RefInfo` value.
// t - The IR node for the `RefInfo` value
//
// Returns:
// A pooled or newly-allocated `RefInfoListNode`, depending on the
// contents of the pool.
RefInfoListNode* RefInfoListNodePool::GetNode(RefPosition* r, GenTree* t)
{
RefInfoListNode* head = m_freeList;
if (head == nullptr)
{
head = m_compiler->getAllocator(CMK_LSRA).allocate<RefInfoListNode>(1);
}
else
{
m_freeList = head->m_next;
}
head->ref = r;
head->treeNode = t;
head->m_next = nullptr;
return head;
}
//------------------------------------------------------------------------
// RefInfoListNodePool::ReturnNode: Returns a node to the pool so that it can be reused.
//
// Arguments:
//    listNode - The node to return.
//
void RefInfoListNodePool::ReturnNode(RefInfoListNode* listNode)
{
listNode->m_next = m_freeList;
m_freeList = listNode;
}
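// Illustrative usage sketch (not part of the original source): during the build phase a def
// is recorded by pairing its RefPosition with the defining tree, and the node is recycled
// once the corresponding use has been built, roughly:
//
//   RefInfoListNode* listNode = pool.GetNode(defRefPosition, defNode); // record the def
//   ...                                                                // later, at the consumer
//   pool.ReturnNode(listNode);                                         // recycle the node
//
// Because of the free list, steady-state building allocates no new nodes once the pool warms up.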
//------------------------------------------------------------------------
// newInterval: Create a new Interval of the given RegisterType.
//
// Arguments:
// theRegisterType - The type of Interval to create.
//
// TODO-Cleanup: Consider adding an overload that takes a varDsc, and can appropriately
// set such fields as isStructField
//
Interval* LinearScan::newInterval(RegisterType theRegisterType)
{
intervals.emplace_back(theRegisterType, allRegs(theRegisterType));
Interval* newInt = &intervals.back();
#ifdef DEBUG
newInt->intervalIndex = static_cast<unsigned>(intervals.size() - 1);
#endif // DEBUG
DBEXEC(VERBOSE, newInt->dump());
return newInt;
}
//------------------------------------------------------------------------
// newRefPositionRaw: Create a new RefPosition
//
// Arguments:
// nodeLocation - The location of the reference.
// treeNode - The GenTree of the reference.
// refType - The type of reference
//
// Notes:
// This is used to create RefPositions for both RegRecords and Intervals,
// so it does only the common initialization.
//
RefPosition* LinearScan::newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType)
{
refPositions.emplace_back(curBBNum, nodeLocation, treeNode, refType);
RefPosition* newRP = &refPositions.back();
#ifdef DEBUG
newRP->rpNum = static_cast<unsigned>(refPositions.size() - 1);
#endif // DEBUG
return newRP;
}
//------------------------------------------------------------------------
// resolveConflictingDefAndUse: Resolve the situation where we have conflicting def and use
// register requirements on a single-def, single-use interval.
//
// Arguments:
// defRefPosition - The interval definition
// useRefPosition - The (sole) interval use
//
// Return Value:
// None.
//
// Assumptions:
// The two RefPositions are for the same interval, which is a tree-temp.
//
// Notes:
// We require some special handling for the case where the use is a "delayRegFree" case of a fixedReg.
// In that case, if we change the registerAssignment on the useRefPosition, we will lose the fact that,
// even if we assign a different register (and rely on codegen to do the copy), that fixedReg also needs
// to remain busy until the Def register has been allocated. In that case, we don't allow Case 1 or Case 4
// below.
// Here are the cases we consider (in this order):
// 1. If The defRefPosition specifies a single register, and there are no conflicting
// FixedReg uses of it between the def and use, we use that register, and the code generator
// will insert the copy. Note that it cannot be in use because there is a FixedRegRef for the def.
// 2. If the useRefPosition specifies a single register, and it is not in use, and there are no
// conflicting FixedReg uses of it between the def and use, we use that register, and the code generator
// will insert the copy.
// 3. If the defRefPosition specifies a single register (but there are conflicts, as determined
// in 1.), and there are no conflicts with the useRefPosition register (if it's a single register),
// we set the register requirements on the defRefPosition to the use registers, and the
// code generator will insert a copy on the def. We can't rely on the code generator to put a copy
// on the use if it has multiple possible candidates, as it won't know which one has been allocated.
// 4. If the useRefPosition specifies a single register, and there are no conflicts with the register
// on the defRefPosition, we leave the register requirements on the defRefPosition as-is, and set
// the useRefPosition to the def registers, for similar reasons to case #3.
// 5. If both the defRefPosition and the useRefPosition specify single registers, but both have conflicts,
// We set the candidates on defRefPosition to be all regs of the appropriate type, and since they are
// single registers, codegen can insert the copy.
// 6. Finally, if the RefPositions specify disjoint subsets of the registers (or the use is fixed but
// has a conflict), we must insert a copy. The copy will be inserted before the use if the
// use is not fixed (in the fixed case, the code generator will insert the use).
//
// TODO-CQ: We get bad register allocation in case #3 in the situation where no register is
// available for the lifetime. We end up allocating a register that must be spilled, and it probably
// won't be the register that is actually defined by the target instruction. So, we have to copy it
// and THEN spill it. In this case, we should be using the def requirement. But we need to change
// the interface to this method a bit to make that work (e.g. returning a candidate set to use, but
// leaving the registerAssignment as-is on the def, so that if we find that we need to spill anyway
// we can use the fixed-reg on the def.)
//
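// Illustrative example (not from the original source): suppose the def must target RDX
// (defRegAssignment == RBM_RDX) while the use accepts any integer register. There is no
// def/use conflict, so if no other FixedReg reference to RDX occurs before the use, case #1
// applies and the use's registerAssignment is narrowed to RBM_RDX. If instead the def and use
// require disjoint single registers (say RDX vs. RCX), none of cases #1-#5 apply and we fall
// through to case #6, where a copy must be inserted.
//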
void LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition)
{
assert(!interval->isLocalVar);
RefPosition* useRefPosition = defRefPosition->nextRefPosition;
regMaskTP defRegAssignment = defRefPosition->registerAssignment;
regMaskTP useRegAssignment = useRefPosition->registerAssignment;
RegRecord* defRegRecord = nullptr;
RegRecord* useRegRecord = nullptr;
regNumber defReg = REG_NA;
regNumber useReg = REG_NA;
bool defRegConflict = ((defRegAssignment & useRegAssignment) == RBM_NONE);
bool useRegConflict = defRegConflict;
// If the useRefPosition is a "delayRegFree", we can't change the registerAssignment
// on it, or we will fail to ensure that the fixedReg is busy at the time the target
// (of the node that uses this interval) is allocated.
bool canChangeUseAssignment = !useRefPosition->isFixedRegRef || !useRefPosition->delayRegFree;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CONFLICT));
if (!canChangeUseAssignment)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_FIXED_DELAY_USE));
}
if (defRefPosition->isFixedRegRef && !defRegConflict)
{
defReg = defRefPosition->assignedReg();
defRegRecord = getRegisterRecord(defReg);
if (canChangeUseAssignment)
{
RefPosition* currFixedRegRefPosition = defRegRecord->recentRefPosition;
assert(currFixedRegRefPosition != nullptr &&
currFixedRegRefPosition->nodeLocation == defRefPosition->nodeLocation);
if (currFixedRegRefPosition->nextRefPosition == nullptr ||
currFixedRegRefPosition->nextRefPosition->nodeLocation > useRefPosition->getRefEndLocation())
{
// This is case #1. Use the defRegAssignment
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE1));
useRefPosition->registerAssignment = defRegAssignment;
return;
}
else
{
defRegConflict = true;
}
}
}
if (useRefPosition->isFixedRegRef && !useRegConflict)
{
useReg = useRefPosition->assignedReg();
useRegRecord = getRegisterRecord(useReg);
// We know that useRefPosition is a fixed use, so the nextRefPosition must not be null.
RefPosition* nextFixedRegRefPosition = useRegRecord->getNextRefPosition();
assert(nextFixedRegRefPosition != nullptr &&
nextFixedRegRefPosition->nodeLocation <= useRefPosition->nodeLocation);
// First, check to see if there are any conflicting FixedReg references between the def and use.
if (nextFixedRegRefPosition->nodeLocation == useRefPosition->nodeLocation)
{
// OK, no conflicting FixedReg references.
// Now, check to see whether it is currently in use.
if (useRegRecord->assignedInterval != nullptr)
{
RefPosition* possiblyConflictingRef = useRegRecord->assignedInterval->recentRefPosition;
LsraLocation possiblyConflictingRefLocation = possiblyConflictingRef->getRefEndLocation();
if (possiblyConflictingRefLocation >= defRefPosition->nodeLocation)
{
useRegConflict = true;
}
}
if (!useRegConflict)
{
// This is case #2. Use the useRegAssignment
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE2, interval));
defRefPosition->registerAssignment = useRegAssignment;
return;
}
}
else
{
useRegConflict = true;
}
}
if (defRegRecord != nullptr && !useRegConflict)
{
// This is case #3.
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE3, interval));
defRefPosition->registerAssignment = useRegAssignment;
return;
}
if (useRegRecord != nullptr && !defRegConflict && canChangeUseAssignment)
{
// This is case #4.
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE4, interval));
useRefPosition->registerAssignment = defRegAssignment;
return;
}
if (defRegRecord != nullptr && useRegRecord != nullptr)
{
// This is case #5.
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE5, interval));
RegisterType regType = interval->registerType;
assert((getRegisterType(interval, defRefPosition) == regType) &&
(getRegisterType(interval, useRefPosition) == regType));
regMaskTP candidates = allRegs(regType);
defRefPosition->registerAssignment = candidates;
defRefPosition->isFixedRegRef = false;
return;
}
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE6, interval));
return;
}
//------------------------------------------------------------------------
// applyCalleeSaveHeuristics: Set register preferences for an interval based on the given RefPosition
//
// Arguments:
// rp - The RefPosition of interest
//
// Notes:
// This is slightly more general than its name implies, and updates preferences not just
// for callee-save registers.
//
void LinearScan::applyCalleeSaveHeuristics(RefPosition* rp)
{
#ifdef TARGET_AMD64
if (compiler->opts.compDbgEnC)
{
// We only use RSI and RDI for EnC code, so we don't want to favor callee-save regs.
return;
}
#endif // TARGET_AMD64
Interval* theInterval = rp->getInterval();
#ifdef DEBUG
if (!doReverseCallerCallee())
#endif // DEBUG
{
// Set preferences so that this register set will be preferred for earlier refs
theInterval->mergeRegisterPreferences(rp->registerAssignment);
}
}
//------------------------------------------------------------------------
// checkConflictingDefUse: Ensure that we have consistent def/use on SDSU temps.
//
// Arguments:
// useRP - The use RefPosition of a tree temp (SDSU Interval)
//
// Notes:
// There are a couple of cases where this may over-constrain allocation:
// 1. In the case of a non-commutative rmw def (in which the rmw source must be delay-free), or
// 2. In the case where the defining node requires a temp distinct from the target (also a
// delay-free case).
// In those cases, if we propagate a single-register restriction from the consumer to the producer
// the delayed uses will not see a fixed reference in the PhysReg at that position, and may
// incorrectly allocate that register.
// TODO-CQ: This means that we may often require a copy at the use of this node's result.
// This case could be moved to BuildRefPositionsForNode, at the point where the def RefPosition is
// created, causing a RefTypeFixedReg to be added at that location. This, however, results in
// more PhysReg RefPositions (a throughput impact), and a large number of diffs that require
// further analysis to determine benefit.
// See Issue #11274.
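// Illustrative example (not from the original source): if the def currently allows
// {RAX, RCX, RDX} and this use is fixed to RCX, the intersection is RCX, so the def's
// registerAssignment is narrowed to RCX (unless the interval has interfering uses, where
// narrowing to a single register could over-constrain the delayed uses, as described above).
// If the intersection is empty, we just record hasConflictingDefUse and leave both
// assignments alone.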
//
void LinearScan::checkConflictingDefUse(RefPosition* useRP)
{
assert(useRP->refType == RefTypeUse);
Interval* theInterval = useRP->getInterval();
assert(!theInterval->isLocalVar);
RefPosition* defRP = theInterval->firstRefPosition;
// All defs must have a valid treeNode, but we check it below to be conservative.
assert(defRP->treeNode != nullptr);
regMaskTP prevAssignment = defRP->registerAssignment;
regMaskTP newAssignment = (prevAssignment & useRP->registerAssignment);
if (newAssignment != RBM_NONE)
{
if (!isSingleRegister(newAssignment) || !theInterval->hasInterferingUses)
{
defRP->registerAssignment = newAssignment;
}
}
else
{
theInterval->hasConflictingDefUse = true;
}
}
//------------------------------------------------------------------------
// associateRefPosWithInterval: Update the Interval based on the given RefPosition.
//
// Arguments:
// rp - The RefPosition of interest
//
// Notes:
// This is called at the time when 'rp' has just been created, so it becomes
// the nextRefPosition of the recentRefPosition, and both the recentRefPosition
// and lastRefPosition of its referent.
//
void LinearScan::associateRefPosWithInterval(RefPosition* rp)
{
Referenceable* theReferent = rp->referent;
if (theReferent != nullptr)
{
// All RefPositions except the dummy ones at the beginning of blocks
if (rp->isIntervalRef())
{
Interval* theInterval = rp->getInterval();
applyCalleeSaveHeuristics(rp);
if (theInterval->isLocalVar)
{
if (RefTypeIsUse(rp->refType))
{
RefPosition* const prevRP = theInterval->recentRefPosition;
if ((prevRP != nullptr) && (prevRP->bbNum == rp->bbNum))
{
prevRP->lastUse = false;
}
}
rp->lastUse = (rp->refType != RefTypeExpUse) && (rp->refType != RefTypeParamDef) &&
(rp->refType != RefTypeZeroInit) && !extendLifetimes();
}
else if (rp->refType == RefTypeUse)
{
checkConflictingDefUse(rp);
rp->lastUse = true;
}
}
RefPosition* prevRP = theReferent->recentRefPosition;
if (prevRP != nullptr)
{
prevRP->nextRefPosition = rp;
}
else
{
theReferent->firstRefPosition = rp;
}
theReferent->recentRefPosition = rp;
theReferent->lastRefPosition = rp;
}
else
{
assert((rp->refType == RefTypeBB) || (rp->refType == RefTypeKillGCRefs));
}
}
//---------------------------------------------------------------------------
// newRefPosition: allocate and initialize a new RefPosition.
//
// Arguments:
// reg - reg number that identifies RegRecord to be associated
// with this RefPosition
// theLocation - LSRA location of RefPosition
// theRefType - RefPosition type
// theTreeNode - GenTree node for which this RefPosition is created
// mask - Set of valid registers for this RefPosition
// multiRegIdx - register position if this RefPosition corresponds to a
// multi-reg call node.
//
// Return Value:
// a new RefPosition
//
RefPosition* LinearScan::newRefPosition(
regNumber reg, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask)
{
RefPosition* newRP = newRefPositionRaw(theLocation, theTreeNode, theRefType);
RegRecord* regRecord = getRegisterRecord(reg);
newRP->setReg(regRecord);
newRP->registerAssignment = mask;
newRP->setMultiRegIdx(0);
newRP->setRegOptional(false);
// We can't have two RefPositions on a RegRecord at the same location, unless they are different types.
assert((regRecord->lastRefPosition == nullptr) || (regRecord->lastRefPosition->nodeLocation < theLocation) ||
(regRecord->lastRefPosition->refType != theRefType));
associateRefPosWithInterval(newRP);
DBEXEC(VERBOSE, newRP->dump(this));
return newRP;
}
//---------------------------------------------------------------------------
// newRefPosition: allocate and initialize a new RefPosition.
//
// Arguments:
// theInterval - interval to which RefPosition is associated with.
// theLocation - LSRA location of RefPosition
// theRefType - RefPosition type
// theTreeNode - GenTree node for which this RefPosition is created
// mask - Set of valid registers for this RefPosition
// multiRegIdx - register position if this RefPosition corresponds to a
// multi-reg call node.
//
// Return Value:
// a new RefPosition
//
RefPosition* LinearScan::newRefPosition(Interval* theInterval,
LsraLocation theLocation,
RefType theRefType,
GenTree* theTreeNode,
regMaskTP mask,
unsigned multiRegIdx /* = 0 */)
{
if (theInterval != nullptr)
{
if (mask == RBM_NONE)
{
mask = allRegs(theInterval->registerType);
}
}
else
{
assert(theRefType == RefTypeBB || theRefType == RefTypeKillGCRefs);
}
#ifdef DEBUG
if (theInterval != nullptr && regType(theInterval->registerType) == FloatRegisterType)
{
// In the case we're using floating point registers we must make sure
// this flag was set previously in the compiler since this will mandate
// whether LSRA will take into consideration FP reg killsets.
assert(compiler->compFloatingPointUsed || ((mask & RBM_FLT_CALLEE_SAVED) == 0));
}
#endif // DEBUG
// If this reference is constrained to a single register (and it's not a dummy
// or Kill reftype already), add a RefTypeFixedReg at this location so that its
// availability can be more accurately determined
bool isFixedRegister = isSingleRegister(mask);
bool insertFixedRef = false;
if (isFixedRegister)
{
// Insert a RefTypeFixedReg for any normal def or use (not ParamDef or BB),
// but not an internal use (it will already have a FixedRef for the def).
if ((theRefType == RefTypeDef) || ((theRefType == RefTypeUse) && !theInterval->isInternal))
{
insertFixedRef = true;
}
}
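    // For example (illustrative): a RefTypeUse constrained to mask == RBM_RCX gets a companion
    // RefTypeFixedReg RefPosition on RCX at the same location below, so the allocator can see
    // that RCX is busy here even before this Interval has been allocated.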
if (insertFixedRef)
{
regNumber physicalReg = genRegNumFromMask(mask);
RefPosition* pos = newRefPosition(physicalReg, theLocation, RefTypeFixedReg, nullptr, mask);
assert(theInterval != nullptr);
assert((allRegs(theInterval->registerType) & mask) != 0);
}
RefPosition* newRP = newRefPositionRaw(theLocation, theTreeNode, theRefType);
newRP->setInterval(theInterval);
// Spill info
newRP->isFixedRegRef = isFixedRegister;
#ifndef TARGET_AMD64
// We don't need this for AMD64 because the PInvoke method epilog code is explicit
// at register allocation time.
if (theInterval != nullptr && theInterval->isLocalVar && compiler->compMethodRequiresPInvokeFrame() &&
theInterval->varNum == compiler->genReturnLocal)
{
mask &= ~(RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME);
noway_assert(mask != RBM_NONE);
}
#endif // !TARGET_AMD64
newRP->registerAssignment = mask;
newRP->setMultiRegIdx(multiRegIdx);
newRP->setRegOptional(false);
associateRefPosWithInterval(newRP);
if (RefTypeIsDef(newRP->refType))
{
assert(theInterval != nullptr);
theInterval->isSingleDef = theInterval->firstRefPosition == newRP;
}
DBEXEC(VERBOSE, newRP->dump(this));
return newRP;
}
//---------------------------------------------------------------------------
// newUseRefPosition: allocate and initialize a RefTypeUse RefPosition at currentLoc.
//
// Arguments:
// theInterval - interval to which RefPosition is associated with.
// theTreeNode - GenTree node for which this RefPosition is created
// mask - Set of valid registers for this RefPosition
// multiRegIdx - register position if this RefPosition corresponds to a
// multi-reg call node.
// minRegCount - Minimum number of registers that need to be ensured while
// constraining candidates for this ref position under
// LSRA stress. This is a DEBUG only arg.
//
// Return Value:
// a new RefPosition
//
// Notes:
// If the caller knows that 'theTreeNode' is NOT a candidate local, newRefPosition
// can/should be called directly.
//
RefPosition* LinearScan::newUseRefPosition(Interval* theInterval,
GenTree* theTreeNode,
regMaskTP mask,
unsigned multiRegIdx)
{
GenTree* treeNode = isCandidateLocalRef(theTreeNode) ? theTreeNode : nullptr;
RefPosition* pos = newRefPosition(theInterval, currentLoc, RefTypeUse, treeNode, mask, multiRegIdx);
if (theTreeNode->IsRegOptional())
{
pos->setRegOptional(true);
}
return pos;
}
//------------------------------------------------------------------------
// IsContainableMemoryOp: Checks whether this is a memory op that can be contained.
//
// Arguments:
// node - the node of interest.
//
// Return value:
// True if this will definitely be a memory reference that could be contained.
//
// Notes:
// This differs from the isMemoryOp() method on GenTree because it checks for
// the case of doNotEnregister local. This won't include locals that
// for some other reason do not become register candidates, nor those that get
// spilled.
// Also, because we usually call this before we redo dataflow, any new lclVars
// introduced after the last dataflow analysis will not yet be marked lvTracked,
// so we don't use that.
//
bool LinearScan::isContainableMemoryOp(GenTree* node)
{
if (node->isMemoryOp())
{
return true;
}
if (node->IsLocal())
{
if (!enregisterLocalVars)
{
return true;
}
const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclVar());
return varDsc->lvDoNotEnregister;
}
return false;
}
//------------------------------------------------------------------------
// addRefsForPhysRegMask: Adds RefPositions of the given type for all the registers in 'mask'.
//
// Arguments:
// mask - the mask (set) of registers.
// currentLoc - the location at which they should be added
// refType - the type of refposition
// isLastUse - true IFF this is a last use of the register
//
void LinearScan::addRefsForPhysRegMask(regMaskTP mask, LsraLocation currentLoc, RefType refType, bool isLastUse)
{
if (refType == RefTypeKill)
{
// The mask identifies a set of registers that will be used during
// codegen. Mark these as modified here, so when we do final frame
// layout, we'll know about all these registers. This is especially
// important if mask contains callee-saved registers, which affect the
// frame size since we need to save/restore them. In the case where we
// have a copyBlk with GC pointers, we may need to call the
// CORINFO_HELP_ASSIGN_BYREF helper, which kills callee-saved RSI and
// RDI; if LSRA doesn't assign RSI/RDI, they wouldn't get marked as
// modified until codegen, which is too late.
compiler->codeGen->regSet.rsSetRegsModified(mask DEBUGARG(true));
}
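    // Walk the mask one register at a time, low bit first. For example (illustrative), a
    // RefTypeKill request with mask == (RBM_RAX | RBM_RCX) creates two kill RefPositions at
    // currentLoc, one on RAX and one on RCX, each constrained to exactly that physical register.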
for (regNumber reg = REG_FIRST; mask; reg = REG_NEXT(reg), mask >>= 1)
{
if (mask & 1)
{
// This assumes that these are all "special" RefTypes that
// don't need to be recorded on the tree (hence treeNode is nullptr)
RefPosition* pos = newRefPosition(reg, currentLoc, refType, nullptr,
genRegMask(reg)); // This MUST occupy the physical register (obviously)
if (isLastUse)
{
pos->lastUse = true;
}
}
}
}
//------------------------------------------------------------------------
// getKillSetForStoreInd: Determine the liveness kill set for a GT_STOREIND node.
// If the GT_STOREIND will generate a write barrier, determine the specific kill
// set required by the case-specific, platform-specific write barrier. If no
// write barrier is required, the kill set will be RBM_NONE.
//
// Arguments:
// tree - the GT_STOREIND node
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForStoreInd(GenTreeStoreInd* tree)
{
assert(tree->OperIs(GT_STOREIND));
regMaskTP killMask = RBM_NONE;
GenTree* data = tree->Data();
GCInfo::WriteBarrierForm writeBarrierForm = compiler->codeGen->gcInfo.gcIsWriteBarrierCandidate(tree, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
{
if (compiler->codeGen->genUseOptimizedWriteBarriers(writeBarrierForm))
{
// We can't determine the exact helper to be used at this point, because it depends on
// the allocated register for the `data` operand. However, all the (x86) optimized
// helpers have the same kill set: EDX. And note that currently, only x86 can return
// `true` for genUseOptimizedWriteBarriers().
killMask = RBM_CALLEE_TRASH_NOGC;
}
else
{
// Figure out which helper we're going to use, and then get the kill set for that helper.
CorInfoHelpFunc helper =
compiler->codeGen->genWriteBarrierHelperForWriteBarrierForm(tree, writeBarrierForm);
killMask = compiler->compHelperCallKillSet(helper);
}
}
return killMask;
}
//------------------------------------------------------------------------
// getKillSetForShiftRotate: Determine the liveness kill set for a shift or rotate node.
//
// Arguments:
// shiftNode - the shift or rotate node
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForShiftRotate(GenTreeOp* shiftNode)
{
regMaskTP killMask = RBM_NONE;
#ifdef TARGET_XARCH
assert(shiftNode->OperIsShiftOrRotate());
GenTree* shiftBy = shiftNode->gtGetOp2();
if (!shiftBy->isContained())
{
killMask = RBM_RCX;
}
#endif // TARGET_XARCH
return killMask;
}
//------------------------------------------------------------------------
// getKillSetForMul: Determine the liveness kill set for a multiply node.
//
// Arguments:
// tree - the multiply node
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForMul(GenTreeOp* mulNode)
{
regMaskTP killMask = RBM_NONE;
#ifdef TARGET_XARCH
assert(mulNode->OperIsMul());
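    // Note (illustrative): the kill applies to the forms that must use the one-operand mul/imul
    // encoding, whose result lands in the implicit RDX:RAX pair (GT_MULHI, GT_MUL_LONG, and an
    // unsigned overflow-checked GT_MUL). A plain GT_MUL uses the two/three-operand imul form
    // and kills nothing.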
if (!mulNode->OperIs(GT_MUL) || (((mulNode->gtFlags & GTF_UNSIGNED) != 0) && mulNode->gtOverflowEx()))
{
killMask = RBM_RAX | RBM_RDX;
}
#endif // TARGET_XARCH
return killMask;
}
//------------------------------------------------------------------------
// getKillSetForModDiv: Determine the liveness kill set for a mod or div node.
//
// Arguments:
// tree - the mod or div node as a GenTreeOp
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForModDiv(GenTreeOp* node)
{
regMaskTP killMask = RBM_NONE;
#ifdef TARGET_XARCH
assert(node->OperIs(GT_MOD, GT_DIV, GT_UMOD, GT_UDIV));
if (!varTypeIsFloating(node->TypeGet()))
{
// Both RAX and RDX are killed by the operation
killMask = RBM_RAX | RBM_RDX;
}
#endif // TARGET_XARCH
return killMask;
}
//------------------------------------------------------------------------
// getKillSetForCall: Determine the liveness kill set for a call node.
//
// Arguments:
// tree - the GenTreeCall node
//
// Return Value: a register mask of the registers killed
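// Notes (illustrative): most calls kill the full RBM_CALLEE_TRASH set, but helper calls are
// narrowed to the helper-specific set via compHelperCallKillSet (some helpers preserve more
// registers than the default ABI), and the floating-point portion of the kill set is dropped
// entirely when the method uses no floating point.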
//
regMaskTP LinearScan::getKillSetForCall(GenTreeCall* call)
{
regMaskTP killMask = RBM_CALLEE_TRASH;
#ifdef TARGET_X86
if (compiler->compFloatingPointUsed)
{
if (call->TypeGet() == TYP_DOUBLE)
{
needDoubleTmpForFPCall = true;
}
else if (call->TypeGet() == TYP_FLOAT)
{
needFloatTmpForFPCall = true;
}
}
#endif // TARGET_X86
if (call->IsHelperCall())
{
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
killMask = compiler->compHelperCallKillSet(helpFunc);
}
// if there is no FP used, we can ignore the FP kills
if (!compiler->compFloatingPointUsed)
{
killMask &= ~RBM_FLT_CALLEE_TRASH;
}
#ifdef TARGET_ARM
if (call->IsVirtualStub())
{
killMask |= compiler->virtualStubParamInfo->GetRegMask();
}
#else // !TARGET_ARM
// Verify that the special virtual stub call registers are in the kill mask.
// We don't just add them unconditionally to the killMask because for most architectures
// they are already in the RBM_CALLEE_TRASH set,
// and we don't want to introduce extra checks and calls in this hot function.
assert(!call->IsVirtualStub() ||
((killMask & compiler->virtualStubParamInfo->GetRegMask()) == compiler->virtualStubParamInfo->GetRegMask()));
#endif // !TARGET_ARM
return killMask;
}
//------------------------------------------------------------------------
// getKillSetForBlockStore: Determine the liveness kill set for a block store node.
//
// Arguments:
// tree - the block store node as a GenTreeBlk
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForBlockStore(GenTreeBlk* blkNode)
{
assert(blkNode->OperIsStore());
regMaskTP killMask = RBM_NONE;
if ((blkNode->OperGet() == GT_STORE_OBJ) && blkNode->OperIsCopyBlkOp())
{
assert(blkNode->AsObj()->GetLayout()->HasGCPtr());
killMask = compiler->compHelperCallKillSet(CORINFO_HELP_ASSIGN_BYREF);
}
else
{
bool isCopyBlk = varTypeIsStruct(blkNode->Data());
switch (blkNode->gtBlkOpKind)
{
#ifndef TARGET_X86
case GenTreeBlk::BlkOpKindHelper:
if (isCopyBlk)
{
killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMCPY);
}
else
{
killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMSET);
}
break;
#endif
#ifdef TARGET_XARCH
case GenTreeBlk::BlkOpKindRepInstr:
if (isCopyBlk)
{
// rep movs kills RCX, RDI and RSI
killMask = RBM_RCX | RBM_RDI | RBM_RSI;
}
else
{
// rep stos kills RCX and RDI.
// (Note that the Data() node, if not constant, will be assigned to
// RCX, but it's fine that this kills it, as the value is not available
// after this node in any case.)
killMask = RBM_RDI | RBM_RCX;
}
break;
#endif
case GenTreeBlk::BlkOpKindUnroll:
case GenTreeBlk::BlkOpKindInvalid:
// for these 'gtBlkOpKind' kinds, we leave 'killMask' = RBM_NONE
break;
}
}
return killMask;
}
#ifdef FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// getKillSetForHWIntrinsic: Determine the liveness kill set for a GT_HWINTRINSIC node.
// Some intrinsics use an implicit fixed register (e.g. maskmovdqu implicitly addresses
// through EDI); those registers must be reported as killed. If there are none, the
// kill set will be RBM_NONE.
//
// Arguments:
//    node - the GT_HWINTRINSIC node
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForHWIntrinsic(GenTreeHWIntrinsic* node)
{
regMaskTP killMask = RBM_NONE;
#ifdef TARGET_XARCH
switch (node->GetHWIntrinsicId())
{
case NI_SSE2_MaskMove:
// maskmovdqu uses edi as the implicit address register.
// Although it is set as the srcCandidate on the address, if there is also a fixed
// assignment for the definition of the address, resolveConflictingDefAndUse() may
// change the register assignment on the def or use of a tree temp (SDSU) when there
// is a conflict, and the FixedRef on edi won't be sufficient to ensure that another
// Interval will not be allocated there.
// Issue #17674 tracks this.
killMask = RBM_EDI;
break;
default:
// Leave killMask as RBM_NONE
break;
}
#endif // TARGET_XARCH
return killMask;
}
#endif // FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// getKillSetForReturn: Determine the liveness kill set for a return node.
//
// Arguments:
// NONE (this kill set is independent of the details of the specific return.)
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForReturn()
{
return compiler->compIsProfilerHookNeeded() ? compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_LEAVE)
: RBM_NONE;
}
//------------------------------------------------------------------------
// getKillSetForProfilerHook: Determine the liveness kill set for a profiler hook.
//
// Arguments:
// NONE (this kill set is independent of the details of the specific node.)
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForProfilerHook()
{
return compiler->compIsProfilerHookNeeded() ? compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_TAILCALL)
: RBM_NONE;
}
#ifdef DEBUG
//------------------------------------------------------------------------
// getKillSetForNode: Return the registers killed by the given tree node.
//
// Arguments:
// tree - the tree for which the kill set is needed.
//
// Return Value: a register mask of the registers killed
//
regMaskTP LinearScan::getKillSetForNode(GenTree* tree)
{
regMaskTP killMask = RBM_NONE;
switch (tree->OperGet())
{
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
#ifdef TARGET_X86
case GT_LSH_HI:
case GT_RSH_LO:
#endif
killMask = getKillSetForShiftRotate(tree->AsOp());
break;
case GT_MUL:
case GT_MULHI:
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
case GT_MUL_LONG:
#endif
killMask = getKillSetForMul(tree->AsOp());
break;
case GT_MOD:
case GT_DIV:
case GT_UMOD:
case GT_UDIV:
killMask = getKillSetForModDiv(tree->AsOp());
break;
case GT_STORE_OBJ:
case GT_STORE_BLK:
case GT_STORE_DYN_BLK:
killMask = getKillSetForBlockStore(tree->AsBlk());
break;
case GT_RETURNTRAP:
killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC);
break;
case GT_CALL:
killMask = getKillSetForCall(tree->AsCall());
break;
case GT_STOREIND:
killMask = getKillSetForStoreInd(tree->AsStoreInd());
break;
#if defined(PROFILING_SUPPORTED)
// If this method requires profiler ELT hook then mark these nodes as killing
// callee trash registers (excluding RAX and XMM0). The reason for this is that
// profiler callback would trash these registers. See vm\amd64\asmhelpers.asm for
// more details.
case GT_RETURN:
killMask = getKillSetForReturn();
break;
case GT_PROF_HOOK:
killMask = getKillSetForProfilerHook();
break;
#endif // PROFILING_SUPPORTED
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
killMask = getKillSetForHWIntrinsic(tree->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
default:
// for all other 'tree->OperGet()' kinds, leave 'killMask' = RBM_NONE
break;
}
return killMask;
}
#endif // DEBUG
//------------------------------------------------------------------------
// buildKillPositionsForNode:
// Given some tree node add refpositions for all the registers this node kills
//
// Arguments:
// tree - the tree for which kill positions should be generated
// currentLoc - the location at which the kills should be added
// killMask - The mask of registers killed by this node
//
// Return Value:
// true - kills were inserted
// false - no kills were inserted
//
// Notes:
// The return value is needed because if we have any kills, we need to make sure that
// all defs are located AFTER the kills. On the other hand, if there aren't kills,
// the multiple defs for a regPair are in different locations.
// If we generate any kills, we will mark all currentLiveVars as being preferenced
// to avoid the killed registers. This is somewhat conservative.
//
// This method can add kills even if killMask is RBM_NONE, if this tree is one of the
// special cases that signal that we can't permit callee-save registers to hold GC refs.
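// For example (illustrative), when killMask is a full call kill set such as RBM_CALLEE_TRASH,
// each live register-candidate interval is marked preferCalleeSave, and non-write-thru
// intervals additionally have their preferences narrowed away from the killed registers, so
// they are less likely to require a spill/reload around the call.
//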
bool LinearScan::buildKillPositionsForNode(GenTree* tree, LsraLocation currentLoc, regMaskTP killMask)
{
bool insertedKills = false;
if (killMask != RBM_NONE)
{
addRefsForPhysRegMask(killMask, currentLoc, RefTypeKill, true);
// TODO-CQ: It appears to be valuable for both fp and int registers to avoid killing the callee
// save regs on infrequently executed paths. However, it results in a large number of asmDiffs,
// many of which appear to be regressions (because there is more spill on the infrequently path),
// but are not really because the frequent path becomes smaller. Validating these diffs will need
// to be done before making this change.
// Also note that we avoid setting callee-save preferences for floating point. This may need
// revisiting, and note that it doesn't currently apply to SIMD types, only float or double.
// if (!blockSequence[curBBSeqNum]->isRunRarely())
if (enregisterLocalVars)
{
VarSetOps::Iter iter(compiler, currentLiveVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (Compiler::varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()))
{
if (!VarSetOps::IsMember(compiler, largeVectorCalleeSaveCandidateVars, varIndex))
{
continue;
}
}
else
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (varTypeIsFloating(varDsc) &&
!VarSetOps::IsMember(compiler, fpCalleeSaveCandidateVars, varIndex))
{
continue;
}
Interval* interval = getIntervalForLocalVar(varIndex);
const bool isCallKill = ((killMask == RBM_INT_CALLEE_TRASH) || (killMask == RBM_CALLEE_TRASH));
if (isCallKill)
{
interval->preferCalleeSave = true;
}
// We are more conservative about allocating callee-saves registers to write-thru vars, since
// a call only requires reloading after (not spilling before). So we record (above) the fact
// that we'd prefer a callee-save register, but we don't update the preferences at this point.
// See the "heuristics for writeThru intervals" in 'buildIntervals()'.
if (!interval->isWriteThru || !isCallKill)
{
regMaskTP newPreferences = allRegs(interval->registerType) & (~killMask);
if (newPreferences != RBM_NONE)
{
interval->updateRegisterPreferences(newPreferences);
}
else
{
// If there are no callee-saved registers, the call could kill all the registers.
// This is a valid state, so in that case the assert should not trigger. The RA will spill in order
// to free a register later.
assert(compiler->opts.compDbgEnC || (calleeSaveRegs(varDsc->lvType)) == RBM_NONE);
}
}
}
}
insertedKills = true;
}
if (compiler->killGCRefs(tree))
{
RefPosition* pos =
newRefPosition((Interval*)nullptr, currentLoc, RefTypeKillGCRefs, tree, (allRegs(TYP_REF) & ~RBM_ARG_REGS));
insertedKills = true;
}
return insertedKills;
}
//------------------------------------------------------------------------
// LinearScan::isCandidateMultiRegLclVar: Check whether a MultiReg node should
// remain a candidate MultiReg
//
// Arguments:
// lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR of interest
//
// Return Value:
// true iff it remains a MultiReg lclVar.
//
// Notes:
// When identifying candidates, the register allocator will only retain
// promoted fields of a multi-reg local as candidates if all of its fields
// are candidates. This is because of the added complexity of dealing with a
// def or use of a multi-reg lclVar when only some of the fields have liveness
// info.
// At the time we determine whether a multi-reg lclVar can still be handled
// as such, we've already completed Lowering, so during the build phase of
// LSRA we have to reset the GTF_VAR_MULTIREG flag if necessary as we visit
// each node.
//
bool LinearScan::isCandidateMultiRegLclVar(GenTreeLclVar* lclNode)
{
assert(compiler->lvaEnregMultiRegVars && lclNode->IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
assert(varDsc->lvPromoted);
bool isMultiReg = (compiler->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT);
if (!isMultiReg)
{
lclNode->ClearMultiReg();
}
#ifdef DEBUG
for (unsigned int i = 0; i < varDsc->lvFieldCnt; i++)
{
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i);
assert(isCandidateVar(fieldVarDsc) == isMultiReg);
}
#endif // DEBUG
return isMultiReg;
}
//------------------------------------------------------------------------
// checkContainedOrCandidateLclVar: Check whether a GT_LCL_VAR node is a
// candidate or contained.
//
// Arguments:
// lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR of interest
//
// Return Value:
// true if the node remains a candidate or is contained
// false otherwise (i.e. if it will define a register)
//
// Notes:
// We handle candidate variables differently from non-candidate ones.
// If it is a candidate, we will simply add a use of it at its parent/consumer.
// Otherwise, for a use we need to actually add the appropriate references for loading
// or storing the variable.
//
// A candidate lclVar won't actually get used until the appropriate ancestor node
// is processed, unless this is marked "isLocalDefUse" because it is a stack-based argument
// to a call or an orphaned dead node.
//
// Also, because we do containment analysis before we redo dataflow and identify register
// candidates, the containment analysis only uses !lvDoNotEnregister to estimate register
// candidates.
// If there is a lclVar that is estimated during Lowering to be a register candidate but turns
// out not to be, if a use was marked regOptional it should now be marked contained instead.
//
bool LinearScan::checkContainedOrCandidateLclVar(GenTreeLclVar* lclNode)
{
bool isCandidate;
bool makeContained = false;
// We shouldn't be calling this if this node was already contained.
assert(!lclNode->isContained());
// If we have a multireg local, verify that its fields are still register candidates.
if (lclNode->IsMultiReg())
{
// Multi-reg uses must support containment, but if we have an actual multi-reg local
// we don't want it to be RegOptional in fixed-use cases, so that we can ensure proper
// liveness modeling (e.g. if one field is in a register required by another field, in
// a RegOptional case we won't handle the conflict properly if we decide not to allocate).
isCandidate = isCandidateMultiRegLclVar(lclNode);
if (isCandidate)
{
assert(!lclNode->IsRegOptional());
}
else
{
makeContained = true;
}
}
else
{
isCandidate = compiler->lvaGetDesc(lclNode)->lvLRACandidate;
makeContained = !isCandidate && lclNode->IsRegOptional();
}
if (makeContained)
{
lclNode->ClearRegOptional();
lclNode->SetContained();
return true;
}
return isCandidate;
}
//----------------------------------------------------------------------------
// defineNewInternalTemp: Defines a ref position for an internal temp.
//
// Arguments:
// tree - Gentree node requiring an internal register
// regType - Register type
// currentLoc - Location of the temp Def position
// regMask - register mask of candidates for temp
//
RefPosition* LinearScan::defineNewInternalTemp(GenTree* tree, RegisterType regType, regMaskTP regMask)
{
Interval* current = newInterval(regType);
current->isInternal = true;
RefPosition* newDef = newRefPosition(current, currentLoc, RefTypeDef, tree, regMask, 0);
assert(internalCount < MaxInternalCount);
internalDefs[internalCount++] = newDef;
return newDef;
}
//------------------------------------------------------------------------
// buildInternalRegisterDefForNode - Create an Interval for an internal int register, and a def RefPosition
//
// Arguments:
// tree - Gentree node that needs internal registers
// internalCands - The mask of valid registers
//
// Returns:
// The def RefPosition created for this internal temp.
//
RefPosition* LinearScan::buildInternalIntRegisterDefForNode(GenTree* tree, regMaskTP internalCands)
{
// The candidate set should contain only integer registers.
assert((internalCands & ~allRegs(TYP_INT)) == RBM_NONE);
RefPosition* defRefPosition = defineNewInternalTemp(tree, IntRegisterType, internalCands);
return defRefPosition;
}
//------------------------------------------------------------------------
// buildInternalFloatRegisterDefForNode - Create an Interval for an internal fp register, and a def RefPosition
//
// Arguments:
// tree - Gentree node that needs internal registers
// internalCands - The mask of valid registers
//
// Returns:
// The def RefPosition created for this internal temp.
//
RefPosition* LinearScan::buildInternalFloatRegisterDefForNode(GenTree* tree, regMaskTP internalCands)
{
// The candidate set should contain only float registers.
assert((internalCands & ~allRegs(TYP_FLOAT)) == RBM_NONE);
RefPosition* defRefPosition = defineNewInternalTemp(tree, FloatRegisterType, internalCands);
return defRefPosition;
}
//------------------------------------------------------------------------
// buildInternalRegisterUses - adds use positions for internal
// registers required for tree node.
//
// Notes:
// During the BuildNode process, calls to buildInternalIntRegisterDefForNode and
// buildInternalFloatRegisterDefForNode put new RefPositions in the 'internalDefs'
// array, and increment 'internalCount'. This method must be called to add corresponding
// uses. It then resets the 'internalCount' for the handling of the next node.
//
// If the internal registers must differ from the target register, 'setInternalRegsDelayFree'
// must be set to true, so that the uses may be marked 'delayRegFree'.
// Note that if a node has both float and int temps, generally the target will either be
// int *or* float, and it is not really necessary to set this on the other type, but it does
// no harm as it won't restrict the register selection.
//
void LinearScan::buildInternalRegisterUses()
{
assert(internalCount <= MaxInternalCount);
for (int i = 0; i < internalCount; i++)
{
RefPosition* def = internalDefs[i];
regMaskTP mask = def->registerAssignment;
RefPosition* use = newRefPosition(def->getInterval(), currentLoc, RefTypeUse, def->treeNode, mask, 0);
if (setInternalRegsDelayFree)
{
use->delayRegFree = true;
pendingDelayFree = true;
}
}
// internalCount = 0;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
//------------------------------------------------------------------------
// makeUpperVectorInterval - Create an Interval for saving and restoring
// the upper half of a large vector.
//
// Arguments:
// varIndex - The tracked index for a large vector lclVar.
//
void LinearScan::makeUpperVectorInterval(unsigned varIndex)
{
Interval* lclVarInterval = getIntervalForLocalVar(varIndex);
assert(Compiler::varTypeNeedsPartialCalleeSave(lclVarInterval->registerType));
Interval* newInt = newInterval(LargeVectorSaveType);
newInt->relatedInterval = lclVarInterval;
newInt->isUpperVector = true;
}
//------------------------------------------------------------------------
// getUpperVectorInterval - Get the Interval for saving and restoring
// the upper half of a large vector.
//
// Arguments:
// varIndex - The tracked index for a large vector lclVar.
//
Interval* LinearScan::getUpperVectorInterval(unsigned varIndex)
{
// TODO-Throughput: Consider creating a map from varIndex to upperVector interval.
for (Interval& interval : intervals)
{
if (interval.isLocalVar)
{
continue;
}
noway_assert(interval.isUpperVector);
if (interval.relatedInterval->getVarIndex(compiler) == varIndex)
{
return &interval;
}
}
unreached();
}
//------------------------------------------------------------------------
// buildUpperVectorSaveRefPositions - Create special RefPositions for saving
// the upper half of a set of large vectors.
//
// Arguments:
// tree - The current node being handled
// currentLoc - The location of the current node
// fpCalleeKillSet - The set of registers killed by this node.
//
// Notes: This is called by BuildDefsWithKills for any node that kills registers in the
// RBM_FLT_CALLEE_TRASH set. We actually need to find any calls that kill the upper-half
// of the callee-save vector registers.
// But we will use as a proxy any node that kills floating point registers.
// (Note that some calls are masquerading as other nodes at this point so we can't just check for calls.)
//
void LinearScan::buildUpperVectorSaveRefPositions(GenTree* tree, LsraLocation currentLoc, regMaskTP fpCalleeKillSet)
{
if ((tree != nullptr) && tree->IsCall())
{
if (tree->AsCall()->IsNoReturn())
{
// No point in having vector save/restore if the call will not return.
return;
}
}
if (enregisterLocalVars && !VarSetOps::IsEmpty(compiler, largeVectorVars))
{
// We assume that the kill set includes at least some callee-trash registers, but
// that it doesn't include any callee-save registers.
assert((fpCalleeKillSet & RBM_FLT_CALLEE_TRASH) != RBM_NONE);
assert((fpCalleeKillSet & RBM_FLT_CALLEE_SAVED) == RBM_NONE);
// We only need to save the upper half of any large vector vars that are currently live.
VARSET_TP liveLargeVectors(VarSetOps::Intersection(compiler, currentLiveVars, largeVectorVars));
VarSetOps::Iter iter(compiler, liveLargeVectors);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
Interval* varInterval = getIntervalForLocalVar(varIndex);
if (!varInterval->isPartiallySpilled)
{
Interval* upperVectorInterval = getUpperVectorInterval(varIndex);
RefPosition* pos =
newRefPosition(upperVectorInterval, currentLoc, RefTypeUpperVectorSave, tree, RBM_FLT_CALLEE_SAVED);
varInterval->isPartiallySpilled = true;
#ifdef TARGET_XARCH
pos->regOptional = true;
#endif
}
}
}
// For any non-lclVar intervals that are live at this point (i.e. in the DefList), we will also create
// a RefTypeUpperVectorSave. For now these will all be spilled at this point, as we don't currently
// have a mechanism to communicate any non-lclVar intervals that need to be restored.
// TODO-CQ: We could consider adding such a mechanism, but it's unclear whether this rare
// case of a large vector temp live across a call is worth the added complexity.
for (RefInfoListNode *listNode = defList.Begin(), *end = defList.End(); listNode != end;
listNode = listNode->Next())
{
const GenTree* defNode = listNode->treeNode;
var_types regType = defNode->TypeGet();
if (regType == TYP_STRUCT)
{
assert(defNode->OperIs(GT_LCL_VAR, GT_CALL));
if (defNode->OperIs(GT_LCL_VAR))
{
const GenTreeLclVar* lcl = defNode->AsLclVar();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
regType = varDsc->GetRegisterType();
}
else
{
const GenTreeCall* call = defNode->AsCall();
const CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
Compiler::structPassingKind howToReturnStruct;
regType = compiler->getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
if (howToReturnStruct == Compiler::SPK_ByValueAsHfa)
{
regType = compiler->GetHfaType(retClsHnd);
}
#if defined(TARGET_ARM64)
else if (howToReturnStruct == Compiler::SPK_ByValue)
{
// TODO-Cleanup: add a new Compiler::SPK for this case.
// This is the case when 16-byte struct is returned as [x0, x1].
// We don't need a partial callee save.
regType = TYP_LONG;
}
#endif // TARGET_ARM64
}
assert((regType != TYP_STRUCT) && (regType != TYP_UNDEF));
}
if (Compiler::varTypeNeedsPartialCalleeSave(regType))
{
// In the rare case where such an interval is live across nested calls, we don't need to insert another.
if (listNode->ref->getInterval()->recentRefPosition->refType != RefTypeUpperVectorSave)
{
RefPosition* pos = newRefPosition(listNode->ref->getInterval(), currentLoc, RefTypeUpperVectorSave,
tree, RBM_FLT_CALLEE_SAVED);
}
}
}
}
//------------------------------------------------------------------------
// buildUpperVectorRestoreRefPosition - Create a RefPosition for restoring
// the upper half of a large vector.
//
// Arguments:
// lclVarInterval - A lclVarInterval that is live at 'currentLoc'
// currentLoc - The current location for which we're building RefPositions
// node - The node, if any, that the restore would be inserted before.
// If null, the restore will be inserted at the end of the block.
//
void LinearScan::buildUpperVectorRestoreRefPosition(Interval* lclVarInterval, LsraLocation currentLoc, GenTree* node)
{
if (lclVarInterval->isPartiallySpilled)
{
unsigned varIndex = lclVarInterval->getVarIndex(compiler);
Interval* upperVectorInterval = getUpperVectorInterval(varIndex);
RefPosition* pos = newRefPosition(upperVectorInterval, currentLoc, RefTypeUpperVectorRestore, node, RBM_NONE);
lclVarInterval->isPartiallySpilled = false;
#ifdef TARGET_XARCH
pos->regOptional = true;
#endif
}
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#ifdef DEBUG
//------------------------------------------------------------------------
// ComputeOperandDstCount: computes the number of registers defined by a
// node.
//
// For most nodes, this is simple:
// - Nodes that do not produce values (e.g. stores and other void-typed
// nodes) and nodes that immediately use the registers they define
// produce no registers
// - Nodes that are marked as defining N registers define N registers.
//
// For contained nodes, however, things are more complicated: for purposes
// of bookkeeping, a contained node is treated as producing the transitive
// closure of the registers produced by its sources.
//
// Arguments:
// operand - The operand for which to compute a register count.
//
// Returns:
// The number of registers defined by `operand`.
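// For example (illustrative), a contained operand whose own operands are two
// register-producing nodes reports a count of 2: the contained node defines nothing itself,
// but its consumer effectively consumes both of those registers.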
//
int LinearScan::ComputeOperandDstCount(GenTree* operand)
{
// GT_ARGPLACE is the only non-LIR node that is currently in the trees at this stage, though
// note that it is not in the linear order.
if (operand->OperIs(GT_ARGPLACE))
{
return 0;
}
if (operand->isContained())
{
int dstCount = 0;
for (GenTree* op : operand->Operands())
{
dstCount += ComputeOperandDstCount(op);
}
return dstCount;
}
if (operand->IsUnusedValue())
{
// Operands that define an unused value do not produce any registers.
return 0;
}
if (operand->IsValue())
{
// Operands that are values and are not contained consume all of their operands
// and produce one or more registers.
return operand->GetRegisterDstCount(compiler);
}
else
{
// This must be one of the operand types that are neither contained nor produce a value.
// Stores and void-typed operands may be encountered when processing call nodes, which contain
// pointers to argument setup stores.
assert(operand->OperIsStore() || operand->OperIsBlkOp() || operand->OperIsPutArgStk() ||
operand->OperIsCompare() || operand->OperIs(GT_CMP) || operand->TypeGet() == TYP_VOID);
return 0;
}
}
//------------------------------------------------------------------------
// ComputeAvailableSrcCount: computes the number of registers available as
// sources for a node.
//
// This is simply the sum of the number of registers produced by each
// operand to the node.
//
// Arguments:
// node - The node for which to compute a source count.
//
// Return Value:
// The number of registers available as sources for `node`.
//
int LinearScan::ComputeAvailableSrcCount(GenTree* node)
{
int numSources = 0;
for (GenTree* operand : node->Operands())
{
numSources += ComputeOperandDstCount(operand);
}
return numSources;
}
#endif // DEBUG
//------------------------------------------------------------------------
// buildRefPositionsForNode: The main entry point for building the RefPositions
// and "tree temp" Intervals for a given node.
//
// Arguments:
// tree - The node for which we are building RefPositions
// currentLoc - The LsraLocation of the given node
//
void LinearScan::buildRefPositionsForNode(GenTree* tree, LsraLocation currentLoc)
{
// The LIR traversal doesn't visit GT_ARGPLACE nodes.
// GT_CLS_VAR nodes should have been eliminated by rationalizer.
assert(tree->OperGet() != GT_ARGPLACE);
assert(tree->OperGet() != GT_CLS_VAR);
// The set of internal temporary registers used by this node are stored in the
// gtRsvdRegs register mask. Clear it out.
tree->gtRsvdRegs = RBM_NONE;
#ifdef DEBUG
if (VERBOSE)
{
dumpDefList();
compiler->gtDispTree(tree, nullptr, nullptr, true);
}
#endif // DEBUG
if (tree->isContained())
{
#ifdef TARGET_XARCH
// On XArch we can have contained candidate lclVars if they are part of a RMW
// address computation. In this case we need to check whether it is a last use.
if (tree->IsLocal() && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
LclVarDsc* const varDsc = compiler->lvaGetDesc(tree->AsLclVarCommon());
if (isCandidateVar(varDsc))
{
assert(varDsc->lvTracked);
unsigned varIndex = varDsc->lvVarIndex;
VarSetOps::RemoveElemD(compiler, currentLiveVars, varIndex);
}
}
#else // TARGET_XARCH
assert(!isCandidateLocalRef(tree));
#endif // TARGET_XARCH
JITDUMP("Contained\n");
return;
}
#ifdef DEBUG
// If we are constraining the registers for allocation, we will modify all the RefPositions
// we've built for this node after we've created them. In order to do that, we'll remember
// the last RefPosition prior to those created for this node.
RefPositionIterator refPositionMark = refPositions.backPosition();
int oldDefListCount = defList.Count();
#endif // DEBUG
int consume = BuildNode(tree);
#ifdef DEBUG
int newDefListCount = defList.Count();
// Currently produce is unused, but need to strengthen an assert to check if produce is
// as expected. See https://github.com/dotnet/runtime/issues/8678
int produce = newDefListCount - oldDefListCount;
assert((consume == 0) || (ComputeAvailableSrcCount(tree) == consume));
// If we are constraining registers, modify all the RefPositions we've just built to specify the
// minimum reg count required.
if ((getStressLimitRegs() != LSRA_LIMIT_NONE) || (getSelectionHeuristics() != LSRA_SELECT_DEFAULT))
{
// The number of registers required for a tree node is the sum of
// { RefTypeUses } + { RefTypeDef for the node itself } + specialPutArgCount
// This is the minimum set of registers that needs to be ensured in the candidate set of ref positions created.
//
// First, we count them.
unsigned minRegCount = 0;
RefPositionIterator iter = refPositionMark;
for (iter++; iter != refPositions.end(); iter++)
{
RefPosition* newRefPosition = &(*iter);
if (newRefPosition->isIntervalRef())
{
if ((newRefPosition->refType == RefTypeUse) ||
((newRefPosition->refType == RefTypeDef) && !newRefPosition->getInterval()->isInternal))
{
minRegCount++;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
else if (newRefPosition->refType == RefTypeUpperVectorSave)
{
minRegCount++;
}
#endif
if (newRefPosition->getInterval()->isSpecialPutArg)
{
minRegCount++;
}
}
}
if (tree->OperIsPutArgSplit())
{
// While we have attempted to account for any "specialPutArg" defs above, we're only looking at RefPositions
// created for this node. We must be defining at least one register in the PutArgSplit, so conservatively
// add one less than the maximum number of register args to 'minRegCount'.
minRegCount += MAX_REG_ARG - 1;
}
for (refPositionMark++; refPositionMark != refPositions.end(); refPositionMark++)
{
RefPosition* newRefPosition = &(*refPositionMark);
unsigned minRegCountForRef = minRegCount;
if (RefTypeIsUse(newRefPosition->refType) && newRefPosition->delayRegFree)
{
// If delayRegFree, then Use will interfere with the destination of the consuming node.
// Therefore, we also need to add the kill set of the consuming node to minRegCount.
//
// For example consider the following IR on x86, where v01 and v02
// are method args coming in ecx and edx respectively.
// GT_DIV(v01, v02)
//
// For GT_DIV, the minRegCount will be 3 without adding kill set of GT_DIV node.
//
// Assume further JitStressRegs=2, which would constrain candidates to callee trashable
// regs { eax, ecx, edx } on use positions of v01 and v02. LSRA allocates ecx for v01.
// The use position of v02 cannot be allocated a reg since it is marked delay-reg free and
// {eax,edx} are getting killed before the def of GT_DIV. For this reason, minRegCount for
// the use position of v02 also needs to take into account the kill set of its consuming node.
regMaskTP killMask = getKillSetForNode(tree);
if (killMask != RBM_NONE)
{
minRegCountForRef += genCountBits(killMask);
}
}
else if ((newRefPosition->refType) == RefTypeDef && (newRefPosition->getInterval()->isSpecialPutArg))
{
minRegCountForRef++;
}
newRefPosition->minRegCandidateCount = minRegCountForRef;
if (newRefPosition->IsActualRef() && doReverseCallerCallee())
{
Interval* interval = newRefPosition->getInterval();
regMaskTP oldAssignment = newRefPosition->registerAssignment;
regMaskTP calleeSaveMask = calleeSaveRegs(interval->registerType);
newRefPosition->registerAssignment =
getConstrainedRegMask(oldAssignment, calleeSaveMask, minRegCountForRef);
if ((newRefPosition->registerAssignment != oldAssignment) && (newRefPosition->refType == RefTypeUse) &&
!interval->isLocalVar)
{
checkConflictingDefUse(newRefPosition);
}
}
}
}
#endif // DEBUG
JITDUMP("\n");
}
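// Register allocation order tables: REG_VAR_ORDER and REG_VAR_ORDER_FLT are defined per target.
// buildPhysRegRecords() below stamps each RegRecord with its position in the appropriate table.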
static const regNumber lsraRegOrder[] = {REG_VAR_ORDER};
const unsigned lsraRegOrderSize = ArrLen(lsraRegOrder);
static const regNumber lsraRegOrderFlt[] = {REG_VAR_ORDER_FLT};
const unsigned lsraRegOrderFltSize = ArrLen(lsraRegOrderFlt);
//------------------------------------------------------------------------
// buildPhysRegRecords: Make a RegRecord for each physical register
//
void LinearScan::buildPhysRegRecords()
{
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
RegRecord* curr = &physRegs[reg];
curr->init(reg);
}
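// Stamp each register in the integer allocation order with its position in that order.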
for (unsigned int i = 0; i < lsraRegOrderSize; i++)
{
regNumber reg = lsraRegOrder[i];
RegRecord* curr = &physRegs[reg];
curr->regOrder = (unsigned char)i;
}
for (unsigned int i = 0; i < lsraRegOrderFltSize; i++)
{
regNumber reg = lsraRegOrderFlt[i];
RegRecord* curr = &physRegs[reg];
curr->regOrder = (unsigned char)i;
}
}
//------------------------------------------------------------------------
// insertZeroInitRefPositions: Handle lclVars that are live-in to the first block
//
// Notes:
// Prior to calling this method, 'currentLiveVars' must be set to the set of register
// candidate variables that are liveIn to the first block.
// For each register candidate that is live-in to the first block:
// - If it is a GC ref, or if compInitMem is set, a ZeroInit RefPosition will be created.
// - Otherwise, it will be marked as spilled, since it will not be assigned a register
// on entry and will be loaded from memory on the undefined path.
// Note that, when the compInitMem option is not set, we may encounter these on
// paths that are protected by the same condition as an earlier def. However, since
// we don't do the analysis to determine this - and couldn't rely on always identifying
// such cases even if we tried - we must conservatively treat the undefined path as
// being possible. This is a relatively rare case, so the introduced conservatism is
// not expected to warrant the analysis required to determine the best placement of
// an initialization.
//
void LinearScan::insertZeroInitRefPositions()
{
assert(enregisterLocalVars);
#ifdef DEBUG
VARSET_TP expectedLiveVars(VarSetOps::Intersection(compiler, registerCandidateVars, compiler->fgFirstBB->bbLiveIn));
assert(VarSetOps::Equal(compiler, currentLiveVars, expectedLiveVars));
#endif // DEBUG
// insert defs for this, then a block boundary
VarSetOps::Iter iter(compiler, currentLiveVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
if (!varDsc->lvIsParam && isCandidateVar(varDsc))
{
JITDUMP("V%02u was live in to first block:", compiler->lvaTrackedIndexToLclNum(varIndex));
Interval* interval = getIntervalForLocalVar(varIndex);
if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet()))
{
varDsc->lvMustInit = true;
// OSR will handle init of locals and promoted fields thereof
if (compiler->lvaIsOSRLocal(compiler->lvaTrackedIndexToLclNum(varIndex)))
{
JITDUMP(" will be initialized by OSR\n");
// setIntervalAsSpilled(interval);
varDsc->lvMustInit = false;
}
JITDUMP(" creating ZeroInit\n");
RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeZeroInit, nullptr /* theTreeNode */,
allRegs(interval->registerType));
pos->setRegOptional(true);
}
else
{
setIntervalAsSpilled(interval);
JITDUMP(" marking as spilled\n");
}
}
}
// We must also insert zero-inits for any finallyVars if they are refs or if compInitMem is true.
if (compiler->lvaEnregEHVars)
{
VarSetOps::Iter iter(compiler, finallyVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
if (!varDsc->lvIsParam && isCandidateVar(varDsc))
{
JITDUMP("V%02u is a finally var:", compiler->lvaTrackedIndexToLclNum(varIndex));
Interval* interval = getIntervalForLocalVar(varIndex);
if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet()))
{
if (interval->recentRefPosition == nullptr)
{
JITDUMP(" creating ZeroInit\n");
RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeZeroInit,
nullptr /* theTreeNode */, allRegs(interval->registerType));
pos->setRegOptional(true);
varDsc->lvMustInit = true;
}
else
{
// We must only generate one entry RefPosition for each Interval. Since this is not
// a parameter, it can't be RefTypeParamDef, so it must be RefTypeZeroInit, which
// we must have generated for the live-in case above.
assert(interval->recentRefPosition->refType == RefTypeZeroInit);
JITDUMP(" already ZeroInited\n");
}
}
}
}
}
}
#if defined(UNIX_AMD64_ABI)
//------------------------------------------------------------------------
// unixAmd64UpdateRegStateForArg: Sets the register state for an argument of type STRUCT for System V systems.
//
// Arguments:
// argDsc - the LclVarDsc for the argument of interest
//
// Notes:
// See Compiler::raUpdateRegStateForArg(RegState *regState, LclVarDsc *argDsc) in regalloc.cpp
// for how state for argument is updated for unix non-structs and Windows AMD64 structs.
//
void LinearScan::unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc)
{
assert(varTypeIsStruct(argDsc));
RegState* intRegState = &compiler->codeGen->intRegState;
RegState* floatRegState = &compiler->codeGen->floatRegState;
if ((argDsc->GetArgReg() != REG_STK) && (argDsc->GetArgReg() != REG_NA))
{
if (genRegMask(argDsc->GetArgReg()) & (RBM_ALLFLOAT))
{
assert(genRegMask(argDsc->GetArgReg()) & (RBM_FLTARG_REGS));
floatRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetArgReg());
}
else
{
assert(genRegMask(argDsc->GetArgReg()) & (RBM_ARG_REGS));
intRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetArgReg());
}
}
if ((argDsc->GetOtherArgReg() != REG_STK) && (argDsc->GetOtherArgReg() != REG_NA))
{
if (genRegMask(argDsc->GetOtherArgReg()) & (RBM_ALLFLOAT))
{
assert(genRegMask(argDsc->GetOtherArgReg()) & (RBM_FLTARG_REGS));
floatRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetOtherArgReg());
}
else
{
assert(genRegMask(argDsc->GetOtherArgReg()) & (RBM_ARG_REGS));
intRegState->rsCalleeRegArgMaskLiveIn |= genRegMask(argDsc->GetOtherArgReg());
}
}
}
#endif // defined(UNIX_AMD64_ABI)
//------------------------------------------------------------------------
// updateRegStateForArg: Updates rsCalleeRegArgMaskLiveIn for the appropriate
// regState (either compiler->intRegState or compiler->floatRegState),
// with the lvArgReg on "argDsc"
//
// Arguments:
// argDsc - the argument for which the state is to be updated.
//
// Return Value: None
//
// Assumptions:
// The argument is live on entry to the function
// (or is untracked and therefore assumed live)
//
// Notes:
// This relies on a method in regAlloc.cpp that is shared between LSRA
// and regAlloc. It is further abstracted here because regState is updated
// separately for tracked and untracked variables in LSRA.
//
void LinearScan::updateRegStateForArg(LclVarDsc* argDsc)
{
#if defined(UNIX_AMD64_ABI)
// For System V AMD64 calls the argDsc can have 2 registers (for structs.)
// Handle them here.
if (varTypeIsStruct(argDsc))
{
unixAmd64UpdateRegStateForArg(argDsc);
}
else
#endif // defined(UNIX_AMD64_ABI)
{
RegState* intRegState = &compiler->codeGen->intRegState;
RegState* floatRegState = &compiler->codeGen->floatRegState;
bool isFloat = emitter::isFloatReg(argDsc->GetArgReg());
if (argDsc->lvIsHfaRegArg())
{
isFloat = true;
}
if (isFloat)
{
JITDUMP("Float arg V%02u in reg %s\n", compiler->lvaGetLclNum(argDsc), getRegName(argDsc->GetArgReg()));
compiler->raUpdateRegStateForArg(floatRegState, argDsc);
}
else
{
JITDUMP("Int arg V%02u in reg %s\n", compiler->lvaGetLclNum(argDsc), getRegName(argDsc->GetArgReg()));
#if FEATURE_MULTIREG_ARGS
if (argDsc->GetOtherArgReg() != REG_NA)
{
JITDUMP("(second half) in reg %s\n", getRegName(argDsc->GetOtherArgReg()));
}
#endif // FEATURE_MULTIREG_ARGS
compiler->raUpdateRegStateForArg(intRegState, argDsc);
}
}
}
//------------------------------------------------------------------------
// buildIntervals: The main entry point for building the data structures over
// which we will do register allocation.
//
void LinearScan::buildIntervals()
{
BasicBlock* block;
JITDUMP("\nbuildIntervals ========\n");
// Build (empty) records for all of the physical registers
buildPhysRegRecords();
#ifdef DEBUG
if (VERBOSE)
{
printf("\n-----------------\n");
printf("LIVENESS:\n");
printf("-----------------\n");
for (BasicBlock* const block : compiler->Blocks())
{
printf(FMT_BB " use def in out\n", block->bbNum);
dumpConvertedVarSet(compiler, block->bbVarUse);
printf("\n");
dumpConvertedVarSet(compiler, block->bbVarDef);
printf("\n");
dumpConvertedVarSet(compiler, block->bbLiveIn);
printf("\n");
dumpConvertedVarSet(compiler, block->bbLiveOut);
printf("\n");
}
}
#endif // DEBUG
#if DOUBLE_ALIGN
// We will determine whether we should double align the frame during
// identifyCandidates(), but we initially assume that we will not.
doDoubleAlign = false;
#endif
identifyCandidates();
// Figure out if we're going to use a frame pointer. We need to do this before building
// the ref positions, because those objects will embed the frame register in various register masks
// if the frame pointer is not reserved. If we decide to have a frame pointer, setFrameType() will
// remove the frame pointer from the masks.
setFrameType();
DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_PRE));
// Second part: create entry RefPositions for the parameters, then walk the blocks building
// RefPositions for each node.
JITDUMP("\nbuildIntervals second part ========\n");
currentLoc = 0;
// TODO-Cleanup: This duplicates prior behavior where entry (ParamDef) RefPositions were
// being assigned the bbNum of the last block traversed in the 2nd phase of Lowering.
// Previously, the block sequencing was done for the (formerly separate) Build pass,
// and the curBBNum was left as the last block sequenced. This block was then used to set the
// weight for the entry (ParamDef) RefPositions. It would be logical to set this to the
// normalized entry weight (compiler->fgCalledCount), but that results in a net regression.
if (!blockSequencingDone)
{
setBlockSequence();
}
// Next, create ParamDef RefPositions for all the tracked parameters, in order of their varIndex.
// Assign these RefPositions to the (nonexistent) BB0.
curBBNum = 0;
RegState* intRegState = &compiler->codeGen->intRegState;
RegState* floatRegState = &compiler->codeGen->floatRegState;
intRegState->rsCalleeRegArgMaskLiveIn = RBM_NONE;
floatRegState->rsCalleeRegArgMaskLiveIn = RBM_NONE;
for (unsigned int varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
{
LclVarDsc* argDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
if (!argDsc->lvIsParam)
{
continue;
}
// Only reserve a register if the argument is actually used.
// Is it dead on entry? If compJmpOpUsed is true, then the arguments
// have to be kept alive, so we have to consider it as live on entry.
// Use lvRefCnt instead of checking bbLiveIn because if it's volatile we
// won't have done dataflow on it, but it needs to be marked as live-in so
// it will get saved in the prolog.
if (!compiler->compJmpOpUsed && argDsc->lvRefCnt() == 0 && !compiler->opts.compDbgCode)
{
continue;
}
if (argDsc->lvIsRegArg)
{
updateRegStateForArg(argDsc);
}
if (isCandidateVar(argDsc))
{
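// Create an entry (ParamDef) RefPosition for this register-candidate parameter, assigning it to
// its incoming argument register if it has one.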
Interval* interval = getIntervalForLocalVar(varIndex);
const var_types regType = argDsc->GetRegisterType();
regMaskTP mask = allRegs(regType);
if (argDsc->lvIsRegArg)
{
// Set this interval as currently assigned to that register
regNumber inArgReg = argDsc->GetArgReg();
assert(inArgReg < REG_COUNT);
mask = genRegMask(inArgReg);
assignPhysReg(inArgReg, interval);
INDEBUG(registersToDump |= getRegMask(inArgReg, interval->registerType));
}
RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeParamDef, nullptr, mask);
pos->setRegOptional(true);
}
else if (varTypeIsStruct(argDsc->lvType))
{
for (unsigned fieldVarNum = argDsc->lvFieldLclStart;
fieldVarNum < argDsc->lvFieldLclStart + argDsc->lvFieldCnt; ++fieldVarNum)
{
const LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldVarNum);
if (fieldVarDsc->lvLRACandidate)
{
assert(fieldVarDsc->lvTracked);
Interval* interval = getIntervalForLocalVar(fieldVarDsc->lvVarIndex);
RefPosition* pos =
newRefPosition(interval, MinLocation, RefTypeParamDef, nullptr, allRegs(TypeGet(fieldVarDsc)));
pos->setRegOptional(true);
}
}
}
else
{
// We can overwrite the register (i.e. codegen saves it on entry)
assert(argDsc->lvRefCnt() == 0 || !argDsc->lvIsRegArg || argDsc->lvDoNotEnregister ||
!argDsc->lvLRACandidate || (varTypeIsFloating(argDsc->TypeGet()) && compiler->opts.compDbgCode));
}
}
// Now set up the reg state for the non-tracked args
// (We do this here because we want to generate the ParamDef RefPositions in tracked
// order, so that the loop doesn't hit the non-tracked args)
for (unsigned argNum = 0; argNum < compiler->info.compArgsCount; argNum++)
{
LclVarDsc* argDsc = compiler->lvaGetDesc(argNum);
if (argDsc->lvPromotedStruct())
{
for (unsigned fieldVarNum = argDsc->lvFieldLclStart;
fieldVarNum < argDsc->lvFieldLclStart + argDsc->lvFieldCnt; ++fieldVarNum)
{
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldVarNum);
noway_assert(fieldVarDsc->lvIsParam);
if (!fieldVarDsc->lvTracked && fieldVarDsc->lvIsRegArg)
{
updateRegStateForArg(fieldVarDsc);
}
}
}
else
{
noway_assert(argDsc->lvIsParam);
if (!argDsc->lvTracked && argDsc->lvIsRegArg)
{
updateRegStateForArg(argDsc);
}
}
}
// If there is a secret stub param, it is also live in
if (compiler->info.compPublishStubParam)
{
intRegState->rsCalleeRegArgMaskLiveIn |= RBM_SECRET_STUB_PARAM;
}
BasicBlock* predBlock = nullptr;
BasicBlock* prevBlock = nullptr;
// Initialize currentLiveVars to the empty set. We will set it to the current
// live-in at the entry to each block (this will include the incoming args on
// the first block).
VarSetOps::AssignNoCopy(compiler, currentLiveVars, VarSetOps::MakeEmpty(compiler));
for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
{
JITDUMP("\nNEW BLOCK " FMT_BB "\n", block->bbNum);
bool predBlockIsAllocated = false;
predBlock = findPredBlockForLiveIn(block, prevBlock DEBUGARG(&predBlockIsAllocated));
if (predBlock != nullptr)
{
JITDUMP("\n\nSetting " FMT_BB " as the predecessor for determining incoming variable registers of " FMT_BB
"\n",
predBlock->bbNum, block->bbNum);
assert(predBlock->bbNum <= bbNumMaxBeforeResolution);
blockInfo[block->bbNum].predBBNum = predBlock->bbNum;
}
if (enregisterLocalVars)
{
VarSetOps::AssignNoCopy(compiler, currentLiveVars,
VarSetOps::Intersection(compiler, registerCandidateVars, block->bbLiveIn));
if (block == compiler->fgFirstBB)
{
insertZeroInitRefPositions();
// The first real location is at 1; 0 is for the entry.
currentLoc = 1;
}
// For blocks that don't have EHBoundaryIn, we need DummyDefs for cases where "predBlock" isn't
// really a predecessor.
// Note that it's possible to have uses of uninitialized variables, in which case even the first
// block may require DummyDefs, which we are not currently adding - this means that these variables
// will always be considered to be in memory on entry (and reloaded when the use is encountered).
// TODO-CQ: Consider how best to tune this. Currently, if we create DummyDefs for uninitialized
// variables (which may actually be initialized along the dynamically executed paths, but not
// on all static paths), we wind up with excessive liveranges for some of these variables.
if (!blockInfo[block->bbNum].hasEHBoundaryIn)
{
// Any lclVars live-in on a non-EH boundary edge are resolution candidates.
VarSetOps::UnionD(compiler, resolutionCandidateVars, currentLiveVars);
if (block != compiler->fgFirstBB)
{
VARSET_TP newLiveIn(VarSetOps::MakeCopy(compiler, currentLiveVars));
if (predBlock != nullptr)
{
// Compute set difference: newLiveIn = currentLiveVars - predBlock->bbLiveOut
VarSetOps::DiffD(compiler, newLiveIn, predBlock->bbLiveOut);
}
// Don't create dummy defs for EH vars; we'll load them from the stack as/when needed.
VarSetOps::DiffD(compiler, newLiveIn, exceptVars);
// Create dummy def RefPositions
if (!VarSetOps::IsEmpty(compiler, newLiveIn))
{
// If we are using locations from a predecessor, we should never require DummyDefs.
assert(!predBlockIsAllocated);
JITDUMP("Creating dummy definitions\n");
VarSetOps::Iter iter(compiler, newLiveIn);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
// Add a dummyDef for any candidate vars that are in the "newLiveIn" set.
LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
assert(isCandidateVar(varDsc));
Interval* interval = getIntervalForLocalVar(varIndex);
RefPosition* pos = newRefPosition(interval, currentLoc, RefTypeDummyDef, nullptr,
allRegs(interval->registerType));
pos->setRegOptional(true);
}
JITDUMP("Finished creating dummy definitions\n\n");
}
}
}
}
// Add a dummy RefPosition to mark the block boundary.
// Note that we do this AFTER adding the exposed uses above, because the
// register positions for those exposed uses need to be recorded at
// this point.
RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE);
currentLoc += 2;
JITDUMP("\n");
if (firstColdLoc == MaxLocation)
{
if (block->isRunRarely())
{
firstColdLoc = currentLoc;
JITDUMP("firstColdLoc = %d\n", firstColdLoc);
}
}
else
{
// TODO: We'd like to assert the following but we don't currently ensure that only
// "RunRarely" blocks are contiguous.
// (The funclets will generally be last, but we don't follow layout order, so we
// don't have to preserve that in the block sequence.)
// assert(block->isRunRarely());
}
// For frame poisoning we generate code into scratch BB right after prolog since
// otherwise the prolog might become too large. In this case we will put the poison immediate
// into the scratch register, so it will be killed here.
if (compiler->compShouldPoisonFrame() && compiler->fgFirstBBisScratch() && block == compiler->fgFirstBB)
{
regMaskTP killed;
#if defined(TARGET_XARCH)
// Poisoning uses EAX for small vars and rep stosd that kills edi, ecx and eax for large vars.
killed = RBM_EDI | RBM_ECX | RBM_EAX;
#else
// Poisoning uses REG_SCRATCH for small vars and memset helper for big vars.
killed = genRegMask(REG_SCRATCH) | compiler->compHelperCallKillSet(CORINFO_HELP_MEMSET);
#endif
addRefsForPhysRegMask(killed, currentLoc + 1, RefTypeKill, true);
currentLoc += 2;
}
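// Now walk the LIR for this block, building RefPositions for each node in execution order.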
LIR::Range& blockRange = LIR::AsRange(block);
for (GenTree* node : blockRange)
{
// We increment the location of each tree node by 2 so that the node definition, if any,
// is at a new location and doesn't interfere with the uses.
// For multi-reg local stores, the 'BuildMultiRegStoreLoc' method will further increment the
// location by 2 for each destination register beyond the first.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
node->gtSeqNum = currentLoc;
// In DEBUG, we want to set the gtRegTag to GT_REGTAG_REG, so that subsequent dumps will show the register
// value.
// Although this looks like a no-op, it sets the tag.
node->SetRegNum(node->GetRegNum());
#endif
buildRefPositionsForNode(node, currentLoc);
#ifdef DEBUG
if (currentLoc > maxNodeLocation)
{
maxNodeLocation = currentLoc;
}
#endif // DEBUG
currentLoc += 2;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// At the end of each block, create upperVectorRestores for any largeVectorVars that may be
// partiallySpilled (during the build phase all intervals will be marked isPartiallySpilled if
// they *may* be partially spilled at any point).
if (enregisterLocalVars)
{
VarSetOps::Iter largeVectorVarsIter(compiler, largeVectorVars);
unsigned largeVectorVarIndex = 0;
while (largeVectorVarsIter.NextElem(&largeVectorVarIndex))
{
Interval* lclVarInterval = getIntervalForLocalVar(largeVectorVarIndex);
buildUpperVectorRestoreRefPosition(lclVarInterval, currentLoc, nullptr);
}
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Note: the visited set is cleared in LinearScan::doLinearScan()
markBlockVisited(block);
if (!defList.IsEmpty())
{
INDEBUG(dumpDefList());
assert(!"Expected empty defList at end of block");
}
if (enregisterLocalVars)
{
// Insert exposed uses for a lclVar that is live-out of 'block' but not live-in to the
// next block, or any unvisited successors.
// This will address lclVars that are live on a backedge, as well as those that are kept
// live at a GT_JMP.
//
// Blocks ending with "jmp method" are marked as BBJ_HAS_JMP,
// and jmp call is represented using GT_JMP node which is a leaf node.
// Liveness phase keeps all the arguments of the method live till the end of
// block by adding them to liveout set of the block containing GT_JMP.
//
// The target of a GT_JMP implicitly uses all the current method arguments, however
// there are no actual references to them. This can cause LSRA to assert, because
// the variables are live but it sees no references. In order to correctly model the
// liveness of these arguments, we add dummy exposed uses, in the same manner as for
// backward branches. This will happen automatically via expUseSet.
//
// Note that a block ending with GT_JMP has no successors and hence the variables
// for which dummy use ref positions are added are arguments of the method.
VARSET_TP expUseSet(VarSetOps::MakeCopy(compiler, block->bbLiveOut));
VarSetOps::IntersectionD(compiler, expUseSet, registerCandidateVars);
BasicBlock* nextBlock = getNextBlock();
if (nextBlock != nullptr)
{
VarSetOps::DiffD(compiler, expUseSet, nextBlock->bbLiveIn);
}
for (BasicBlock* succ : block->GetAllSuccs(compiler))
{
if (VarSetOps::IsEmpty(compiler, expUseSet))
{
break;
}
if (isBlockVisited(succ))
{
continue;
}
VarSetOps::DiffD(compiler, expUseSet, succ->bbLiveIn);
}
if (!VarSetOps::IsEmpty(compiler, expUseSet))
{
JITDUMP("Exposed uses:");
VarSetOps::Iter iter(compiler, expUseSet);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
assert(isCandidateVar(varDsc));
Interval* interval = getIntervalForLocalVar(varIndex);
RefPosition* pos =
newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
pos->setRegOptional(true);
JITDUMP(" V%02u", varNum);
}
JITDUMP("\n");
}
// Clear the "last use" flag on any vars that are live-out from this block.
VARSET_TP bbLiveDefs(VarSetOps::Intersection(compiler, registerCandidateVars, block->bbLiveOut));
VarSetOps::Iter iter(compiler, bbLiveDefs);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
LclVarDsc* const varDsc = compiler->lvaGetDesc(varNum);
assert(isCandidateVar(varDsc));
RefPosition* const lastRP = getIntervalForLocalVar(varIndex)->lastRefPosition;
// We should be able to assert that lastRP is non-null if it is live-out, but sometimes liveness
// lies.
if ((lastRP != nullptr) && (lastRP->bbNum == block->bbNum))
{
lastRP->lastUse = false;
}
}
#ifdef DEBUG
checkLastUses(block);
if (VERBOSE)
{
printf("use: ");
dumpConvertedVarSet(compiler, block->bbVarUse);
printf("\ndef: ");
dumpConvertedVarSet(compiler, block->bbVarDef);
printf("\n");
}
#endif // DEBUG
}
prevBlock = block;
}
if (enregisterLocalVars)
{
if (compiler->lvaKeepAliveAndReportThis())
{
// If we need to KeepAliveAndReportThis, add a dummy exposed use of it at the end
unsigned keepAliveVarNum = compiler->info.compThisArg;
assert(compiler->info.compIsStatic == false);
const LclVarDsc* varDsc = compiler->lvaGetDesc(keepAliveVarNum);
if (isCandidateVar(varDsc))
{
JITDUMP("Adding exposed use of this, for lvaKeepAliveAndReportThis\n");
Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex);
RefPosition* pos =
newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
pos->setRegOptional(true);
}
}
// Adjust heuristics for writeThru intervals.
if (compiler->compHndBBtabCount > 0)
{
VarSetOps::Iter iter(compiler, exceptVars);
unsigned varIndex = 0;
while (iter.NextElem(&varIndex))
{
unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
Interval* interval = getIntervalForLocalVar(varIndex);
assert(interval->isWriteThru);
weight_t weight = varDsc->lvRefCntWtd();
// We'd like to only allocate registers for EH vars that have enough uses
// to compensate for the additional registers being live (and for the possibility
// that we may have to insert an additional copy).
// However, we don't currently have that information available. Instead, we'll
// aggressively assume that these vars are defined once, at their first RefPosition.
//
RefPosition* firstRefPosition = interval->firstRefPosition;
// Incoming reg args are given an initial weight of 2 * BB_UNITY_WEIGHT
// (see lvaComputeRefCounts(); this may be reviewed/changed in future).
//
weight_t initialWeight = (firstRefPosition->refType == RefTypeParamDef)
? (2 * BB_UNITY_WEIGHT)
: blockInfo[firstRefPosition->bbNum].weight;
weight -= initialWeight;
// If the remaining weight is less than the initial weight, we'd like to allocate it only
// opportunistically, but we don't currently have a mechanism to do so.
// For now, we'll just avoid using callee-save registers if the weight is too low.
if (interval->preferCalleeSave)
{
// The benefit of a callee-save register isn't as high as it would be for a normal arg.
// We'll have at least the cost of saving & restoring the callee-save register,
// so we won't break even until we have at least 4 * BB_UNITY_WEIGHT.
// Given that we also don't have a good way to tell whether the variable is live
// across a call in the non-EH code, we'll be extra conservative about this.
// Note that for writeThru intervals we don't update the preferences to be only callee-save.
unsigned calleeSaveCount =
(varTypeUsesFloatReg(interval->registerType)) ? CNT_CALLEE_SAVED_FLOAT : CNT_CALLEE_ENREG;
if ((weight <= (BB_UNITY_WEIGHT * 7)) || varDsc->lvVarIndex >= calleeSaveCount)
{
// If this is relatively low weight, don't prefer callee-save at all.
interval->preferCalleeSave = false;
}
else
{
// In other cases, we'll add in the callee-save regs to the preferences, but not clear
// the non-callee-save regs. We also handle this case specially in tryAllocateFreeReg().
interval->registerPreferences |= calleeSaveRegs(interval->registerType);
}
}
}
}
#ifdef DEBUG
if (getLsraExtendLifeTimes())
{
for (unsigned lclNum = 0; lclNum < compiler->lvaCount; lclNum++)
{
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
if (varDsc->lvLRACandidate)
{
JITDUMP("Adding exposed use of V%02u for LsraExtendLifetimes\n", lclNum);
Interval* interval = getIntervalForLocalVar(varDsc->lvVarIndex);
RefPosition* pos =
newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
pos->setRegOptional(true);
}
}
}
#endif // DEBUG
}
// If the last block has successors, create a RefTypeBB to record
// what's live
if (prevBlock->NumSucc(compiler) > 0)
{
RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE);
}
#ifdef DEBUG
// Make sure we don't have any blocks that were not visited
for (BasicBlock* const block : compiler->Blocks())
{
assert(isBlockVisited(block));
}
if (VERBOSE)
{
lsraDumpIntervals("BEFORE VALIDATING INTERVALS");
dumpRefPositions("BEFORE VALIDATING INTERVALS");
}
validateIntervals();
#endif // DEBUG
}
#ifdef DEBUG
//------------------------------------------------------------------------
// validateIntervals: A DEBUG-only method that checks that:
// - the lclVar RefPositions do not reflect uses of undefined values
// - A singleDef interval should have just first RefPosition as RefTypeDef.
//
// TODO-Cleanup: If an undefined use is encountered, it merely prints a message
// but should probably assert.
//
void LinearScan::validateIntervals()
{
if (enregisterLocalVars)
{
for (unsigned i = 0; i < compiler->lvaTrackedCount; i++)
{
if (!compiler->lvaGetDescByTrackedIndex(i)->lvLRACandidate)
{
continue;
}
Interval* interval = getIntervalForLocalVar(i);
bool defined = false;
unsigned lastUseBBNum = 0;
JITDUMP("-----------------\n");
for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
{
if (VERBOSE)
{
ref->dump(this);
}
RefType refType = ref->refType;
if (!defined && RefTypeIsUse(refType) && (lastUseBBNum == ref->bbNum))
{
if (!ref->lastUse)
{
if (compiler->info.compMethodName != nullptr)
{
JITDUMP("%s: ", compiler->info.compMethodName);
}
JITDUMP("LocalVar V%02u: undefined use at %u\n", interval->varNum, ref->nodeLocation);
assert(false);
}
}
// For single-def intervals, only the first RefPosition should be a RefTypeDef.
if (interval->isSingleDef && RefTypeIsDef(refType))
{
assert(ref == interval->firstRefPosition);
}
// Note that there can be multiple last uses if they are on disjoint paths,
// so we can't really check the lastUse flag
if (ref->lastUse)
{
defined = false;
lastUseBBNum = ref->bbNum;
}
if (RefTypeIsDef(refType))
{
defined = true;
}
}
}
}
}
#endif // DEBUG
#if defined(TARGET_XARCH) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// setTgtPref: Set a preference relationship between the given Interval
// and a Use RefPosition.
//
// Arguments:
// interval - An interval whose defining instruction has tgtPrefUse as a use
// tgtPrefUse - The use RefPosition
//
// Notes:
// This is called when we would like tgtPrefUse and this def to get the same register.
// This is only desirable if the use is a last use, which it is if it is a non-local tree temp,
// *or* if it is the last use of a local var (i.e. its GTF_VAR_DEATH flag is set).
// Note that we don't yet have valid lastUse information in the RefPositions that we're building
// (every RefPosition is set as a lastUse until we encounter a new use), so we have to rely on the treeNode.
// This may be called for multiple uses, in which case 'interval' will only get preferenced at most
// to the first one (if it didn't already have a 'relatedInterval').
//
void setTgtPref(Interval* interval, RefPosition* tgtPrefUse)
{
if (tgtPrefUse != nullptr)
{
Interval* useInterval = tgtPrefUse->getInterval();
if (!useInterval->isLocalVar || (tgtPrefUse->treeNode == nullptr) ||
((tgtPrefUse->treeNode->gtFlags & GTF_VAR_DEATH) != 0))
{
// Set the use interval as related to the interval we're defining.
useInterval->assignRelatedIntervalIfUnassigned(interval);
}
}
}
#endif // TARGET_XARCH || FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// BuildDef: Build a RefTypeDef RefPosition for the given node
//
// Arguments:
// tree - The node that defines a register
// dstCandidates - The candidate registers for the definition
// multiRegIdx - The index of the definition, defaults to zero.
// Only non-zero for multi-reg nodes.
//
// Return Value:
// The newly created RefPosition.
//
// Notes:
// Adds the RefInfo for the definition to the defList.
//
RefPosition* LinearScan::BuildDef(GenTree* tree, regMaskTP dstCandidates, int multiRegIdx)
{
assert(!tree->isContained());
if (dstCandidates != RBM_NONE)
{
assert((tree->GetRegNum() == REG_NA) || (dstCandidates == genRegMask(tree->GetRegByIndex(multiRegIdx))));
}
RegisterType type;
if (!tree->IsMultiRegNode())
{
type = getDefType(tree);
}
else
{
type = tree->GetRegTypeByIndex(multiRegIdx);
}
if (varTypeUsesFloatReg(type))
{
compiler->compFloatingPointUsed = true;
}
Interval* interval = newInterval(type);
if (tree->GetRegNum() != REG_NA)
{
if (!tree->IsMultiRegNode() || (multiRegIdx == 0))
{
assert((dstCandidates == RBM_NONE) || (dstCandidates == genRegMask(tree->GetRegNum())));
dstCandidates = genRegMask(tree->GetRegNum());
}
else
{
assert(isSingleRegister(dstCandidates));
}
}
#ifdef TARGET_X86
else if (varTypeIsByte(tree))
{
if (dstCandidates == RBM_NONE)
{
dstCandidates = allRegs(TYP_INT);
}
dstCandidates &= ~RBM_NON_BYTE_REGS;
assert(dstCandidates != RBM_NONE);
}
#endif // TARGET_X86
if (pendingDelayFree)
{
interval->hasInterferingUses = true;
// pendingDelayFree = false;
}
RefPosition* defRefPosition =
newRefPosition(interval, currentLoc + 1, RefTypeDef, tree, dstCandidates, multiRegIdx);
if (tree->IsUnusedValue())
{
defRefPosition->isLocalDefUse = true;
defRefPosition->lastUse = true;
}
else
{
RefInfoListNode* refInfo = listNodePool.GetNode(defRefPosition, tree);
defList.Append(refInfo);
}
#if defined(TARGET_XARCH) || defined(FEATURE_HW_INTRINSICS)
setTgtPref(interval, tgtPrefUse);
setTgtPref(interval, tgtPrefUse2);
#endif // TARGET_XARCH || FEATURE_HW_INTRINSICS
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
assert(!interval->isPartiallySpilled);
#endif
return defRefPosition;
}
//------------------------------------------------------------------------
// BuildDef: Build one or more RefTypeDef RefPositions for the given node
//
// Arguments:
// tree - The node that defines a register
// dstCount - The number of registers defined by the node
// dstCandidates - the candidate registers for the definition
//
// Notes:
// Adds the RefInfo for the definitions to the defList.
//
void LinearScan::BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates)
{
bool fixedReg = false;
if ((dstCount > 1) && (dstCandidates != RBM_NONE) && ((int)genCountBits(dstCandidates) == dstCount))
{
fixedReg = true;
}
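// When 'fixedReg' is true, each def below gets a single-register candidate set: the i'th ABI return
// register for a multi-reg call, or successive registers from 'dstCandidates' otherwise.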
const ReturnTypeDesc* retTypeDesc = nullptr;
if (tree->IsMultiRegCall())
{
retTypeDesc = tree->AsCall()->GetReturnTypeDesc();
}
for (int i = 0; i < dstCount; i++)
{
regMaskTP thisDstCandidates;
if (fixedReg)
{
// In case of multi-reg call node, we have to query the i'th position return register.
// For all other cases of multi-reg definitions, the registers must be in sequential order.
if (retTypeDesc != nullptr)
{
thisDstCandidates = genRegMask(tree->AsCall()->GetReturnTypeDesc()->GetABIReturnReg(i));
assert((dstCandidates & thisDstCandidates) != RBM_NONE);
}
else
{
thisDstCandidates = genFindLowestBit(dstCandidates);
}
dstCandidates &= ~thisDstCandidates;
}
else
{
thisDstCandidates = dstCandidates;
}
BuildDef(tree, thisDstCandidates, i);
}
}
//------------------------------------------------------------------------
// BuildDef: Build one or more RefTypeDef RefPositions for the given node,
// as well as kills as specified by the given mask.
//
// Arguments:
// tree - The node that defines a register
// dstCount - The number of registers defined by the node
// dstCandidates - The candidate registers for the definition
// killMask - The mask of registers killed by this node
//
// Notes:
// Adds the RefInfo for the definitions to the defList.
// The def and kill functionality is folded into a single method so that the
// save and restores of upper vector registers can be bracketed around the def.
//
void LinearScan::BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask)
{
assert(killMask == getKillSetForNode(tree));
// Call this even when killMask is RBM_NONE, as we have to check for some special cases
buildKillPositionsForNode(tree, currentLoc + 1, killMask);
if (killMask != RBM_NONE)
{
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Build RefPositions to account for the fact that, even in a callee-save register, the upper half of any large
// vector will be killed by a call.
// We actually need to find any calls that kill the upper-half of the callee-save vector registers.
// But we will use as a proxy any node that kills floating point registers.
// (Note that some calls are masquerading as other nodes at this point so we can't just check for calls.)
// We call this unconditionally for such nodes, as we will create RefPositions for any large vector tree temps
// even if 'enregisterLocalVars' is false, or 'liveLargeVectors' is empty, though currently the allocation
// phase will fully (rather than partially) spill those, so we don't need to build the UpperVectorRestore
// RefPositions in that case.
// This must be done after the kills, so that we know which large vectors are still live.
//
if ((killMask & RBM_FLT_CALLEE_TRASH) != RBM_NONE)
{
buildUpperVectorSaveRefPositions(tree, currentLoc + 1, killMask);
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
}
// Now, create the Def(s)
BuildDefs(tree, dstCount, dstCandidates);
}
//------------------------------------------------------------------------
// BuildUse: Remove the RefInfoListNode for the given multi-reg index of the given node from
// the defList, and build a use RefPosition for the associated Interval.
//
// Arguments:
// operand - The node of interest
// candidates - The register candidates for the use
// multiRegIdx - The index of the multireg def/use
//
// Return Value:
// The newly created use RefPosition
//
// Notes:
// The node must not be contained, and must have been processed by buildRefPositionsForNode().
//
RefPosition* LinearScan::BuildUse(GenTree* operand, regMaskTP candidates, int multiRegIdx)
{
assert(!operand->isContained());
Interval* interval;
bool regOptional = operand->IsRegOptional();
if (isCandidateLocalRef(operand))
{
interval = getIntervalForLocalVarNode(operand->AsLclVarCommon());
// We have only approximate last-use information at this point. This is because the
// execution order doesn't actually reflect the true order in which the localVars
// are referenced - but the order of the RefPositions will, so we recompute it after
// RefPositions are built.
// Use the old value for setting currentLiveVars - note that we do this with the
// not-quite-correct setting of lastUse. However, this is OK because
// 1) this is only for preferencing, which doesn't require strict correctness, and
// 2) the cases where these out-of-order uses occur should not overlap a kill.
// TODO-Throughput: clean this up once we have the execution order correct. At that point
// we can update currentLiveVars at the same place that we create the RefPosition.
if ((operand->gtFlags & GTF_VAR_DEATH) != 0)
{
unsigned varIndex = interval->getVarIndex(compiler);
VarSetOps::RemoveElemD(compiler, currentLiveVars, varIndex);
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
buildUpperVectorRestoreRefPosition(interval, currentLoc, operand);
#endif
}
else if (operand->IsMultiRegLclVar())
{
assert(compiler->lvaEnregMultiRegVars);
LclVarDsc* varDsc = compiler->lvaGetDesc(operand->AsLclVar());
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + multiRegIdx);
interval = getIntervalForLocalVar(fieldVarDsc->lvVarIndex);
if (operand->AsLclVar()->IsLastUse(multiRegIdx))
{
VarSetOps::RemoveElemD(compiler, currentLiveVars, fieldVarDsc->lvVarIndex);
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
buildUpperVectorRestoreRefPosition(interval, currentLoc, operand);
#endif
}
else
{
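// This is a tree temp: locate the def that buildRefPositionsForNode added to the defList,
// use its interval for this use, and return the list node to the pool.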
RefInfoListNode* refInfo = defList.removeListNode(operand, multiRegIdx);
RefPosition* defRefPos = refInfo->ref;
assert(defRefPos->multiRegIdx == multiRegIdx);
interval = defRefPos->getInterval();
listNodePool.ReturnNode(refInfo);
operand = nullptr;
}
RefPosition* useRefPos = newRefPosition(interval, currentLoc, RefTypeUse, operand, candidates, multiRegIdx);
useRefPos->setRegOptional(regOptional);
return useRefPos;
}
//------------------------------------------------------------------------
// BuildIndirUses: Build Use RefPositions for an indirection that might be contained
//
// Arguments:
//    indirTree  - The indirection node of interest
//    candidates - The register candidates for the uses
//
// Return Value:
// The number of source registers used by the *parent* of this node.
//
// Notes:
// This method may only be used if the candidates are the same for all sources.
//
int LinearScan::BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates)
{
GenTree* const addr = indirTree->gtOp1;
return BuildAddrUses(addr, candidates);
}
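//------------------------------------------------------------------------
// BuildAddrUses: Build Use RefPositions for an address computation that might be contained
//
// Arguments:
//    addr       - The address node of interest
//    candidates - The register candidates for the uses
//
// Return Value:
//    The number of source registers used by the given address.
//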
int LinearScan::BuildAddrUses(GenTree* addr, regMaskTP candidates)
{
if (!addr->isContained())
{
BuildUse(addr, candidates);
return 1;
}
if (!addr->OperIs(GT_LEA))
{
return 0;
}
GenTreeAddrMode* const addrMode = addr->AsAddrMode();
unsigned srcCount = 0;
if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained())
{
BuildUse(addrMode->Base(), candidates);
srcCount++;
}
if (addrMode->Index() != nullptr)
{
if (!addrMode->Index()->isContained())
{
BuildUse(addrMode->Index(), candidates);
srcCount++;
}
#ifdef TARGET_ARM64
else if (addrMode->Index()->OperIs(GT_BFIZ))
{
GenTreeCast* cast = addrMode->Index()->gtGetOp1()->AsCast();
assert(cast->isContained());
BuildUse(cast->CastOp(), candidates);
srcCount++;
}
#endif
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildOperandUses: Build Use RefPositions for an operand that might be contained.
//
// Arguments:
//    node       - The node of interest
//    candidates - The register candidates for the use(s)
//
// Return Value:
// The number of source registers used by the *parent* of this node.
//
int LinearScan::BuildOperandUses(GenTree* node, regMaskTP candidates)
{
if (!node->isContained())
{
BuildUse(node, candidates);
return 1;
}
#ifdef TARGET_ARM64
// Must happen before OperIsHWIntrinsic case,
// but this occurs when a vector zero node is marked as contained.
if (node->IsVectorZero())
{
return 0;
}
#endif
#if !defined(TARGET_64BIT)
if (node->OperIs(GT_LONG))
{
return BuildBinaryUses(node->AsOp(), candidates);
}
#endif // !defined(TARGET_64BIT)
if (node->OperIsIndir())
{
return BuildIndirUses(node->AsIndir(), candidates);
}
if (node->OperIs(GT_LEA))
{
return BuildAddrUses(node, candidates);
}
#ifdef FEATURE_HW_INTRINSICS
if (node->OperIsHWIntrinsic())
{
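// A contained memory-load intrinsic consumes only its address operand; otherwise a contained
// intrinsic reaching here is expected to have exactly one operand, for which we build a use.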
if (node->AsHWIntrinsic()->OperIsMemoryLoad())
{
return BuildAddrUses(node->AsHWIntrinsic()->Op(1));
}
assert(node->AsHWIntrinsic()->GetOperandCount() == 1);
BuildUse(node->AsHWIntrinsic()->Op(1), candidates);
return 1;
}
#endif // FEATURE_HW_INTRINSICS
#ifdef TARGET_ARM64
if (node->OperIs(GT_MUL))
{
// Can be contained for MultiplyAdd on arm64
return BuildBinaryUses(node->AsOp(), candidates);
}
if (node->OperIs(GT_NEG, GT_CAST, GT_LSH))
{
// GT_NEG can be contained for MultiplyAdd on arm64
// GT_CAST and GT_LSH for ADD with sign/zero extension
return BuildOperandUses(node->gtGetOp1(), candidates);
}
#endif
return 0;
}
//------------------------------------------------------------------------
// setDelayFree: Mark a RefPosition as delayRegFree, and set pendingDelayFree
//
// Arguments:
// use - The use RefPosition to mark
//
void LinearScan::setDelayFree(RefPosition* use)
{
use->delayRegFree = true;
pendingDelayFree = true;
}
//------------------------------------------------------------------------
// BuildDelayFreeUses: Build Use RefPositions for an operand that might be contained,
// and which may need to be marked delayRegFree
//
// Arguments:
// node - The node of interest
// rmwNode - The node that has RMW semantics (if applicable)
// candidates - The set of candidates for the uses
//
// Return Value:
// The number of source registers used by the *parent* of this node.
//
int LinearScan::BuildDelayFreeUses(GenTree* node, GenTree* rmwNode, regMaskTP candidates)
{
RefPosition* use = nullptr;
Interval* rmwInterval = nullptr;
bool rmwIsLastUse = false;
GenTree* addr = nullptr;
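// If the RMW source is a candidate local var, remember its interval and whether this is its last
// use; when the use built below is of that same interval and either is a last use, it is safe to
// reuse the register as the destination, so we don't need to mark the use delayRegFree.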
if ((rmwNode != nullptr) && isCandidateLocalRef(rmwNode))
{
rmwInterval = getIntervalForLocalVarNode(rmwNode->AsLclVar());
// Note: we don't handle multi-reg vars here. It's not clear that there are any cases
// where we'd encounter a multi-reg var in an RMW context.
assert(!rmwNode->AsLclVar()->IsMultiReg());
rmwIsLastUse = rmwNode->AsLclVar()->IsLastUse(0);
}
if (!node->isContained())
{
use = BuildUse(node, candidates);
}
#ifdef TARGET_ARM64
// Must happen before OperIsHWIntrinsic case,
// but this occurs when a vector zero node is marked as contained.
else if (node->IsVectorZero())
{
return 0;
}
#endif
#ifdef FEATURE_HW_INTRINSICS
else if (node->OperIsHWIntrinsic())
{
assert(node->AsHWIntrinsic()->GetOperandCount() == 1);
use = BuildUse(node->AsHWIntrinsic()->Op(1), candidates);
}
#endif
else if (!node->OperIsIndir())
{
return 0;
}
else
{
GenTreeIndir* indirTree = node->AsIndir();
addr = indirTree->gtOp1;
if (!addr->isContained())
{
use = BuildUse(addr, candidates);
}
else if (!addr->OperIs(GT_LEA))
{
return 0;
}
}
if (use != nullptr)
{
// If node != rmwNode, then definitely node should be marked as "delayFree".
// However, if node == rmwNode, then we can mark node as "delayFree" only if
// none of the node/rmwNode are the last uses. If either of them are last use,
// we can safely reuse the rmwNode as destination.
if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse))
{
setDelayFree(use);
}
return 1;
}
// If we reach here we have a contained LEA in 'addr'.
GenTreeAddrMode* const addrMode = addr->AsAddrMode();
unsigned srcCount = 0;
if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained())
{
use = BuildUse(addrMode->Base(), candidates);
if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse))
{
setDelayFree(use);
}
srcCount++;
}
if ((addrMode->Index() != nullptr) && !addrMode->Index()->isContained())
{
use = BuildUse(addrMode->Index(), candidates);
if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse))
{
setDelayFree(use);
}
srcCount++;
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildBinaryUses: Get the RefInfoListNodes for the operands of the
// given node, and build uses for them.
//
// Arguments:
// node - a GenTreeOp
//
// Return Value:
// The number of actual register operands.
//
// Notes:
// The operands must already have been processed by buildRefPositionsForNode, and their
// RefInfoListNodes placed in the defList.
//
int LinearScan::BuildBinaryUses(GenTreeOp* node, regMaskTP candidates)
{
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2IfPresent();
#ifdef TARGET_XARCH
if (node->OperIsBinary() && isRMWRegOper(node))
{
assert(op2 != nullptr);
return BuildRMWUses(node, op1, op2, candidates);
}
#endif // TARGET_XARCH
int srcCount = 0;
if (op1 != nullptr)
{
srcCount += BuildOperandUses(op1, candidates);
}
if (op2 != nullptr)
{
srcCount += BuildOperandUses(op2, candidates);
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildStoreLocDef: Build a definition RefPosition for a local store
//
// Arguments:
//    storeLoc     - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
//    varDsc       - the LclVarDsc of the local being defined (the promoted field, for a multi-reg store)
//    singleUseRef - the use RefPosition of the single source register, if any (used for preferencing)
//    index        - the multi-reg index of this def (zero for a single-register store)
//
// Notes:
// This takes an index to enable building multiple defs for a multi-reg local.
//
void LinearScan::BuildStoreLocDef(GenTreeLclVarCommon* storeLoc,
LclVarDsc* varDsc,
RefPosition* singleUseRef,
int index)
{
assert(varDsc->lvTracked);
unsigned varIndex = varDsc->lvVarIndex;
Interval* varDefInterval = getIntervalForLocalVar(varIndex);
if (!storeLoc->IsLastUse(index))
{
VarSetOps::AddElemD(compiler, currentLiveVars, varIndex);
}
if (singleUseRef != nullptr)
{
Interval* srcInterval = singleUseRef->getInterval();
if (srcInterval->relatedInterval == nullptr)
{
// Preference the source to the dest, unless this is a non-last-use localVar.
// Note that the last-use info is not correct, but it is a better approximation than always
// preferencing the source to the dest, which is harmful if the source's lifetime extends
// beyond the dest.
if (!srcInterval->isLocalVar || (singleUseRef->treeNode->gtFlags & GTF_VAR_DEATH) != 0)
{
srcInterval->assignRelatedInterval(varDefInterval);
}
}
else if (!srcInterval->isLocalVar)
{
// Preference the source to dest, if src is not a local var.
srcInterval->assignRelatedInterval(varDefInterval);
}
}
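// Select the candidates for the def: on x86, byte-sized locals must go in byte-addressable registers.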
regMaskTP defCandidates = RBM_NONE;
var_types type = varDsc->GetRegisterType();
#ifdef TARGET_X86
if (varTypeIsByte(type))
{
defCandidates = allByteRegs();
}
else
{
defCandidates = allRegs(type);
}
#else
defCandidates = allRegs(type);
#endif // TARGET_X86
RefPosition* def = newRefPosition(varDefInterval, currentLoc + 1, RefTypeDef, storeLoc, defCandidates, index);
if (varDefInterval->isWriteThru)
{
// We always make write-thru defs reg-optional, as we can store them if they don't
// get a register.
def->regOptional = true;
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (Compiler::varTypeNeedsPartialCalleeSave(varDefInterval->registerType))
{
varDefInterval->isPartiallySpilled = false;
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
}
//------------------------------------------------------------------------
// BuildMultiRegStoreLoc: Set register requirements for a store of a lclVar
//
// Arguments:
// storeLoc - the multireg local store (GT_STORE_LCL_VAR)
//
// Returns:
// The number of source registers read.
//
int LinearScan::BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc)
{
GenTree* op1 = storeLoc->gtGetOp1();
unsigned int dstCount = storeLoc->GetFieldCount(compiler);
unsigned int srcCount = dstCount;
LclVarDsc* varDsc = compiler->lvaGetDesc(storeLoc);
assert(compiler->lvaEnregMultiRegVars);
assert(storeLoc->OperGet() == GT_STORE_LCL_VAR);
bool isMultiRegSrc = op1->IsMultiRegNode();
// The source must be one of:
//  - a multi-reg source,
//  - an enregisterable SIMD type, or
//  - an in-memory local.
//
if (isMultiRegSrc)
{
assert(op1->GetMultiRegCount(compiler) == srcCount);
}
else if (varTypeIsEnregisterable(op1))
{
// Create a delay free use, as we'll have to use it to create each field
RefPosition* use = BuildUse(op1, RBM_NONE);
setDelayFree(use);
srcCount = 1;
}
else
{
// Otherwise we must have an in-memory struct lclVar.
// We will just load directly into the register allocated for this lclVar,
// so we don't need to build any uses.
assert(op1->OperIs(GT_LCL_VAR) && op1->isContained() && op1->TypeIs(TYP_STRUCT));
srcCount = 0;
}
// For multi-reg local stores of multi-reg sources, the code generator will read each source
// register, and then move it, if needed, to the destination register. These nodes have
// 2*N locations where N is the number of registers, so that the liveness can
// be reflected accordingly.
//
for (unsigned int i = 0; i < dstCount; ++i)
{
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i);
RefPosition* singleUseRef = nullptr;
if (isMultiRegSrc)
{
regMaskTP srcCandidates = RBM_NONE;
#ifdef TARGET_X86
var_types type = fieldVarDsc->TypeGet();
if (varTypeIsByte(type))
{
srcCandidates = allByteRegs();
}
#endif // TARGET_X86
singleUseRef = BuildUse(op1, srcCandidates, i);
}
assert(isCandidateVar(fieldVarDsc));
BuildStoreLocDef(storeLoc, fieldVarDsc, singleUseRef, i);
if (isMultiRegSrc && (i < (dstCount - 1)))
{
currentLoc += 2;
}
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildStoreLoc: Set register requirements for a store of a lclVar
//
// Arguments:
// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
//
// Return Value:
//    The number of source registers read.
//
// Notes:
// This involves:
// - Setting the appropriate candidates.
// - Handling of contained immediates.
// - Requesting an internal register for SIMD12 stores.
//
int LinearScan::BuildStoreLoc(GenTreeLclVarCommon* storeLoc)
{
GenTree* op1 = storeLoc->gtGetOp1();
int srcCount;
RefPosition* singleUseRef = nullptr;
LclVarDsc* varDsc = compiler->lvaGetDesc(storeLoc);
if (storeLoc->IsMultiRegLclVar())
{
return BuildMultiRegStoreLoc(storeLoc->AsLclVar());
}
// First, define internal registers.
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(storeLoc) && !op1->IsCnsIntOrI() && (storeLoc->TypeGet() == TYP_SIMD12))
{
// Need an additional register to extract upper 4 bytes of Vector3,
// it has to be float for x86.
buildInternalFloatRegisterDefForNode(storeLoc, allSIMDRegs());
}
#endif // FEATURE_SIMD
// Second, use source registers.
if (op1->IsMultiRegNode() && (op1->GetMultiRegCount(compiler) > 1))
{
// This is the case where the source produces multiple registers.
// This must be a store lclvar.
assert(storeLoc->OperGet() == GT_STORE_LCL_VAR);
srcCount = op1->GetMultiRegCount(compiler);
for (int i = 0; i < srcCount; ++i)
{
BuildUse(op1, RBM_NONE, i);
}
#if defined(FEATURE_SIMD) && defined(TARGET_X86)
if (TargetOS::IsWindows && !compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
if (varTypeIsSIMD(storeLoc) && op1->IsCall())
{
// Need an additional register to create a SIMD8 from EAX/EDX without SSE4.1.
buildInternalFloatRegisterDefForNode(storeLoc, allSIMDRegs());
if (isCandidateVar(varDsc))
{
// This internal register must be different from the target register.
setInternalRegsDelayFree = true;
}
}
}
#endif // FEATURE_SIMD && TARGET_X86
}
else if (op1->isContained() && op1->OperIs(GT_BITCAST))
{
GenTree* bitCastSrc = op1->gtGetOp1();
RegisterType registerType = bitCastSrc->TypeGet();
singleUseRef = BuildUse(bitCastSrc, allRegs(registerType));
Interval* srcInterval = singleUseRef->getInterval();
assert(srcInterval->registerType == registerType);
srcCount = 1;
}
#ifndef TARGET_64BIT
else if (varTypeIsLong(op1))
{
// GT_MUL_LONG is handled by the IsMultiRegNode case above.
assert(op1->OperIs(GT_LONG));
assert(op1->isContained() && !op1->gtGetOp1()->isContained() && !op1->gtGetOp2()->isContained());
srcCount = BuildBinaryUses(op1->AsOp());
assert(srcCount == 2);
}
#endif // !TARGET_64BIT
else if (op1->isContained())
{
#ifdef TARGET_XARCH
if (varTypeIsSIMD(storeLoc))
{
// This is the zero-init case, and we need a register to hold the zero.
// (On Arm64 we can just store REG_ZR.)
assert(op1->IsSIMDZero());
singleUseRef = BuildUse(op1->gtGetOp1());
srcCount = 1;
}
else
#endif
{
srcCount = 0;
}
}
else
{
srcCount = 1;
regMaskTP srcCandidates = RBM_NONE;
#ifdef TARGET_X86
var_types type = varDsc->GetRegisterType(storeLoc);
if (varTypeIsByte(type))
{
srcCandidates = allByteRegs();
}
#endif // TARGET_X86
singleUseRef = BuildUse(op1, srcCandidates);
}
// Third, use internal registers.
#ifdef TARGET_ARM
if (storeLoc->OperIs(GT_STORE_LCL_FLD) && storeLoc->AsLclFld()->IsOffsetMisaligned())
{
buildInternalIntRegisterDefForNode(storeLoc); // to generate address.
buildInternalIntRegisterDefForNode(storeLoc); // to move float into an int reg.
if (storeLoc->TypeIs(TYP_DOUBLE))
{
buildInternalIntRegisterDefForNode(storeLoc); // to move the second half into an int reg.
}
}
#endif // TARGET_ARM
#if defined(FEATURE_SIMD) || defined(TARGET_ARM)
buildInternalRegisterUses();
#endif // FEATURE_SIMD || TARGET_ARM
// Fourth, define destination registers.
// Add the lclVar to currentLiveVars (if it will remain live)
if (isCandidateVar(varDsc))
{
BuildStoreLocDef(storeLoc, varDsc, singleUseRef, 0);
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildSimple: Builds use RefPositions for trees requiring no special handling
//
// Arguments:
// tree - The node of interest
//
// Return Value:
// The number of use RefPositions created
//
int LinearScan::BuildSimple(GenTree* tree)
{
unsigned kind = tree->OperKind();
int srcCount = 0;
if ((kind & GTK_LEAF) == 0)
{
assert((kind & GTK_SMPOP) != 0);
srcCount = BuildBinaryUses(tree->AsOp());
}
if (tree->IsValue())
{
BuildDef(tree);
}
return srcCount;
}
//------------------------------------------------------------------------
// BuildReturn: Set the NodeInfo for a GT_RETURN.
//
// Arguments:
// tree - The node of interest
//
// Return Value:
// The number of sources consumed by this node.
//
int LinearScan::BuildReturn(GenTree* tree)
{
GenTree* op1 = tree->gtGetOp1();
#if !defined(TARGET_64BIT)
if (tree->TypeGet() == TYP_LONG)
{
assert((op1->OperGet() == GT_LONG) && op1->isContained());
GenTree* loVal = op1->gtGetOp1();
GenTree* hiVal = op1->gtGetOp2();
BuildUse(loVal, RBM_LNGRET_LO);
BuildUse(hiVal, RBM_LNGRET_HI);
return 2;
}
else
#endif // !defined(TARGET_64BIT)
if ((tree->TypeGet() != TYP_VOID) && !op1->isContained())
{
regMaskTP useCandidates = RBM_NONE;
#if FEATURE_MULTIREG_RET
#ifdef TARGET_ARM64
if (varTypeIsSIMD(tree) && !op1->IsMultiRegLclVar())
{
useCandidates = allSIMDRegs();
if (op1->OperGet() == GT_LCL_VAR)
{
assert(op1->TypeGet() != TYP_SIMD32);
useCandidates = RBM_DOUBLERET;
}
BuildUse(op1, useCandidates);
return 1;
}
#endif // TARGET_ARM64
if (varTypeIsStruct(tree))
{
// op1 has to be either a lclvar or a multi-reg returning call
if ((op1->OperGet() == GT_LCL_VAR) && !op1->IsMultiRegLclVar())
{
BuildUse(op1, useCandidates);
}
else
{
noway_assert(op1->IsMultiRegCall() || op1->IsMultiRegLclVar());
int srcCount;
ReturnTypeDesc nonCallRetTypeDesc;
const ReturnTypeDesc* pRetTypeDesc;
if (op1->OperIs(GT_CALL))
{
pRetTypeDesc = op1->AsCall()->GetReturnTypeDesc();
}
else
{
assert(compiler->lvaEnregMultiRegVars);
LclVarDsc* varDsc = compiler->lvaGetDesc(op1->AsLclVar());
nonCallRetTypeDesc.InitializeStructReturnType(compiler, varDsc->GetStructHnd(),
compiler->info.compCallConv);
pRetTypeDesc = &nonCallRetTypeDesc;
assert(compiler->lvaGetDesc(op1->AsLclVar())->lvFieldCnt == nonCallRetTypeDesc.GetReturnRegCount());
}
srcCount = pRetTypeDesc->GetReturnRegCount();
// For any source that's coming from a different register file, we need to ensure that
// we reserve the specific ABI register we need.
bool hasMismatchedRegTypes = false;
if (op1->IsMultiRegLclVar())
{
for (int i = 0; i < srcCount; i++)
{
RegisterType srcType = regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i));
RegisterType dstType = regType(pRetTypeDesc->GetReturnRegType(i));
if (srcType != dstType)
{
hasMismatchedRegTypes = true;
regMaskTP dstRegMask = genRegMask(pRetTypeDesc->GetABIReturnReg(i));
if (varTypeUsesFloatReg(dstType))
{
buildInternalFloatRegisterDefForNode(tree, dstRegMask);
}
else
{
buildInternalIntRegisterDefForNode(tree, dstRegMask);
}
}
}
}
for (int i = 0; i < srcCount; i++)
{
// We will build uses of the type of the operand registers/fields, and the codegen
// for return will move as needed.
if (!hasMismatchedRegTypes || (regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)) ==
regType(pRetTypeDesc->GetReturnRegType(i))))
{
BuildUse(op1, genRegMask(pRetTypeDesc->GetABIReturnReg(i)), i);
}
else
{
BuildUse(op1, RBM_NONE, i);
}
}
if (hasMismatchedRegTypes)
{
buildInternalRegisterUses();
}
return srcCount;
}
}
else
#endif // FEATURE_MULTIREG_RET
{
// Non-struct type return - determine useCandidates
switch (tree->TypeGet())
{
case TYP_VOID:
useCandidates = RBM_NONE;
break;
case TYP_FLOAT:
useCandidates = RBM_FLOATRET;
break;
case TYP_DOUBLE:
// We ONLY want the valid double register in the RBM_DOUBLERET mask.
useCandidates = (RBM_DOUBLERET & RBM_ALLDOUBLE);
break;
case TYP_LONG:
useCandidates = RBM_LNGRET;
break;
default:
useCandidates = RBM_INTRET;
break;
}
BuildUse(op1, useCandidates);
return 1;
}
}
// No kills or defs.
return 0;
}
//------------------------------------------------------------------------
// supportsSpecialPutArg: Determine if we can support specialPutArgs
//
// Return Value:
// True iff specialPutArg intervals can be supported.
//
// Notes:
// See below.
//
bool LinearScan::supportsSpecialPutArg()
{
#if defined(DEBUG) && defined(TARGET_X86)
// On x86, `LSRA_LIMIT_CALLER` is too restrictive to allow the use of special put args: this stress mode
// leaves only three registers allocatable--eax, ecx, and edx--of which the latter two are also used for the
// first two integral arguments to a call. This can leave us with too few registers to successfully allocate in
// situations like the following:
//
// t1026 = lclVar ref V52 tmp35 u:3 REG NA <l:$3a1, c:$98d>
//
// /--* t1026 ref
// t1352 = * putarg_reg ref REG NA
//
// t342 = lclVar int V14 loc6 u:4 REG NA $50c
//
// t343 = const int 1 REG NA $41
//
// /--* t342 int
// +--* t343 int
// t344 = * + int REG NA $495
//
// t345 = lclVar int V04 arg4 u:2 REG NA $100
//
// /--* t344 int
// +--* t345 int
// t346 = * % int REG NA $496
//
// /--* t346 int
// t1353 = * putarg_reg int REG NA
//
// t1354 = lclVar ref V52 tmp35 (last use) REG NA
//
// /--* t1354 ref
// t1355 = * lea(b+0) byref REG NA
//
// Here, the first `putarg_reg` would normally be considered a special put arg, which would remove `ecx` from the
// set of allocatable registers, leaving only `eax` and `edx`. The allocator will then fail to allocate a register
// for the def of `t345` if arg4 is not a register candidate: the corresponding ref position will be constrained to
// { `ecx`, `ebx`, `esi`, `edi` }, which `LSRA_LIMIT_CALLER` will further constrain to `ecx`, which will not be
// available due to the special put arg.
return getStressLimitRegs() != LSRA_LIMIT_CALLER;
#else
return true;
#endif
}
//------------------------------------------------------------------------
// BuildPutArgReg: Set the NodeInfo for a PUTARG_REG.
//
// Arguments:
// node - The PUTARG_REG node.
//
// Return Value:
//    The number of sources consumed by this node.
//
int LinearScan::BuildPutArgReg(GenTreeUnOp* node)
{
assert(node != nullptr);
assert(node->OperIsPutArgReg());
regNumber argReg = node->GetRegNum();
assert(argReg != REG_NA);
bool isSpecialPutArg = false;
int srcCount = 1;
GenTree* op1 = node->gtGetOp1();
// First, handle the GT_OBJ case, which loads into the arg register
// (so we don't set the use to prefer that register for the source address).
if (op1->OperIs(GT_OBJ))
{
GenTreeObj* obj = op1->AsObj();
GenTree* addr = obj->Addr();
unsigned size = obj->GetLayout()->GetSize();
assert(size <= MAX_PASS_SINGLEREG_BYTES);
if (addr->OperIsLocalAddr())
{
// We don't need a source register.
assert(addr->isContained());
srcCount = 0;
}
else if (!isPow2(size))
{
// We'll need an internal register to do the odd-size load.
// This can only happen with integer registers.
assert(genIsValidIntReg(argReg));
buildInternalIntRegisterDefForNode(node);
BuildUse(addr);
buildInternalRegisterUses();
}
return srcCount;
}
// To avoid redundant moves, have the argument operand computed in the
// register in which the argument is passed to the call.
regMaskTP argMask = genRegMask(argReg);
RefPosition* use = BuildUse(op1, argMask);
if (supportsSpecialPutArg() && isCandidateLocalRef(op1) && ((op1->gtFlags & GTF_VAR_DEATH) == 0))
{
// This is the case for a "pass-through" copy of a lclVar. In the case where it is a non-last-use,
// we don't want the def of the copy to kill the lclVar register, if it is assigned the same register
// (which is actually what we hope will happen).
JITDUMP("Setting putarg_reg as a pass-through of a non-last use lclVar\n");
// Preference the destination to the interval of the first register defined by the first operand.
assert(use->getInterval()->isLocalVar);
isSpecialPutArg = true;
}
#ifdef TARGET_ARM
// If type of node is `long` then it is actually `double`.
// The actual `long` types must have been transformed as a field list with two fields.
if (node->TypeGet() == TYP_LONG)
{
srcCount++;
regMaskTP argMaskHi = genRegMask(REG_NEXT(argReg));
assert(genRegArgNext(argReg) == REG_NEXT(argReg));
use = BuildUse(op1, argMaskHi, 1);
BuildDef(node, argMask, 0);
BuildDef(node, argMaskHi, 1);
}
else
#endif // TARGET_ARM
{
RefPosition* def = BuildDef(node, argMask);
if (isSpecialPutArg)
{
def->getInterval()->isSpecialPutArg = true;
def->getInterval()->assignRelatedInterval(use->getInterval());
}
}
return srcCount;
}
//------------------------------------------------------------------------
// HandleFloatVarArgs: Handle additional register requirements for a varargs call
//
// Arguments:
// call - The call node of interest
// argNode - The current argument
//
// Return Value:
// None.
//
// Notes:
// In the case of a varargs call, the ABI dictates that if we have floating point args,
// we must pass the enregistered arguments in both the integer and floating point registers.
// Since the integer register is not associated with the arg node, we will reserve it as
// an internal register on the call so that it is not used during the evaluation of the call node
// (e.g. for the target).
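//    Illustration: for a win-x64-style varargs call such as printf("%f", d), the double is passed
//    in XMM1 and the ABI also requires it to be duplicated in the matching integer argument register
//    (RDX); reserving that integer register here (via getCallArgIntRegister below) keeps it free for
//    the duplicate while the rest of the call node is evaluated.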
void LinearScan::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs)
{
if (compFeatureVarArg() && call->IsVarargs() && varTypeIsFloating(argNode))
{
*callHasFloatRegArgs = true;
// We'll have to return the internal def and then later create a use for it.
regNumber argReg = argNode->GetRegNum();
regNumber targetReg = compiler->getCallArgIntRegister(argReg);
buildInternalIntRegisterDefForNode(call, genRegMask(targetReg));
}
}
//------------------------------------------------------------------------
// BuildGCWriteBarrier: Handle additional register requirements for a GC write barrier
//
// Arguments:
// tree - The STORE_IND for which a write barrier is required
//
int LinearScan::BuildGCWriteBarrier(GenTree* tree)
{
GenTree* addr = tree->gtGetOp1();
GenTree* src = tree->gtGetOp2();
// In the case where we are doing a helper assignment, even if the dst
// is an indir through an lea, we need to actually instantiate the
// lea in a register
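// (Illustration: for a STOREIND(LEA(base, index), gcRef) that needs a write barrier, the LEA is
// evaluated into the helper's address register -- RBM_ARG_0 below, or RBM_WRITE_BARRIER_DST on
// arm64 -- rather than being folded into an addressing mode.)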
assert(!addr->isContained() && !src->isContained());
regMaskTP addrCandidates = RBM_ARG_0;
regMaskTP srcCandidates = RBM_ARG_1;
#if defined(TARGET_ARM64)
// the 'addr' goes into x14 (REG_WRITE_BARRIER_DST)
// the 'src' goes into x15 (REG_WRITE_BARRIER_SRC)
//
addrCandidates = RBM_WRITE_BARRIER_DST;
srcCandidates = RBM_WRITE_BARRIER_SRC;
#elif defined(TARGET_X86) && NOGC_WRITE_BARRIERS
bool useOptimizedWriteBarrierHelper = compiler->codeGen->genUseOptimizedWriteBarriers(tree, src);
if (useOptimizedWriteBarrierHelper)
{
// Special write barrier:
// op1 (addr) goes into REG_WRITE_BARRIER (rdx) and
// op2 (src) goes into any int register.
addrCandidates = RBM_WRITE_BARRIER;
srcCandidates = RBM_WRITE_BARRIER_SRC;
}
#endif // defined(TARGET_X86) && NOGC_WRITE_BARRIERS
BuildUse(addr, addrCandidates);
BuildUse(src, srcCandidates);
regMaskTP killMask = getKillSetForStoreInd(tree->AsStoreInd());
buildKillPositionsForNode(tree, currentLoc + 1, killMask);
return 2;
}
//------------------------------------------------------------------------
// BuildCmp: Set the register requirements for a compare.
//
// Arguments:
// tree - The node of interest
//
// Return Value:
//    The number of sources consumed by this node.
//
int LinearScan::BuildCmp(GenTree* tree)
{
assert(tree->OperIsCompare() || tree->OperIs(GT_CMP) || tree->OperIs(GT_JCMP));
regMaskTP dstCandidates = RBM_NONE;
regMaskTP op1Candidates = RBM_NONE;
regMaskTP op2Candidates = RBM_NONE;
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
#ifdef TARGET_X86
// If the compare is used by a jump, we just need to set the condition codes. If not, then we need
// to store the result into the low byte of a register, which requires the dst be a byteable register.
if (tree->TypeGet() != TYP_VOID)
{
dstCandidates = allByteRegs();
}
bool needByteRegs = false;
if (varTypeIsByte(tree))
{
if (!varTypeIsFloating(op1))
{
needByteRegs = true;
}
}
// Example1: GT_EQ(int, op1 of type ubyte, op2 of type ubyte) - in this case codegen uses
// ubyte as the result of comparison and if the result needs to be materialized into a reg
// simply zero extend it to TYP_INT size. Here is an example of generated code:
// cmp dl, byte ptr[addr mode]
// movzx edx, dl
else if (varTypeIsByte(op1) && varTypeIsByte(op2))
{
needByteRegs = true;
}
// Example2: GT_EQ(int, op1 of type ubyte, op2 is GT_CNS_INT) - in this case codegen uses
// ubyte as the result of the comparison and if the result needs to be materialized into a reg
// simply zero extend it to TYP_INT size.
else if (varTypeIsByte(op1) && op2->IsCnsIntOrI())
{
needByteRegs = true;
}
// Example3: GT_EQ(int, op1 is GT_CNS_INT, op2 of type ubyte) - in this case codegen uses
// ubyte as the result of the comparison and if the result needs to be materialized into a reg
// simply zero extend it to TYP_INT size.
else if (op1->IsCnsIntOrI() && varTypeIsByte(op2))
{
needByteRegs = true;
}
if (needByteRegs)
{
if (!op1->isContained())
{
op1Candidates = allByteRegs();
}
if (!op2->isContained())
{
op2Candidates = allByteRegs();
}
}
#endif // TARGET_X86
int srcCount = BuildOperandUses(op1, op1Candidates);
srcCount += BuildOperandUses(op2, op2Candidates);
if (tree->TypeGet() != TYP_VOID)
{
BuildDef(tree, dstCandidates);
}
return srcCount;
}
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
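As a rough illustration of the fix (a sketch only, not the verbatim diff; `HasAnyRegister` is a hypothetical helper, while `IsMultiRegLclVar`, `GetRegNumByIdx`, `lvaGetDesc` and `lvFieldCnt` are existing accessors used in the files below), the multi-reg-aware check amounts to scanning every field's register slot instead of only the node's primary register:
```cpp
// A node "has a register" if any of its register slots was assigned one,
// not just the primary slot returned by GetRegNum().
bool HasAnyRegister(Compiler* comp, GenTree* tree)
{
    if (tree->IsMultiRegLclVar())
    {
        GenTreeLclVar* lclNode  = tree->AsLclVar();
        unsigned       fieldCnt = comp->lvaGetDesc(lclNode)->lvFieldCnt;
        for (unsigned i = 0; i < fieldCnt; i++)
        {
            if (lclNode->GetRegNumByIdx(i) != REG_NA) // any enregistered field slot counts
            {
                return true;
            }
        }
        return false;
    }
    return tree->GetRegNum() != REG_NA; // single-reg nodes keep the old behavior
}
```
Once the check sees those non-first enregistered slots, the liveness update in treelifeupdater.cpp below records their registers so their GC lifetimes are not cut short.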
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/treelifeupdater.cpp | #include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "treelifeupdater.h"
template <bool ForCodeGen>
TreeLifeUpdater<ForCodeGen>::TreeLifeUpdater(Compiler* compiler)
: compiler(compiler)
, newLife(VarSetOps::MakeEmpty(compiler))
, stackVarDeltaSet(VarSetOps::MakeEmpty(compiler))
, varDeltaSet(VarSetOps::MakeEmpty(compiler))
, gcTrkStkDeltaSet(VarSetOps::MakeEmpty(compiler))
#ifdef DEBUG
, gcVarPtrSetNew(VarSetOps::MakeEmpty(compiler))
, epoch(compiler->GetCurLVEpoch())
#endif // DEBUG
{
}
//------------------------------------------------------------------------
// UpdateLifeFieldVar: Update live sets for only the given field of a multi-reg LclVar node.
//
// Arguments:
// lclNode - the GT_LCL_VAR node.
// multiRegIndex - the index of the field being updated.
//
// Return Value:
// Returns true iff the variable needs to be spilled.
//
// Notes:
// This method need only be used when the fields are dying or going live at different times,
//    e.g. when we read the 0th field/reg of one node and define the 0th field/reg of another
// before reading the subsequent fields/regs.
//
template <bool ForCodeGen>
bool TreeLifeUpdater<ForCodeGen>::UpdateLifeFieldVar(GenTreeLclVar* lclNode, unsigned multiRegIndex)
{
LclVarDsc* parentVarDsc = compiler->lvaGetDesc(lclNode);
assert(parentVarDsc->lvPromoted && (multiRegIndex < parentVarDsc->lvFieldCnt) && lclNode->IsMultiReg() &&
compiler->lvaEnregMultiRegVars);
unsigned fieldVarNum = parentVarDsc->lvFieldLclStart + multiRegIndex;
LclVarDsc* fldVarDsc = compiler->lvaGetDesc(fieldVarNum);
assert(fldVarDsc->lvTracked);
unsigned fldVarIndex = fldVarDsc->lvVarIndex;
assert((lclNode->gtFlags & GTF_VAR_USEASG) == 0);
VarSetOps::Assign(compiler, newLife, compiler->compCurLife);
bool isBorn = ((lclNode->gtFlags & GTF_VAR_DEF) != 0);
bool isDying = !isBorn && lclNode->IsLastUse(multiRegIndex);
// GTF_SPILL will be set if any registers need to be spilled.
GenTreeFlags spillFlags = (lclNode->gtFlags & lclNode->GetRegSpillFlagByIdx(multiRegIndex));
bool spill = ((spillFlags & GTF_SPILL) != 0);
bool isInMemory = false;
if (isBorn || isDying)
{
if (ForCodeGen)
{
regNumber reg = lclNode->GetRegNumByIdx(multiRegIndex);
bool isInReg = fldVarDsc->lvIsInReg() && reg != REG_NA;
isInMemory = !isInReg || fldVarDsc->IsAlwaysAliveInMemory();
if (isInReg)
{
if (isBorn)
{
compiler->codeGen->genUpdateVarReg(fldVarDsc, lclNode, multiRegIndex);
}
compiler->codeGen->genUpdateRegLife(fldVarDsc, isBorn, isDying DEBUGARG(lclNode));
}
}
// First, update the live set
if (isDying)
{
VarSetOps::RemoveElemD(compiler, newLife, fldVarIndex);
}
else
{
VarSetOps::AddElemD(compiler, newLife, fldVarIndex);
}
}
if (!VarSetOps::Equal(compiler, compiler->compCurLife, newLife))
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tLive vars: ");
dumpConvertedVarSet(compiler, compiler->compCurLife);
printf(" => ");
dumpConvertedVarSet(compiler, newLife);
printf("\n");
}
#endif // DEBUG
VarSetOps::Assign(compiler, compiler->compCurLife, newLife);
if (ForCodeGen)
{
// Only add vars to the gcInfo.gcVarPtrSetCur if they are currently on stack, since the
// gcInfo.gcTrkStkPtrLcls
// includes all TRACKED vars that EVER live on the stack (i.e. are not always in a register).
VarSetOps::Assign(compiler, gcTrkStkDeltaSet, compiler->codeGen->gcInfo.gcTrkStkPtrLcls);
if (isInMemory && VarSetOps::IsMember(compiler, gcTrkStkDeltaSet, fldVarIndex))
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tGCvars: ");
dumpConvertedVarSet(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur);
printf(" => ");
}
#endif // DEBUG
if (isBorn)
{
VarSetOps::AddElemD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, fldVarIndex);
}
else
{
VarSetOps::RemoveElemD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, fldVarIndex);
}
#ifdef DEBUG
if (compiler->verbose)
{
dumpConvertedVarSet(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur);
printf("\n");
}
#endif // DEBUG
}
#ifdef USING_VARIABLE_LIVE_RANGE
// For each of the LclVarDsc that are reporting change, variable or fields
compiler->codeGen->getVariableLiveKeeper()->siStartOrCloseVariableLiveRange(fldVarDsc, fieldVarNum, isBorn,
isDying);
#endif // USING_VARIABLE_LIVE_RANGE
#ifdef USING_SCOPE_INFO
compiler->codeGen->siUpdate();
#endif // USING_SCOPE_INFO
}
}
if (ForCodeGen && spill)
{
if (VarSetOps::IsMember(compiler, compiler->codeGen->gcInfo.gcTrkStkPtrLcls, fldVarIndex))
{
if (!VarSetOps::IsMember(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, fldVarIndex))
{
VarSetOps::AddElemD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, fldVarIndex);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tVar V%02u becoming live\n", fieldVarNum);
}
#endif // DEBUG
}
}
return true;
}
return false;
}
//------------------------------------------------------------------------
// UpdateLifeVar: Update live sets for a given tree.
//
// Arguments:
// tree - the tree which affects liveness.
//
template <bool ForCodeGen>
void TreeLifeUpdater<ForCodeGen>::UpdateLifeVar(GenTree* tree)
{
GenTree* indirAddrLocal = compiler->fgIsIndirOfAddrOfLocal(tree);
assert(tree->OperIsNonPhiLocal() || indirAddrLocal != nullptr);
// Get the local var tree -- if "tree" is "Ldobj(addr(x))", or "ind(addr(x))" this is "x", else it's "tree".
GenTree* lclVarTree = indirAddrLocal;
if (lclVarTree == nullptr)
{
lclVarTree = tree;
}
unsigned int lclNum = lclVarTree->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
#ifdef DEBUG
#if !defined(TARGET_AMD64)
// There are no addr nodes on ARM and we are experimenting with encountering vars in 'random' order.
// Struct fields are not traversed in a consistent order, so ignore them when
// verifying that we see the var nodes in execution order
if (ForCodeGen)
{
if (tree->OperIsIndir())
{
assert(indirAddrLocal != NULL);
}
else if (tree->gtNext != NULL && tree->gtNext->gtOper == GT_ADDR &&
((tree->gtNext->gtNext == NULL || !tree->gtNext->gtNext->OperIsIndir())))
{
assert(tree->IsLocal()); // Can only take the address of a local.
// The ADDR might occur in a context where the address it contributes is eventually
// dereferenced, so we can't say that this is not a use or def.
}
}
#endif // !TARGET_AMD64
#endif // DEBUG
compiler->compCurLifeTree = tree;
VarSetOps::Assign(compiler, newLife, compiler->compCurLife);
// By codegen, a struct may not be TYP_STRUCT, so we have to
// check lvPromoted, for the case where the fields are being
// tracked.
if (!varDsc->lvTracked && !varDsc->lvPromoted)
{
return;
}
// if it's a partial definition then variable "x" must have had a previous, original, site to be born.
bool isBorn;
bool isDying;
// GTF_SPILL will be set on a MultiRegLclVar if any registers need to be spilled.
bool spill = ((lclVarTree->gtFlags & GTF_SPILL) != 0);
bool isMultiRegLocal = lclVarTree->IsMultiRegLclVar();
if (isMultiRegLocal)
{
// We should never have an 'IndirOfAddrOfLocal' for a multi-reg.
assert(lclVarTree == tree);
assert((lclVarTree->gtFlags & GTF_VAR_USEASG) == 0);
isBorn = ((lclVarTree->gtFlags & GTF_VAR_DEF) != 0);
// Note that for multireg locals we can have definitions for which some of those are last uses.
// We don't want to add those to the varDeltaSet because otherwise they will be added as newly
// live.
isDying = !isBorn && lclVarTree->HasLastUse();
}
else
{
isBorn = ((lclVarTree->gtFlags & GTF_VAR_DEF) != 0 && (lclVarTree->gtFlags & GTF_VAR_USEASG) == 0);
isDying = ((lclVarTree->gtFlags & GTF_VAR_DEATH) != 0);
}
// Since all tracked vars are register candidates, but not all are in registers at all times,
// we maintain two separate sets of variables - the total set of variables that are either
// born or dying here, and the subset of those that are on the stack
VarSetOps::ClearD(compiler, stackVarDeltaSet);
if (isBorn || isDying)
{
VarSetOps::ClearD(compiler, varDeltaSet);
if (varDsc->lvTracked)
{
VarSetOps::AddElemD(compiler, varDeltaSet, varDsc->lvVarIndex);
if (ForCodeGen)
{
if (isBorn && varDsc->lvIsRegCandidate() && tree->gtHasReg())
{
compiler->codeGen->genUpdateVarReg(varDsc, tree);
}
bool isInReg = varDsc->lvIsInReg() && tree->GetRegNum() != REG_NA;
bool isInMemory = !isInReg || varDsc->IsAlwaysAliveInMemory();
if (isInReg)
{
compiler->codeGen->genUpdateRegLife(varDsc, isBorn, isDying DEBUGARG(tree));
}
if (isInMemory)
{
VarSetOps::AddElemD(compiler, stackVarDeltaSet, varDsc->lvVarIndex);
}
}
}
else if (ForCodeGen && lclVarTree->IsMultiRegLclVar())
{
assert(varDsc->lvPromoted && compiler->lvaEnregMultiRegVars);
unsigned firstFieldVarNum = varDsc->lvFieldLclStart;
for (unsigned i = 0; i < varDsc->lvFieldCnt; ++i)
{
bool fieldIsSpilled = spill && ((lclVarTree->GetRegSpillFlagByIdx(i) & GTF_SPILL) != 0);
LclVarDsc* fldVarDsc = compiler->lvaGetDesc(firstFieldVarNum + i);
noway_assert(fldVarDsc->lvIsStructField);
assert(fldVarDsc->lvTracked);
unsigned fldVarIndex = fldVarDsc->lvVarIndex;
regNumber reg = lclVarTree->AsLclVar()->GetRegNumByIdx(i);
bool isInReg = fldVarDsc->lvIsInReg() && reg != REG_NA;
bool isInMemory = !isInReg || fldVarDsc->IsAlwaysAliveInMemory();
bool isFieldDying = lclVarTree->AsLclVar()->IsLastUse(i);
if ((isBorn && !isFieldDying) || (!isBorn && isFieldDying))
{
VarSetOps::AddElemD(compiler, varDeltaSet, fldVarIndex);
if (isInMemory)
{
VarSetOps::AddElemD(compiler, stackVarDeltaSet, fldVarIndex);
}
}
if (isInReg)
{
if (isBorn)
{
compiler->codeGen->genUpdateVarReg(fldVarDsc, tree, i);
}
compiler->codeGen->genUpdateRegLife(fldVarDsc, isBorn, isFieldDying DEBUGARG(tree));
// If this was marked for spill, genProduceReg should already have spilled it.
assert(!fieldIsSpilled);
}
}
spill = false;
}
else if (varDsc->lvPromoted)
{
// If hasDeadTrackedFieldVars is true, then, for a LDOBJ(ADDR(<promoted struct local>)),
// *deadTrackedFieldVars indicates which tracked field vars are dying.
bool hasDeadTrackedFieldVars = false;
if (indirAddrLocal != nullptr && isDying)
{
assert(!isBorn); // GTF_VAR_DEATH only set for LDOBJ last use.
VARSET_TP* deadTrackedFieldVars = nullptr;
hasDeadTrackedFieldVars =
compiler->LookupPromotedStructDeathVars(indirAddrLocal, &deadTrackedFieldVars);
if (hasDeadTrackedFieldVars)
{
VarSetOps::Assign(compiler, varDeltaSet, *deadTrackedFieldVars);
}
}
unsigned firstFieldVarNum = varDsc->lvFieldLclStart;
for (unsigned i = 0; i < varDsc->lvFieldCnt; ++i)
{
LclVarDsc* fldVarDsc = compiler->lvaGetDesc(firstFieldVarNum + i);
noway_assert(fldVarDsc->lvIsStructField);
if (fldVarDsc->lvTracked)
{
unsigned fldVarIndex = fldVarDsc->lvVarIndex;
// We should never see enregistered fields in a struct local unless
// IsMultiRegLclVar() returns true, in which case we've handled this above.
assert(!fldVarDsc->lvIsInReg());
noway_assert(fldVarIndex < compiler->lvaTrackedCount);
if (!hasDeadTrackedFieldVars)
{
VarSetOps::AddElemD(compiler, varDeltaSet, fldVarIndex);
if (ForCodeGen)
{
// We repeat this call here and below to avoid the VarSetOps::IsMember
// test in this, the common case, where we have no deadTrackedFieldVars.
VarSetOps::AddElemD(compiler, stackVarDeltaSet, fldVarIndex);
}
}
else if (ForCodeGen && VarSetOps::IsMember(compiler, varDeltaSet, fldVarIndex))
{
VarSetOps::AddElemD(compiler, stackVarDeltaSet, fldVarIndex);
}
}
}
}
// First, update the live set
if (isDying)
{
// We'd like to be able to assert the following, however if we are walking
// through a qmark/colon tree, we may encounter multiple last-use nodes.
// assert (VarSetOps::IsSubset(compiler, regVarDeltaSet, newLife));
VarSetOps::DiffD(compiler, newLife, varDeltaSet);
}
else
{
// This shouldn't be in newLife, unless this is debug code, in which
// case we keep vars live everywhere, OR the variable is address-exposed,
// OR this block is part of a try block, in which case it may be live at the handler
// Could add a check that, if it's in newLife, that it's also in
// fgGetHandlerLiveVars(compCurBB), but seems excessive
//
// For a dead store, it can be the case that we set both isBorn and isDying to true.
// (We don't eliminate dead stores under MinOpts, so we can't assume they're always
// eliminated.) If it's both, we handled it above.
VarSetOps::UnionD(compiler, newLife, varDeltaSet);
}
}
if (!VarSetOps::Equal(compiler, compiler->compCurLife, newLife))
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tLive vars: ");
dumpConvertedVarSet(compiler, compiler->compCurLife);
printf(" => ");
dumpConvertedVarSet(compiler, newLife);
printf("\n");
}
#endif // DEBUG
VarSetOps::Assign(compiler, compiler->compCurLife, newLife);
if (ForCodeGen)
{
// Only add vars to the gcInfo.gcVarPtrSetCur if they are currently on stack, since the
// gcInfo.gcTrkStkPtrLcls
// includes all TRACKED vars that EVER live on the stack (i.e. are not always in a register).
VarSetOps::Assign(compiler, gcTrkStkDeltaSet, compiler->codeGen->gcInfo.gcTrkStkPtrLcls);
VarSetOps::IntersectionD(compiler, gcTrkStkDeltaSet, stackVarDeltaSet);
if (!VarSetOps::IsEmpty(compiler, gcTrkStkDeltaSet))
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tGCvars: ");
dumpConvertedVarSet(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur);
printf(" => ");
}
#endif // DEBUG
if (isBorn)
{
VarSetOps::UnionD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, gcTrkStkDeltaSet);
}
else
{
VarSetOps::DiffD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, gcTrkStkDeltaSet);
}
#ifdef DEBUG
if (compiler->verbose)
{
dumpConvertedVarSet(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur);
printf("\n");
}
#endif // DEBUG
}
#ifdef USING_VARIABLE_LIVE_RANGE
// For each of the LclVarDsc that are reporting change, variable or fields
compiler->codeGen->getVariableLiveKeeper()->siStartOrCloseVariableLiveRanges(varDeltaSet, isBorn, isDying);
#endif // USING_VARIABLE_LIVE_RANGE
#ifdef USING_SCOPE_INFO
compiler->codeGen->siUpdate();
#endif // USING_SCOPE_INFO
}
}
if (ForCodeGen && spill)
{
assert(!varDsc->lvPromoted);
compiler->codeGen->genSpillVar(tree);
if (VarSetOps::IsMember(compiler, compiler->codeGen->gcInfo.gcTrkStkPtrLcls, varDsc->lvVarIndex))
{
if (!VarSetOps::IsMember(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
VarSetOps::AddElemD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tVar V%02u becoming live\n", compiler->lvaGetLclNum(varDsc));
}
#endif // DEBUG
}
}
}
}
//------------------------------------------------------------------------
// UpdateLife: Determine whether the tree affects liveness, and update liveness sets accordingly.
//
// Arguments:
//    tree - the tree whose effect on liveness is processed.
//
template <bool ForCodeGen>
void TreeLifeUpdater<ForCodeGen>::UpdateLife(GenTree* tree)
{
assert(compiler->GetCurLVEpoch() == epoch);
// TODO-Cleanup: We shouldn't really be calling this more than once
if (tree == compiler->compCurLifeTree)
{
return;
}
if (!tree->OperIsNonPhiLocal() && compiler->fgIsIndirOfAddrOfLocal(tree) == nullptr)
{
return;
}
UpdateLifeVar(tree);
}
template class TreeLifeUpdater<true>;
template class TreeLifeUpdater<false>;
| #include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "treelifeupdater.h"
template <bool ForCodeGen>
TreeLifeUpdater<ForCodeGen>::TreeLifeUpdater(Compiler* compiler)
: compiler(compiler)
, newLife(VarSetOps::MakeEmpty(compiler))
, stackVarDeltaSet(VarSetOps::MakeEmpty(compiler))
, varDeltaSet(VarSetOps::MakeEmpty(compiler))
, gcTrkStkDeltaSet(VarSetOps::MakeEmpty(compiler))
#ifdef DEBUG
, gcVarPtrSetNew(VarSetOps::MakeEmpty(compiler))
, epoch(compiler->GetCurLVEpoch())
#endif // DEBUG
{
}
//------------------------------------------------------------------------
// UpdateLifeFieldVar: Update live sets for only the given field of a multi-reg LclVar node.
//
// Arguments:
// lclNode - the GT_LCL_VAR node.
// multiRegIndex - the index of the field being updated.
//
// Return Value:
// Returns true iff the variable needs to be spilled.
//
// Notes:
// This method need only be used when the fields are dying or going live at different times,
//    e.g. when we read the 0th field/reg of one node and define the 0th field/reg of another
// before reading the subsequent fields/regs.
//
template <bool ForCodeGen>
bool TreeLifeUpdater<ForCodeGen>::UpdateLifeFieldVar(GenTreeLclVar* lclNode, unsigned multiRegIndex)
{
LclVarDsc* parentVarDsc = compiler->lvaGetDesc(lclNode);
assert(parentVarDsc->lvPromoted && (multiRegIndex < parentVarDsc->lvFieldCnt) && lclNode->IsMultiReg() &&
compiler->lvaEnregMultiRegVars);
unsigned fieldVarNum = parentVarDsc->lvFieldLclStart + multiRegIndex;
LclVarDsc* fldVarDsc = compiler->lvaGetDesc(fieldVarNum);
assert(fldVarDsc->lvTracked);
unsigned fldVarIndex = fldVarDsc->lvVarIndex;
assert((lclNode->gtFlags & GTF_VAR_USEASG) == 0);
VarSetOps::Assign(compiler, newLife, compiler->compCurLife);
bool isBorn = ((lclNode->gtFlags & GTF_VAR_DEF) != 0);
bool isDying = !isBorn && lclNode->IsLastUse(multiRegIndex);
// GTF_SPILL will be set if any registers need to be spilled.
GenTreeFlags spillFlags = (lclNode->gtFlags & lclNode->GetRegSpillFlagByIdx(multiRegIndex));
bool spill = ((spillFlags & GTF_SPILL) != 0);
bool isInMemory = false;
if (isBorn || isDying)
{
if (ForCodeGen)
{
regNumber reg = lclNode->GetRegNumByIdx(multiRegIndex);
bool isInReg = fldVarDsc->lvIsInReg() && reg != REG_NA;
isInMemory = !isInReg || fldVarDsc->IsAlwaysAliveInMemory();
if (isInReg)
{
if (isBorn)
{
compiler->codeGen->genUpdateVarReg(fldVarDsc, lclNode, multiRegIndex);
}
compiler->codeGen->genUpdateRegLife(fldVarDsc, isBorn, isDying DEBUGARG(lclNode));
}
}
// First, update the live set
if (isDying)
{
VarSetOps::RemoveElemD(compiler, newLife, fldVarIndex);
}
else
{
VarSetOps::AddElemD(compiler, newLife, fldVarIndex);
}
}
if (!VarSetOps::Equal(compiler, compiler->compCurLife, newLife))
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tLive vars: ");
dumpConvertedVarSet(compiler, compiler->compCurLife);
printf(" => ");
dumpConvertedVarSet(compiler, newLife);
printf("\n");
}
#endif // DEBUG
VarSetOps::Assign(compiler, compiler->compCurLife, newLife);
if (ForCodeGen)
{
// Only add vars to the gcInfo.gcVarPtrSetCur if they are currently on stack, since the
// gcInfo.gcTrkStkPtrLcls
// includes all TRACKED vars that EVER live on the stack (i.e. are not always in a register).
VarSetOps::Assign(compiler, gcTrkStkDeltaSet, compiler->codeGen->gcInfo.gcTrkStkPtrLcls);
if (isInMemory && VarSetOps::IsMember(compiler, gcTrkStkDeltaSet, fldVarIndex))
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tGCvars: ");
dumpConvertedVarSet(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur);
printf(" => ");
}
#endif // DEBUG
if (isBorn)
{
VarSetOps::AddElemD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, fldVarIndex);
}
else
{
VarSetOps::RemoveElemD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, fldVarIndex);
}
#ifdef DEBUG
if (compiler->verbose)
{
dumpConvertedVarSet(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur);
printf("\n");
}
#endif // DEBUG
}
#ifdef USING_VARIABLE_LIVE_RANGE
// For each of the LclVarDsc that are reporting change, variable or fields
compiler->codeGen->getVariableLiveKeeper()->siStartOrCloseVariableLiveRange(fldVarDsc, fieldVarNum, isBorn,
isDying);
#endif // USING_VARIABLE_LIVE_RANGE
#ifdef USING_SCOPE_INFO
compiler->codeGen->siUpdate();
#endif // USING_SCOPE_INFO
}
}
if (ForCodeGen && spill)
{
if (VarSetOps::IsMember(compiler, compiler->codeGen->gcInfo.gcTrkStkPtrLcls, fldVarIndex))
{
if (!VarSetOps::IsMember(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, fldVarIndex))
{
VarSetOps::AddElemD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, fldVarIndex);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tVar V%02u becoming live\n", fieldVarNum);
}
#endif // DEBUG
}
}
return true;
}
return false;
}
//------------------------------------------------------------------------
// UpdateLifeVar: Update live sets for a given tree.
//
// Arguments:
// tree - the tree which affects liveness.
//
template <bool ForCodeGen>
void TreeLifeUpdater<ForCodeGen>::UpdateLifeVar(GenTree* tree)
{
GenTree* indirAddrLocal = compiler->fgIsIndirOfAddrOfLocal(tree);
assert(tree->OperIsNonPhiLocal() || indirAddrLocal != nullptr);
// Get the local var tree -- if "tree" is "Ldobj(addr(x))", or "ind(addr(x))" this is "x", else it's "tree".
GenTree* lclVarTree = indirAddrLocal;
if (lclVarTree == nullptr)
{
lclVarTree = tree;
}
unsigned int lclNum = lclVarTree->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
#ifdef DEBUG
#if !defined(TARGET_AMD64)
// There are no addr nodes on ARM and we are experimenting with encountering vars in 'random' order.
// Struct fields are not traversed in a consistent order, so ignore them when
// verifying that we see the var nodes in execution order
if (ForCodeGen)
{
if (tree->OperIsIndir())
{
assert(indirAddrLocal != NULL);
}
else if (tree->gtNext != NULL && tree->gtNext->gtOper == GT_ADDR &&
((tree->gtNext->gtNext == NULL || !tree->gtNext->gtNext->OperIsIndir())))
{
assert(tree->IsLocal()); // Can only take the address of a local.
// The ADDR might occur in a context where the address it contributes is eventually
// dereferenced, so we can't say that this is not a use or def.
}
}
#endif // !TARGET_AMD64
#endif // DEBUG
compiler->compCurLifeTree = tree;
VarSetOps::Assign(compiler, newLife, compiler->compCurLife);
// By codegen, a struct may not be TYP_STRUCT, so we have to
// check lvPromoted, for the case where the fields are being
// tracked.
if (!varDsc->lvTracked && !varDsc->lvPromoted)
{
return;
}
// if it's a partial definition then variable "x" must have had a previous, original, site to be born.
bool isBorn;
bool isDying;
// GTF_SPILL will be set on a MultiRegLclVar if any registers need to be spilled.
bool spill = ((lclVarTree->gtFlags & GTF_SPILL) != 0);
bool isMultiRegLocal = lclVarTree->IsMultiRegLclVar();
if (isMultiRegLocal)
{
// We should never have an 'IndirOfAddrOfLocal' for a multi-reg.
assert(lclVarTree == tree);
assert((lclVarTree->gtFlags & GTF_VAR_USEASG) == 0);
isBorn = ((lclVarTree->gtFlags & GTF_VAR_DEF) != 0);
// Note that for multireg locals we can have definitions for which some of those are last uses.
// We don't want to add those to the varDeltaSet because otherwise they will be added as newly
// live.
isDying = !isBorn && lclVarTree->HasLastUse();
}
else
{
isBorn = ((lclVarTree->gtFlags & GTF_VAR_DEF) != 0 && (lclVarTree->gtFlags & GTF_VAR_USEASG) == 0);
isDying = ((lclVarTree->gtFlags & GTF_VAR_DEATH) != 0);
}
// Since all tracked vars are register candidates, but not all are in registers at all times,
// we maintain two separate sets of variables - the total set of variables that are either
// born or dying here, and the subset of those that are on the stack
VarSetOps::ClearD(compiler, stackVarDeltaSet);
if (isBorn || isDying)
{
VarSetOps::ClearD(compiler, varDeltaSet);
if (varDsc->lvTracked)
{
VarSetOps::AddElemD(compiler, varDeltaSet, varDsc->lvVarIndex);
if (ForCodeGen)
{
if (isBorn && varDsc->lvIsRegCandidate() && tree->gtHasReg(compiler))
{
compiler->codeGen->genUpdateVarReg(varDsc, tree);
}
bool isInReg = varDsc->lvIsInReg() && tree->GetRegNum() != REG_NA;
bool isInMemory = !isInReg || varDsc->IsAlwaysAliveInMemory();
if (isInReg)
{
compiler->codeGen->genUpdateRegLife(varDsc, isBorn, isDying DEBUGARG(tree));
}
if (isInMemory)
{
VarSetOps::AddElemD(compiler, stackVarDeltaSet, varDsc->lvVarIndex);
}
}
}
else if (ForCodeGen && lclVarTree->IsMultiRegLclVar())
{
assert(varDsc->lvPromoted && compiler->lvaEnregMultiRegVars);
unsigned firstFieldVarNum = varDsc->lvFieldLclStart;
for (unsigned i = 0; i < varDsc->lvFieldCnt; ++i)
{
bool fieldIsSpilled = spill && ((lclVarTree->GetRegSpillFlagByIdx(i) & GTF_SPILL) != 0);
LclVarDsc* fldVarDsc = compiler->lvaGetDesc(firstFieldVarNum + i);
noway_assert(fldVarDsc->lvIsStructField);
assert(fldVarDsc->lvTracked);
unsigned fldVarIndex = fldVarDsc->lvVarIndex;
regNumber reg = lclVarTree->AsLclVar()->GetRegNumByIdx(i);
bool isInReg = fldVarDsc->lvIsInReg() && reg != REG_NA;
bool isInMemory = !isInReg || fldVarDsc->IsAlwaysAliveInMemory();
bool isFieldDying = lclVarTree->AsLclVar()->IsLastUse(i);
if ((isBorn && !isFieldDying) || (!isBorn && isFieldDying))
{
VarSetOps::AddElemD(compiler, varDeltaSet, fldVarIndex);
if (isInMemory)
{
VarSetOps::AddElemD(compiler, stackVarDeltaSet, fldVarIndex);
}
}
if (isInReg)
{
if (isBorn)
{
compiler->codeGen->genUpdateVarReg(fldVarDsc, tree, i);
}
compiler->codeGen->genUpdateRegLife(fldVarDsc, isBorn, isFieldDying DEBUGARG(tree));
// If this was marked for spill, genProduceReg should already have spilled it.
assert(!fieldIsSpilled);
}
}
spill = false;
}
else if (varDsc->lvPromoted)
{
// If hasDeadTrackedFieldVars is true, then, for a LDOBJ(ADDR(<promoted struct local>)),
// *deadTrackedFieldVars indicates which tracked field vars are dying.
bool hasDeadTrackedFieldVars = false;
if (indirAddrLocal != nullptr && isDying)
{
assert(!isBorn); // GTF_VAR_DEATH only set for LDOBJ last use.
VARSET_TP* deadTrackedFieldVars = nullptr;
hasDeadTrackedFieldVars =
compiler->LookupPromotedStructDeathVars(indirAddrLocal, &deadTrackedFieldVars);
if (hasDeadTrackedFieldVars)
{
VarSetOps::Assign(compiler, varDeltaSet, *deadTrackedFieldVars);
}
}
unsigned firstFieldVarNum = varDsc->lvFieldLclStart;
for (unsigned i = 0; i < varDsc->lvFieldCnt; ++i)
{
LclVarDsc* fldVarDsc = compiler->lvaGetDesc(firstFieldVarNum + i);
noway_assert(fldVarDsc->lvIsStructField);
if (fldVarDsc->lvTracked)
{
unsigned fldVarIndex = fldVarDsc->lvVarIndex;
// We should never see enregistered fields in a struct local unless
// IsMultiRegLclVar() returns true, in which case we've handled this above.
assert(!fldVarDsc->lvIsInReg());
noway_assert(fldVarIndex < compiler->lvaTrackedCount);
if (!hasDeadTrackedFieldVars)
{
VarSetOps::AddElemD(compiler, varDeltaSet, fldVarIndex);
if (ForCodeGen)
{
// We repeat this call here and below to avoid the VarSetOps::IsMember
// test in this, the common case, where we have no deadTrackedFieldVars.
VarSetOps::AddElemD(compiler, stackVarDeltaSet, fldVarIndex);
}
}
else if (ForCodeGen && VarSetOps::IsMember(compiler, varDeltaSet, fldVarIndex))
{
VarSetOps::AddElemD(compiler, stackVarDeltaSet, fldVarIndex);
}
}
}
}
// First, update the live set
if (isDying)
{
// We'd like to be able to assert the following, however if we are walking
// through a qmark/colon tree, we may encounter multiple last-use nodes.
// assert (VarSetOps::IsSubset(compiler, regVarDeltaSet, newLife));
VarSetOps::DiffD(compiler, newLife, varDeltaSet);
}
else
{
// This shouldn't be in newLife, unless this is debug code, in which
// case we keep vars live everywhere, OR the variable is address-exposed,
// OR this block is part of a try block, in which case it may be live at the handler
// Could add a check that, if it's in newLife, that it's also in
// fgGetHandlerLiveVars(compCurBB), but seems excessive
//
// For a dead store, it can be the case that we set both isBorn and isDying to true.
// (We don't eliminate dead stores under MinOpts, so we can't assume they're always
// eliminated.) If it's both, we handled it above.
VarSetOps::UnionD(compiler, newLife, varDeltaSet);
}
}
if (!VarSetOps::Equal(compiler, compiler->compCurLife, newLife))
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tLive vars: ");
dumpConvertedVarSet(compiler, compiler->compCurLife);
printf(" => ");
dumpConvertedVarSet(compiler, newLife);
printf("\n");
}
#endif // DEBUG
VarSetOps::Assign(compiler, compiler->compCurLife, newLife);
if (ForCodeGen)
{
// Only add vars to the gcInfo.gcVarPtrSetCur if they are currently on stack, since the
// gcInfo.gcTrkStkPtrLcls
// includes all TRACKED vars that EVER live on the stack (i.e. are not always in a register).
VarSetOps::Assign(compiler, gcTrkStkDeltaSet, compiler->codeGen->gcInfo.gcTrkStkPtrLcls);
VarSetOps::IntersectionD(compiler, gcTrkStkDeltaSet, stackVarDeltaSet);
if (!VarSetOps::IsEmpty(compiler, gcTrkStkDeltaSet))
{
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tGCvars: ");
dumpConvertedVarSet(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur);
printf(" => ");
}
#endif // DEBUG
if (isBorn)
{
VarSetOps::UnionD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, gcTrkStkDeltaSet);
}
else
{
VarSetOps::DiffD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, gcTrkStkDeltaSet);
}
#ifdef DEBUG
if (compiler->verbose)
{
dumpConvertedVarSet(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur);
printf("\n");
}
#endif // DEBUG
}
#ifdef USING_VARIABLE_LIVE_RANGE
// For each of the LclVarDsc that are reporting change, variable or fields
compiler->codeGen->getVariableLiveKeeper()->siStartOrCloseVariableLiveRanges(varDeltaSet, isBorn, isDying);
#endif // USING_VARIABLE_LIVE_RANGE
#ifdef USING_SCOPE_INFO
compiler->codeGen->siUpdate();
#endif // USING_SCOPE_INFO
}
}
if (ForCodeGen && spill)
{
assert(!varDsc->lvPromoted);
compiler->codeGen->genSpillVar(tree);
if (VarSetOps::IsMember(compiler, compiler->codeGen->gcInfo.gcTrkStkPtrLcls, varDsc->lvVarIndex))
{
if (!VarSetOps::IsMember(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
{
VarSetOps::AddElemD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tVar V%02u becoming live\n", compiler->lvaGetLclNum(varDsc));
}
#endif // DEBUG
}
}
}
}
//------------------------------------------------------------------------
// UpdateLife: Determine whether the tree affects liveness, and update liveness sets accordingly.
//
// Arguments:
//    tree - the tree whose effect on liveness is processed.
//
template <bool ForCodeGen>
void TreeLifeUpdater<ForCodeGen>::UpdateLife(GenTree* tree)
{
assert(compiler->GetCurLVEpoch() == epoch);
// TODO-Cleanup: We shouldn't really be calling this more than once
if (tree == compiler->compCurLifeTree)
{
return;
}
if (!tree->OperIsNonPhiLocal() && compiler->fgIsIndirOfAddrOfLocal(tree) == nullptr)
{
return;
}
UpdateLifeVar(tree);
}
template class TreeLifeUpdater<true>;
template class TreeLifeUpdater<false>;
| 1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/nativeaot/Runtime/thread.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "forward_declarations.h"
struct gc_alloc_context;
class RuntimeInstance;
class ThreadStore;
class CLREventStatic;
class Thread;
// The offsets of some fields in the thread (in particular, m_pTransitionFrame) are known to the compiler and get
// inlined into the code. Let's make sure they don't change just because we enable/disable server GC in a particular
// runtime build.
#define KEEP_THREAD_LAYOUT_CONSTANT
#ifndef HOST_64BIT
# if defined(FEATURE_SVR_GC) || defined(KEEP_THREAD_LAYOUT_CONSTANT)
# define SIZEOF_ALLOC_CONTEXT 40
# else
# define SIZEOF_ALLOC_CONTEXT 28
# endif
#else // HOST_64BIT
# if defined(FEATURE_SVR_GC) || defined(KEEP_THREAD_LAYOUT_CONSTANT)
# define SIZEOF_ALLOC_CONTEXT 56
# else
# define SIZEOF_ALLOC_CONTEXT 40
# endif
#endif // HOST_64BIT
#define TOP_OF_STACK_MARKER ((PTR_VOID)(uintptr_t)(intptr_t)-1)
#define DYNAMIC_TYPE_TLS_OFFSET_FLAG 0x80000000
enum SyncRequestResult
{
TryAgain,
SuccessUnmanaged,
SuccessManaged,
};
typedef DPTR(PAL_LIMITED_CONTEXT) PTR_PAL_LIMITED_CONTEXT;
struct ExInfo;
typedef DPTR(ExInfo) PTR_ExInfo;
// Also defined in ExceptionHandling.cs, layouts must match.
// When adding new fields to this struct, ensure they get properly initialized in the exception handling
// assembly stubs
struct ExInfo
{
PTR_ExInfo m_pPrevExInfo;
PTR_PAL_LIMITED_CONTEXT m_pExContext;
PTR_Object m_exception; // actual object reference, specially reported by GcScanRootsWorker
ExKind m_kind;
uint8_t m_passNumber;
uint32_t m_idxCurClause;
StackFrameIterator m_frameIter;
volatile void* m_notifyDebuggerSP;
};
struct ThreadBuffer
{
uint8_t m_rgbAllocContextBuffer[SIZEOF_ALLOC_CONTEXT];
uint32_t volatile m_ThreadStateFlags; // see Thread::ThreadStateFlags enum
#if DACCESS_COMPILE
PTR_VOID m_pTransitionFrame;
#else
PTR_VOID volatile m_pTransitionFrame;
#endif
PTR_VOID m_pHackPInvokeTunnel; // see Thread::EnablePreemptiveMode
PTR_VOID m_pCachedTransitionFrame;
PTR_Thread m_pNext; // used by ThreadStore's SList<Thread>
HANDLE m_hPalThread; // WARNING: this may legitimately be INVALID_HANDLE_VALUE
void ** m_ppvHijackedReturnAddressLocation;
void * m_pvHijackedReturnAddress;
#ifdef HOST_64BIT
uintptr_t m_uHijackedReturnValueFlags; // used on ARM64 only; however, ARM64 and AMD64 share field offsets
#endif // HOST_64BIT
PTR_ExInfo m_pExInfoStackHead;
Object* m_threadAbortException; // ThreadAbortException instance -set only during thread abort
PTR_PTR_VOID m_pThreadLocalModuleStatics;
uint32_t m_numThreadLocalModuleStatics;
PTR_VOID m_pStackLow;
PTR_VOID m_pStackHigh;
PTR_UInt8 m_pTEB; // Pointer to OS TEB structure for this thread
uint64_t m_uPalThreadIdForLogging; // @TODO: likely debug-only
EEThreadId m_threadId;
PTR_VOID m_pThreadStressLog; // pointer to head of thread's StressLogChunks
uint32_t m_cantAlloc;
#ifdef FEATURE_GC_STRESS
uint32_t m_uRand; // current per-thread random number
#endif // FEATURE_GC_STRESS
// Thread Statics Storage for dynamic types
uint32_t m_numDynamicTypesTlsCells;
PTR_PTR_UInt8 m_pDynamicTypesTlsCells;
};
struct ReversePInvokeFrame
{
void* m_savedPInvokeTransitionFrame;
Thread* m_savedThread;
};
class Thread : private ThreadBuffer
{
friend class AsmOffsets;
friend struct DefaultSListTraits<Thread>;
friend class ThreadStore;
IN_DAC(friend class ClrDataAccess;)
public:
enum ThreadStateFlags
{
TSF_Unknown = 0x00000000, // Threads are created in this state
TSF_Attached = 0x00000001, // Thread was inited by first U->M transition on this thread
TSF_Detached = 0x00000002, // Thread was detached by DllMain
TSF_SuppressGcStress = 0x00000008, // Do not allow gc stress on this thread, used in DllMain
// ...and on the Finalizer thread
TSF_DoNotTriggerGc = 0x00000010, // Do not allow hijacking of this thread, also intended to
// ...be checked during allocations in debug builds.
TSF_IsGcSpecialThread = 0x00000020, // Set to indicate a GC worker thread used for background GC
#ifdef FEATURE_GC_STRESS
TSF_IsRandSeedSet = 0x00000040, // set to indicate the random number generator for GCStress was inited
#endif // FEATURE_GC_STRESS
};
private:
void Construct();
void SetState(ThreadStateFlags flags);
void ClearState(ThreadStateFlags flags);
bool IsStateSet(ThreadStateFlags flags);
static UInt32_BOOL HijackCallback(HANDLE hThread, PAL_LIMITED_CONTEXT* pThreadContext, void* pCallbackContext);
bool InternalHijack(PAL_LIMITED_CONTEXT * pSuspendCtx, void * pvHijackTargets[]);
bool CacheTransitionFrameForSuspend();
void ResetCachedTransitionFrame();
void CrossThreadUnhijack();
void UnhijackWorker();
void EnsureRuntimeInitialized();
#ifdef _DEBUG
bool DebugIsSuspended();
#endif
//
// SyncState members
//
PTR_VOID GetTransitionFrame();
void GcScanRootsWorker(void * pfnEnumCallback, void * pvCallbackData, StackFrameIterator & sfIter);
public:
void Detach(); // First phase of thread destructor, executed with thread store lock taken
void Destroy(); // Second phase of thread destructor, executed without thread store lock taken
bool IsInitialized();
gc_alloc_context * GetAllocContext(); // @TODO: I would prefer to not expose this in this way
#ifndef DACCESS_COMPILE
uint64_t GetPalThreadIdForLogging();
bool IsCurrentThread();
void GcScanRoots(void * pfnEnumCallback, void * pvCallbackData);
#else
typedef void GcScanRootsCallbackFunc(PTR_RtuObjectRef ppObject, void* token, uint32_t flags);
bool GcScanRoots(GcScanRootsCallbackFunc * pfnCallback, void * token, PTR_PAL_LIMITED_CONTEXT pInitialContext);
#endif
bool Hijack();
void Unhijack();
#ifdef FEATURE_GC_STRESS
static void HijackForGcStress(PAL_LIMITED_CONTEXT * pSuspendCtx);
#endif // FEATURE_GC_STRESS
bool IsHijacked();
void * GetHijackedReturnAddress();
void * GetUnhijackedReturnAddress(void** ppvReturnAddressLocation);
bool DangerousCrossThreadIsHijacked();
bool IsSuppressGcStressSet();
void SetSuppressGcStress();
void ClearSuppressGcStress();
bool IsWithinStackBounds(PTR_VOID p);
void GetStackBounds(PTR_VOID * ppStackLow, PTR_VOID * ppStackHigh);
PTR_UInt8 AllocateThreadLocalStorageForDynamicType(uint32_t uTlsTypeOffset, uint32_t tlsStorageSize, uint32_t numTlsCells);
PTR_UInt8 GetThreadLocalStorageForDynamicType(uint32_t uTlsTypeOffset);
PTR_UInt8 GetThreadLocalStorage(uint32_t uTlsIndex, uint32_t uTlsStartOffset);
void PushExInfo(ExInfo * pExInfo);
void ValidateExInfoPop(ExInfo * pExInfo, void * limitSP);
void ValidateExInfoStack();
bool IsDoNotTriggerGcSet();
void SetDoNotTriggerGc();
void ClearDoNotTriggerGc();
bool IsDetached();
void SetDetached();
PTR_VOID GetThreadStressLog() const;
#ifndef DACCESS_COMPILE
void SetThreadStressLog(void * ptsl);
#endif // DACCESS_COMPILE
void EnterCantAllocRegion();
void LeaveCantAllocRegion();
bool IsInCantAllocStressLogRegion();
#ifdef FEATURE_GC_STRESS
void SetRandomSeed(uint32_t seed);
uint32_t NextRand();
bool IsRandInited();
#endif // FEATURE_GC_STRESS
PTR_ExInfo GetCurExInfo();
bool IsCurrentThreadInCooperativeMode();
PTR_VOID GetTransitionFrameForStackTrace();
void * GetCurrentThreadPInvokeReturnAddress();
static bool IsHijackTarget(void * address);
//
// The set of operations used to support unmanaged code running in cooperative mode
//
void EnablePreemptiveMode();
void DisablePreemptiveMode();
// Set the m_pHackPInvokeTunnel field for GC allocation helpers that set up the transition frame
// in assembly code. Do not use anywhere else.
void SetCurrentThreadPInvokeTunnelForGcAlloc(void * pTransitionFrame);
// Setup the m_pHackPInvokeTunnel field for GC helpers entered via regular PInvoke.
// Do not use anywhere else.
void SetupHackPInvokeTunnel();
//
// GC support APIs - do not use except from GC itself
//
void SetGCSpecial(bool isGCSpecial);
bool IsGCSpecial();
bool CatchAtSafePoint();
//
// Managed/unmanaged interop transitions support APIs
//
void WaitForSuspend();
void WaitForGC(void * pTransitionFrame);
void ReversePInvokeAttachOrTrapThread(ReversePInvokeFrame * pFrame);
bool InlineTryFastReversePInvoke(ReversePInvokeFrame * pFrame);
void InlineReversePInvokeReturn(ReversePInvokeFrame * pFrame);
void InlinePInvoke(PInvokeTransitionFrame * pFrame);
void InlinePInvokeReturn(PInvokeTransitionFrame * pFrame);
Object * GetThreadAbortException();
void SetThreadAbortException(Object *exception);
Object* GetThreadStaticStorageForModule(uint32_t moduleIndex);
bool SetThreadStaticStorageForModule(Object* pStorage, uint32_t moduleIndex);
};
#ifndef __GCENV_BASE_INCLUDED__
typedef DPTR(Object) PTR_Object;
typedef DPTR(PTR_Object) PTR_PTR_Object;
#endif // !__GCENV_BASE_INCLUDED__
#ifdef DACCESS_COMPILE
// The DAC uses DebuggerEnumGcRefContext in place of a GCCONTEXT when doing reference
// enumeration. The GC passes through additional data in the ScanContext which the debugger
// neither has nor needs. While we could refactor the GC code to make an interface
// with less coupling, that might affect perf or make integration messier. Instead
// we use some typedefs so DAC and runtime can get strong yet distinct types.
// Ideally we wouldn't need this wrapper, but PromoteCarefully needs access to the
// thread and a promotion field. We aren't assuming the user's token will have this data.
struct DacScanCallbackData
{
Thread* thread_under_crawl; // the thread being scanned
bool promotion; // are we emulating the GC promote phase or relocate phase?
// different references are reported for each
void* token; // the callback data passed to GCScanRoots
void* pfnUserCallback; // the callback passed in to GcScanRoots
uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read
};
typedef DacScanCallbackData EnumGcRefScanContext;
typedef void EnumGcRefCallbackFunc(PTR_PTR_Object, EnumGcRefScanContext* callbackData, uint32_t flags);
#else // DACCESS_COMPILE
#ifndef __GCENV_BASE_INCLUDED__
struct ScanContext;
typedef void promote_func(PTR_PTR_Object, ScanContext*, unsigned);
#endif // !__GCENV_BASE_INCLUDED__
typedef promote_func EnumGcRefCallbackFunc;
typedef ScanContext EnumGcRefScanContext;
#endif // DACCESS_COMPILE
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "forward_declarations.h"
struct gc_alloc_context;
class RuntimeInstance;
class ThreadStore;
class CLREventStatic;
class Thread;
// The offsets of some fields in the thread (in particular, m_pTransitionFrame) are known to the compiler and get
// inlined into the code. Let's make sure they don't change just because we enable/disable server GC in a particular
// runtime build.
#define KEEP_THREAD_LAYOUT_CONSTANT
#ifndef HOST_64BIT
# if defined(FEATURE_SVR_GC) || defined(KEEP_THREAD_LAYOUT_CONSTANT)
# define SIZEOF_ALLOC_CONTEXT 40
# else
# define SIZEOF_ALLOC_CONTEXT 28
# endif
#else // HOST_64BIT
# if defined(FEATURE_SVR_GC) || defined(KEEP_THREAD_LAYOUT_CONSTANT)
# define SIZEOF_ALLOC_CONTEXT 56
# else
# define SIZEOF_ALLOC_CONTEXT 40
# endif
#endif // HOST_64BIT
#define TOP_OF_STACK_MARKER ((PTR_VOID)(uintptr_t)(intptr_t)-1)
#define DYNAMIC_TYPE_TLS_OFFSET_FLAG 0x80000000
enum SyncRequestResult
{
TryAgain,
SuccessUnmanaged,
SuccessManaged,
};
typedef DPTR(PAL_LIMITED_CONTEXT) PTR_PAL_LIMITED_CONTEXT;
struct ExInfo;
typedef DPTR(ExInfo) PTR_ExInfo;
// Also defined in ExceptionHandling.cs, layouts must match.
// When adding new fields to this struct, ensure they get properly initialized in the exception handling
// assembly stubs
struct ExInfo
{
PTR_ExInfo m_pPrevExInfo;
PTR_PAL_LIMITED_CONTEXT m_pExContext;
PTR_Object m_exception; // actual object reference, specially reported by GcScanRootsWorker
ExKind m_kind;
uint8_t m_passNumber;
uint32_t m_idxCurClause;
StackFrameIterator m_frameIter;
volatile void* m_notifyDebuggerSP;
};
struct ThreadBuffer
{
uint8_t m_rgbAllocContextBuffer[SIZEOF_ALLOC_CONTEXT];
uint32_t volatile m_ThreadStateFlags; // see Thread::ThreadStateFlags enum
#if DACCESS_COMPILE
PTR_VOID m_pTransitionFrame;
#else
PTR_VOID volatile m_pTransitionFrame;
#endif
PTR_VOID m_pHackPInvokeTunnel; // see Thread::EnablePreemptiveMode
PTR_VOID m_pCachedTransitionFrame;
PTR_Thread m_pNext; // used by ThreadStore's SList<Thread>
HANDLE m_hPalThread; // WARNING: this may legitimately be INVALID_HANDLE_VALUE
void ** m_ppvHijackedReturnAddressLocation;
void * m_pvHijackedReturnAddress;
#ifdef HOST_64BIT
uintptr_t m_uHijackedReturnValueFlags; // used on ARM64 only; however, ARM64 and AMD64 share field offsets
#endif // HOST_64BIT
PTR_ExInfo m_pExInfoStackHead;
    Object* m_threadAbortException; // ThreadAbortException instance - set only during thread abort
PTR_PTR_VOID m_pThreadLocalModuleStatics;
uint32_t m_numThreadLocalModuleStatics;
PTR_VOID m_pStackLow;
PTR_VOID m_pStackHigh;
PTR_UInt8 m_pTEB; // Pointer to OS TEB structure for this thread
uint64_t m_uPalThreadIdForLogging; // @TODO: likely debug-only
EEThreadId m_threadId;
PTR_VOID m_pThreadStressLog; // pointer to head of thread's StressLogChunks
uint32_t m_cantAlloc;
#ifdef FEATURE_GC_STRESS
uint32_t m_uRand; // current per-thread random number
#endif // FEATURE_GC_STRESS
// Thread Statics Storage for dynamic types
uint32_t m_numDynamicTypesTlsCells;
PTR_PTR_UInt8 m_pDynamicTypesTlsCells;
};
struct ReversePInvokeFrame
{
void* m_savedPInvokeTransitionFrame;
Thread* m_savedThread;
};
class Thread : private ThreadBuffer
{
friend class AsmOffsets;
friend struct DefaultSListTraits<Thread>;
friend class ThreadStore;
IN_DAC(friend class ClrDataAccess;)
public:
enum ThreadStateFlags
{
TSF_Unknown = 0x00000000, // Threads are created in this state
TSF_Attached = 0x00000001, // Thread was inited by first U->M transition on this thread
TSF_Detached = 0x00000002, // Thread was detached by DllMain
TSF_SuppressGcStress = 0x00000008, // Do not allow gc stress on this thread, used in DllMain
// ...and on the Finalizer thread
TSF_DoNotTriggerGc = 0x00000010, // Do not allow hijacking of this thread, also intended to
// ...be checked during allocations in debug builds.
TSF_IsGcSpecialThread = 0x00000020, // Set to indicate a GC worker thread used for background GC
#ifdef FEATURE_GC_STRESS
TSF_IsRandSeedSet = 0x00000040, // set to indicate the random number generator for GCStress was inited
#endif // FEATURE_GC_STRESS
};
private:
void Construct();
void SetState(ThreadStateFlags flags);
void ClearState(ThreadStateFlags flags);
bool IsStateSet(ThreadStateFlags flags);
static UInt32_BOOL HijackCallback(HANDLE hThread, PAL_LIMITED_CONTEXT* pThreadContext, void* pCallbackContext);
bool InternalHijack(PAL_LIMITED_CONTEXT * pSuspendCtx, void * pvHijackTargets[]);
bool CacheTransitionFrameForSuspend();
void ResetCachedTransitionFrame();
void CrossThreadUnhijack();
void UnhijackWorker();
void EnsureRuntimeInitialized();
#ifdef _DEBUG
bool DebugIsSuspended();
#endif
//
// SyncState members
//
PTR_VOID GetTransitionFrame();
void GcScanRootsWorker(void * pfnEnumCallback, void * pvCallbackData, StackFrameIterator & sfIter);
public:
void Detach(); // First phase of thread destructor, executed with thread store lock taken
void Destroy(); // Second phase of thread destructor, executed without thread store lock taken
bool IsInitialized();
gc_alloc_context * GetAllocContext(); // @TODO: I would prefer to not expose this in this way
#ifndef DACCESS_COMPILE
uint64_t GetPalThreadIdForLogging();
bool IsCurrentThread();
void GcScanRoots(void * pfnEnumCallback, void * pvCallbackData);
#else
typedef void GcScanRootsCallbackFunc(PTR_RtuObjectRef ppObject, void* token, uint32_t flags);
bool GcScanRoots(GcScanRootsCallbackFunc * pfnCallback, void * token, PTR_PAL_LIMITED_CONTEXT pInitialContext);
#endif
bool Hijack();
void Unhijack();
#ifdef FEATURE_GC_STRESS
static void HijackForGcStress(PAL_LIMITED_CONTEXT * pSuspendCtx);
#endif // FEATURE_GC_STRESS
bool IsHijacked();
void * GetHijackedReturnAddress();
void * GetUnhijackedReturnAddress(void** ppvReturnAddressLocation);
bool DangerousCrossThreadIsHijacked();
bool IsSuppressGcStressSet();
void SetSuppressGcStress();
void ClearSuppressGcStress();
bool IsWithinStackBounds(PTR_VOID p);
void GetStackBounds(PTR_VOID * ppStackLow, PTR_VOID * ppStackHigh);
PTR_UInt8 AllocateThreadLocalStorageForDynamicType(uint32_t uTlsTypeOffset, uint32_t tlsStorageSize, uint32_t numTlsCells);
PTR_UInt8 GetThreadLocalStorageForDynamicType(uint32_t uTlsTypeOffset);
PTR_UInt8 GetThreadLocalStorage(uint32_t uTlsIndex, uint32_t uTlsStartOffset);
void PushExInfo(ExInfo * pExInfo);
void ValidateExInfoPop(ExInfo * pExInfo, void * limitSP);
void ValidateExInfoStack();
bool IsDoNotTriggerGcSet();
void SetDoNotTriggerGc();
void ClearDoNotTriggerGc();
bool IsDetached();
void SetDetached();
PTR_VOID GetThreadStressLog() const;
#ifndef DACCESS_COMPILE
void SetThreadStressLog(void * ptsl);
#endif // DACCESS_COMPILE
void EnterCantAllocRegion();
void LeaveCantAllocRegion();
bool IsInCantAllocStressLogRegion();
#ifdef FEATURE_GC_STRESS
void SetRandomSeed(uint32_t seed);
uint32_t NextRand();
bool IsRandInited();
#endif // FEATURE_GC_STRESS
PTR_ExInfo GetCurExInfo();
bool IsCurrentThreadInCooperativeMode();
PTR_VOID GetTransitionFrameForStackTrace();
void * GetCurrentThreadPInvokeReturnAddress();
static bool IsHijackTarget(void * address);
//
// The set of operations used to support unmanaged code running in cooperative mode
//
void EnablePreemptiveMode();
void DisablePreemptiveMode();
    // Set the m_pHackPInvokeTunnel field for GC allocation helpers that set up a transition frame
// in assembly code. Do not use anywhere else.
void SetCurrentThreadPInvokeTunnelForGcAlloc(void * pTransitionFrame);
    // Set up the m_pHackPInvokeTunnel field for GC helpers entered via regular PInvoke.
// Do not use anywhere else.
void SetupHackPInvokeTunnel();
//
// GC support APIs - do not use except from GC itself
//
void SetGCSpecial(bool isGCSpecial);
bool IsGCSpecial();
bool CatchAtSafePoint();
//
// Managed/unmanaged interop transitions support APIs
//
void WaitForSuspend();
void WaitForGC(void * pTransitionFrame);
void ReversePInvokeAttachOrTrapThread(ReversePInvokeFrame * pFrame);
bool InlineTryFastReversePInvoke(ReversePInvokeFrame * pFrame);
void InlineReversePInvokeReturn(ReversePInvokeFrame * pFrame);
void InlinePInvoke(PInvokeTransitionFrame * pFrame);
void InlinePInvokeReturn(PInvokeTransitionFrame * pFrame);
Object * GetThreadAbortException();
void SetThreadAbortException(Object *exception);
Object* GetThreadStaticStorageForModule(uint32_t moduleIndex);
bool SetThreadStaticStorageForModule(Object* pStorage, uint32_t moduleIndex);
};
#ifndef __GCENV_BASE_INCLUDED__
typedef DPTR(Object) PTR_Object;
typedef DPTR(PTR_Object) PTR_PTR_Object;
#endif // !__GCENV_BASE_INCLUDED__
#ifdef DACCESS_COMPILE
// The DAC uses DebuggerEnumGcRefContext in place of a GCCONTEXT when doing reference
// enumeration. The GC passes through additional data in the ScanContext which the debugger
// neither has nor needs. While we could refactor the GC code to make an interface
// with less coupling, that might affect perf or make integration messier. Instead
// we use some typedefs so DAC and runtime can get strong yet distinct types.
// Ideally we wouldn't need this wrapper, but PromoteCarefully needs access to the
// thread and a promotion field. We aren't assuming the user's token will have this data.
struct DacScanCallbackData
{
Thread* thread_under_crawl; // the thread being scanned
bool promotion; // are we emulating the GC promote phase or relocate phase?
// different references are reported for each
void* token; // the callback data passed to GCScanRoots
void* pfnUserCallback; // the callback passed in to GcScanRoots
uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read
};
typedef DacScanCallbackData EnumGcRefScanContext;
typedef void EnumGcRefCallbackFunc(PTR_PTR_Object, EnumGcRefScanContext* callbackData, uint32_t flags);
#else // DACCESS_COMPILE
#ifndef __GCENV_BASE_INCLUDED__
struct ScanContext;
typedef void promote_func(PTR_PTR_Object, ScanContext*, unsigned);
#endif // !__GCENV_BASE_INCLUDED__
typedef promote_func EnumGcRefCallbackFunc;
typedef ScanContext EnumGcRefScanContext;
#endif // DACCESS_COMPILE
| -1 |
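A note on the ThreadStateFlags enum in the header above: each value is a single bit, and SetState/ClearState/IsStateSet combine them in the volatile m_ThreadStateFlags word. The sketch below shows that bit-flag pattern in isolation; the ThreadState class, its use of std::atomic, and main() are illustrative stand-ins and not the runtime's implementation.

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

// Illustrative single-bit state flags, mirroring the style of Thread::ThreadStateFlags.
enum ThreadStateFlags : uint32_t
{
    TSF_Unknown          = 0x00000000,
    TSF_Attached         = 0x00000001,
    TSF_Detached         = 0x00000002,
    TSF_SuppressGcStress = 0x00000008,
    TSF_DoNotTriggerGc   = 0x00000010,
};

// Hypothetical holder for the flags word; the real runtime keeps it in ThreadBuffer.
class ThreadState
{
    std::atomic<uint32_t> m_flags{TSF_Unknown};

public:
    void SetState(uint32_t flags)   { m_flags.fetch_or(flags, std::memory_order_relaxed); }
    void ClearState(uint32_t flags) { m_flags.fetch_and(~flags, std::memory_order_relaxed); }
    bool IsStateSet(uint32_t flags) const
    {
        return (m_flags.load(std::memory_order_relaxed) & flags) == flags;
    }
};

int main()
{
    ThreadState state;
    state.SetState(TSF_Attached | TSF_DoNotTriggerGc); // e.g. while inside a no-GC region
    assert(state.IsStateSet(TSF_DoNotTriggerGc));
    state.ClearState(TSF_DoNotTriggerGc);              // leaving the region
    assert(!state.IsStateSet(TSF_DoNotTriggerGc));
    assert(state.IsStateSet(TSF_Attached));
    return 0;
}
```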
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
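To make the failure mode described above concrete, here is a small self-contained sketch of per-slot GC reporting for a multi-register value. RegSlot, GcRefTracker, and reportMultiRegStore are invented names for illustration; they are not the JIT's gtHasReg/GetMultiRegCount machinery. The point is only that every enregistered slot holding a GC pointer must be marked live, not just slot 0.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative model of one register slot of a multi-reg local.
struct RegSlot
{
    int  regNum;       // hypothetical register number
    bool enregistered; // does this slot live in a register?
    bool isGcPointer;  // does this slot hold a GC reference?
};

// Hypothetical GC tracking set: a bit mask of registers holding live GC pointers.
struct GcRefTracker
{
    uint64_t liveGcRegs = 0;
    void markLive(int regNum)     { liveGcRegs |= (1ull << regNum); }
    bool isLive(int regNum) const { return (liveGcRegs >> regNum) & 1; }
};

// Report a multi-reg store: walk *all* slots, not only the first one. The bug
// described above behaves as if we returned early when slot 0 is not in a
// register, leaving slot 1's GC pointer unreported.
void reportMultiRegStore(const std::vector<RegSlot>& slots, GcRefTracker& gc)
{
    for (const RegSlot& slot : slots)
    {
        if (slot.enregistered && slot.isGcPointer)
        {
            gc.markLive(slot.regNum);
        }
    }
}

int main()
{
    // Slot 0 lives on the stack; slot 1 holds a GC pointer in register 3.
    std::vector<RegSlot> slots = {{0, false, false}, {3, true, true}};
    GcRefTracker gc;
    reportMultiRegStore(slots, gc);
    std::printf("reg 3 tracked as GC-live: %s\n", gc.isLive(3) ? "yes" : "no");
    return 0;
}
```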
| ./src/coreclr/ildasm/ildasmpch.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This is just to build the PCH for ildasm
#include "ildasmpch.h"
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This is just to build the PCH for ildasm
#include "ildasmpch.h"
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/mono/dlls/mscordbi/cordb-function.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: CORDB-FUNCTION.H
//
#ifndef __MONO_DEBUGGER_CORDB_FUNCTION_H__
#define __MONO_DEBUGGER_CORDB_FUNCTION_H__
#include <cordb-assembly.h>
#include <cordb.h>
class CordbFunction : public CordbBaseMono,
public ICorDebugFunction,
public ICorDebugFunction2,
public ICorDebugFunction3,
public ICorDebugFunction4
{
int m_debuggerId;
mdToken m_metadataToken;
CordbCode* m_pCode;
CordbModule* m_pModule;
public:
CordbFunction(Connection* conn, mdToken token, int id, CordbModule* module);
ULONG STDMETHODCALLTYPE AddRef(void)
{
return (BaseAddRef());
}
ULONG STDMETHODCALLTYPE Release(void)
{
return (BaseRelease());
}
const char* GetClassName()
{
return "CordbFunction";
}
~CordbFunction();
int GetDebuggerId() const
{
return m_debuggerId;
}
HRESULT STDMETHODCALLTYPE QueryInterface(REFIID id, _COM_Outptr_ void __RPC_FAR* __RPC_FAR* pInterface);
HRESULT STDMETHODCALLTYPE GetModule(ICorDebugModule** ppModule);
HRESULT STDMETHODCALLTYPE GetClass(ICorDebugClass** ppClass);
HRESULT STDMETHODCALLTYPE GetToken(mdMethodDef* pMethodDef);
HRESULT STDMETHODCALLTYPE GetILCode(ICorDebugCode** ppCode);
HRESULT STDMETHODCALLTYPE GetNativeCode(ICorDebugCode** ppCode);
HRESULT STDMETHODCALLTYPE CreateBreakpoint(ICorDebugFunctionBreakpoint** ppBreakpoint);
HRESULT STDMETHODCALLTYPE GetLocalVarSigToken(mdSignature* pmdSig);
HRESULT STDMETHODCALLTYPE GetCurrentVersionNumber(ULONG32* pnCurrentVersion);
HRESULT STDMETHODCALLTYPE SetJMCStatus(BOOL bIsJustMyCode);
HRESULT STDMETHODCALLTYPE GetJMCStatus(BOOL* pbIsJustMyCode);
HRESULT STDMETHODCALLTYPE EnumerateNativeCode(ICorDebugCodeEnum** ppCodeEnum);
HRESULT STDMETHODCALLTYPE GetVersionNumber(ULONG32* pnVersion);
HRESULT STDMETHODCALLTYPE GetActiveReJitRequestILCode(ICorDebugILCode** ppReJitedILCode);
HRESULT STDMETHODCALLTYPE CreateNativeBreakpoint(ICorDebugFunctionBreakpoint** ppBreakpoint);
mdToken GetMetadataToken() const
{
return m_metadataToken;
}
};
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: CORDB-FUNCTION.H
//
#ifndef __MONO_DEBUGGER_CORDB_FUNCTION_H__
#define __MONO_DEBUGGER_CORDB_FUNCTION_H__
#include <cordb-assembly.h>
#include <cordb.h>
class CordbFunction : public CordbBaseMono,
public ICorDebugFunction,
public ICorDebugFunction2,
public ICorDebugFunction3,
public ICorDebugFunction4
{
int m_debuggerId;
mdToken m_metadataToken;
CordbCode* m_pCode;
CordbModule* m_pModule;
public:
CordbFunction(Connection* conn, mdToken token, int id, CordbModule* module);
ULONG STDMETHODCALLTYPE AddRef(void)
{
return (BaseAddRef());
}
ULONG STDMETHODCALLTYPE Release(void)
{
return (BaseRelease());
}
const char* GetClassName()
{
return "CordbFunction";
}
~CordbFunction();
int GetDebuggerId() const
{
return m_debuggerId;
}
HRESULT STDMETHODCALLTYPE QueryInterface(REFIID id, _COM_Outptr_ void __RPC_FAR* __RPC_FAR* pInterface);
HRESULT STDMETHODCALLTYPE GetModule(ICorDebugModule** ppModule);
HRESULT STDMETHODCALLTYPE GetClass(ICorDebugClass** ppClass);
HRESULT STDMETHODCALLTYPE GetToken(mdMethodDef* pMethodDef);
HRESULT STDMETHODCALLTYPE GetILCode(ICorDebugCode** ppCode);
HRESULT STDMETHODCALLTYPE GetNativeCode(ICorDebugCode** ppCode);
HRESULT STDMETHODCALLTYPE CreateBreakpoint(ICorDebugFunctionBreakpoint** ppBreakpoint);
HRESULT STDMETHODCALLTYPE GetLocalVarSigToken(mdSignature* pmdSig);
HRESULT STDMETHODCALLTYPE GetCurrentVersionNumber(ULONG32* pnCurrentVersion);
HRESULT STDMETHODCALLTYPE SetJMCStatus(BOOL bIsJustMyCode);
HRESULT STDMETHODCALLTYPE GetJMCStatus(BOOL* pbIsJustMyCode);
HRESULT STDMETHODCALLTYPE EnumerateNativeCode(ICorDebugCodeEnum** ppCodeEnum);
HRESULT STDMETHODCALLTYPE GetVersionNumber(ULONG32* pnVersion);
HRESULT STDMETHODCALLTYPE GetActiveReJitRequestILCode(ICorDebugILCode** ppReJitedILCode);
HRESULT STDMETHODCALLTYPE CreateNativeBreakpoint(ICorDebugFunctionBreakpoint** ppBreakpoint);
mdToken GetMetadataToken() const
{
return m_metadataToken;
}
};
#endif
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/tests/Interop/PInvoke/CriticalHandles/CriticalHandlesNative.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <xplatform.h>
typedef BOOL(__stdcall *HandleCallback)(void*);
extern "C" DLL_EXPORT size_t __stdcall In(void* handle, HandleCallback handleCallback)
{
if (handleCallback != nullptr && !handleCallback(handle))
{
return (size_t)(-1);
}
return reinterpret_cast<size_t>(handle);
}
extern "C" DLL_EXPORT void* __stdcall Ret(void* handleValue)
{
return handleValue;
}
extern "C" DLL_EXPORT void __stdcall Out(void* handleValue, void** pHandle)
{
if (pHandle == nullptr)
{
return;
}
*pHandle = handleValue;
}
extern "C" DLL_EXPORT size_t __stdcall Ref(void** pHandle, HandleCallback handleCallback)
{
if (handleCallback != nullptr && !handleCallback(*pHandle))
{
return (size_t)(-1);
}
return reinterpret_cast<size_t>(*pHandle);
}
extern "C" DLL_EXPORT size_t __stdcall RefModify(void* handleValue, void** pHandle, HandleCallback handleCallback)
{
if (handleCallback != nullptr && !handleCallback(*pHandle))
{
return (size_t)(-1);
}
void* originalHandle = *pHandle;
*pHandle = handleValue;
return reinterpret_cast<size_t>(originalHandle);
}
typedef void(__stdcall *InCallback)(void*);
extern "C" DLL_EXPORT void __stdcall InvokeInCallback(InCallback callback, void* handle)
{
callback(handle);
}
typedef void(__stdcall *RefCallback)(void**);
extern "C" DLL_EXPORT void __stdcall InvokeRefCallback(RefCallback callback, void** pHandle)
{
callback(pHandle);
}
typedef void*(__stdcall *RetCallback)();
extern "C" DLL_EXPORT void* __stdcall InvokeRetCallback(RetCallback callback)
{
return callback();
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <xplatform.h>
typedef BOOL(__stdcall *HandleCallback)(void*);
extern "C" DLL_EXPORT size_t __stdcall In(void* handle, HandleCallback handleCallback)
{
if (handleCallback != nullptr && !handleCallback(handle))
{
return (size_t)(-1);
}
return reinterpret_cast<size_t>(handle);
}
extern "C" DLL_EXPORT void* __stdcall Ret(void* handleValue)
{
return handleValue;
}
extern "C" DLL_EXPORT void __stdcall Out(void* handleValue, void** pHandle)
{
if (pHandle == nullptr)
{
return;
}
*pHandle = handleValue;
}
extern "C" DLL_EXPORT size_t __stdcall Ref(void** pHandle, HandleCallback handleCallback)
{
if (handleCallback != nullptr && !handleCallback(*pHandle))
{
return (size_t)(-1);
}
return reinterpret_cast<size_t>(*pHandle);
}
extern "C" DLL_EXPORT size_t __stdcall RefModify(void* handleValue, void** pHandle, HandleCallback handleCallback)
{
if (handleCallback != nullptr && !handleCallback(*pHandle))
{
return (size_t)(-1);
}
void* originalHandle = *pHandle;
*pHandle = handleValue;
return reinterpret_cast<size_t>(originalHandle);
}
typedef void(__stdcall *InCallback)(void*);
extern "C" DLL_EXPORT void __stdcall InvokeInCallback(InCallback callback, void* handle)
{
callback(handle);
}
typedef void(__stdcall *RefCallback)(void**);
extern "C" DLL_EXPORT void __stdcall InvokeRefCallback(RefCallback callback, void** pHandle)
{
callback(pHandle);
}
typedef void*(__stdcall *RetCallback)();
extern "C" DLL_EXPORT void* __stdcall InvokeRetCallback(RetCallback callback)
{
return callback();
}
| -1 |
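The exports in CriticalHandlesNative.cpp above share one shape: take a handle value plus an optional validation callback, run the callback, and echo the handle back so the managed side can compare it. The sketch below reproduces that shape standalone; InLocal, ValidateHandle, and the ExpectedHandle value are made up for illustration, and the real callers are the managed CriticalHandles interop tests.

```cpp
#include <cstdint>
#include <cstdio>

using HandleCallback = bool (*)(void*);

// Local stand-in with the same shape as the exported In(): validate via the
// callback, then return the handle value so the caller can compare it.
static size_t InLocal(void* handle, HandleCallback callback)
{
    if (callback != nullptr && !callback(handle))
    {
        return static_cast<size_t>(-1); // validation failed
    }
    return reinterpret_cast<size_t>(handle);
}

static void* const ExpectedHandle = reinterpret_cast<void*>(0x1234); // made-up handle value

static bool ValidateHandle(void* handle)
{
    return handle == ExpectedHandle;
}

int main()
{
    size_t roundTripped = InLocal(ExpectedHandle, ValidateHandle);
    std::printf("round-tripped handle: 0x%zx\n", roundTripped);
    return roundTripped == reinterpret_cast<size_t>(ExpectedHandle) ? 0 : 1;
}
```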
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/native/corehost/fxr_resolver.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <pal.h>
#include "fxr_resolver.h"
#include <fx_ver.h>
#include <trace.h>
#include <utils.h>
namespace
{
bool get_latest_fxr(pal::string_t fxr_root, pal::string_t* out_fxr_path)
{
trace::info(_X("Reading fx resolver directory=[%s]"), fxr_root.c_str());
std::vector<pal::string_t> list;
pal::readdir_onlydirectories(fxr_root, &list);
fx_ver_t max_ver;
for (const auto& dir : list)
{
trace::info(_X("Considering fxr version=[%s]..."), dir.c_str());
pal::string_t ver = get_filename(dir);
fx_ver_t fx_ver;
if (fx_ver_t::parse(ver, &fx_ver, /* parse_only_production */ false))
{
max_ver = std::max(max_ver, fx_ver);
}
}
if (max_ver == fx_ver_t())
{
trace::error(_X("A fatal error occurred, the folder [%s] does not contain any version-numbered child folders"), fxr_root.c_str());
return false;
}
pal::string_t max_ver_str = max_ver.as_str();
append_path(&fxr_root, max_ver_str.c_str());
trace::info(_X("Detected latest fxr version=[%s]..."), fxr_root.c_str());
if (library_exists_in_dir(fxr_root, LIBFXR_NAME, out_fxr_path))
{
trace::info(_X("Resolved fxr [%s]..."), out_fxr_path->c_str());
return true;
}
trace::error(_X("A fatal error occurred, the required library %s could not be found in [%s]"), LIBFXR_NAME, fxr_root.c_str());
return false;
}
}
bool fxr_resolver::try_get_path(const pal::string_t& root_path, pal::string_t* out_dotnet_root, pal::string_t* out_fxr_path)
{
#if defined(FEATURE_APPHOST) || defined(FEATURE_LIBHOST)
// For apphost and libhost, root_path is expected to be a directory.
// For libhost, it may be empty if app-local search is not desired (e.g. com/ijw/winrt hosts, nethost when no assembly path is specified)
// If a hostfxr exists in root_path, then assume self-contained.
if (root_path.length() > 0 && library_exists_in_dir(root_path, LIBFXR_NAME, out_fxr_path))
{
trace::info(_X("Resolved fxr [%s]..."), out_fxr_path->c_str());
out_dotnet_root->assign(root_path);
return true;
}
// For framework-dependent apps, use DOTNET_ROOT_<ARCH>
pal::string_t default_install_location;
pal::string_t dotnet_root_env_var_name;
if (get_dotnet_root_from_env(&dotnet_root_env_var_name, out_dotnet_root))
{
trace::info(_X("Using environment variable %s=[%s] as runtime location."), dotnet_root_env_var_name.c_str(), out_dotnet_root->c_str());
}
else
{
if (pal::get_dotnet_self_registered_dir(&default_install_location) || pal::get_default_installation_dir(&default_install_location))
{
trace::info(_X("Using global installation location [%s] as runtime location."), default_install_location.c_str());
out_dotnet_root->assign(default_install_location);
}
else
{
trace::error(_X("A fatal error occurred, the default install location cannot be obtained."));
return false;
}
}
pal::string_t fxr_dir = *out_dotnet_root;
append_path(&fxr_dir, _X("host"));
append_path(&fxr_dir, _X("fxr"));
if (!pal::directory_exists(fxr_dir))
{
if (default_install_location.empty())
{
pal::get_dotnet_self_registered_dir(&default_install_location);
}
if (default_install_location.empty())
{
pal::get_default_installation_dir(&default_install_location);
}
pal::string_t self_registered_config_location = pal::get_dotnet_self_registered_config_location();
pal::string_t self_registered_message = _X(" or register the runtime location in [") + self_registered_config_location + _X("]");
trace::error(_X("A fatal error occurred. The required library %s could not be found.\n"
"If this is a self-contained application, that library should exist in [%s].\n"
"If this is a framework-dependent application, install the runtime in the global location [%s] or use the %s environment variable to specify the runtime location%s."),
LIBFXR_NAME,
root_path.c_str(),
default_install_location.c_str(),
dotnet_root_env_var_name.c_str(),
self_registered_message.c_str());
trace::error(_X(""));
trace::error(_X("The .NET runtime can be found at:"));
trace::error(_X(" - %s&apphost_version=%s"), get_download_url().c_str(), _STRINGIFY(COMMON_HOST_PKG_VER));
return false;
}
return get_latest_fxr(std::move(fxr_dir), out_fxr_path);
#else // !FEATURE_APPHOST && !FEATURE_LIBHOST
// For non-apphost and non-libhost (i.e. muxer), root_path is expected to be the full path to the host
pal::string_t host_dir;
host_dir.assign(get_directory(root_path));
out_dotnet_root->assign(host_dir);
return fxr_resolver::try_get_path_from_dotnet_root(*out_dotnet_root, out_fxr_path);
#endif // !FEATURE_APPHOST && !FEATURE_LIBHOST
}
bool fxr_resolver::try_get_path_from_dotnet_root(const pal::string_t& dotnet_root, pal::string_t* out_fxr_path)
{
pal::string_t fxr_dir = dotnet_root;
append_path(&fxr_dir, _X("host"));
append_path(&fxr_dir, _X("fxr"));
if (!pal::directory_exists(fxr_dir))
{
trace::error(_X("A fatal error occurred. The folder [%s] does not exist"), fxr_dir.c_str());
return false;
}
return get_latest_fxr(std::move(fxr_dir), out_fxr_path);
}
bool fxr_resolver::try_get_existing_fxr(pal::dll_t* out_fxr, pal::string_t* out_fxr_path)
{
if (!pal::get_loaded_library(LIBFXR_NAME, "hostfxr_main", out_fxr, out_fxr_path))
return false;
trace::verbose(_X("Found previously loaded library %s [%s]."), LIBFXR_NAME, out_fxr_path->c_str());
return true;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <pal.h>
#include "fxr_resolver.h"
#include <fx_ver.h>
#include <trace.h>
#include <utils.h>
namespace
{
bool get_latest_fxr(pal::string_t fxr_root, pal::string_t* out_fxr_path)
{
trace::info(_X("Reading fx resolver directory=[%s]"), fxr_root.c_str());
std::vector<pal::string_t> list;
pal::readdir_onlydirectories(fxr_root, &list);
fx_ver_t max_ver;
for (const auto& dir : list)
{
trace::info(_X("Considering fxr version=[%s]..."), dir.c_str());
pal::string_t ver = get_filename(dir);
fx_ver_t fx_ver;
if (fx_ver_t::parse(ver, &fx_ver, /* parse_only_production */ false))
{
max_ver = std::max(max_ver, fx_ver);
}
}
if (max_ver == fx_ver_t())
{
trace::error(_X("A fatal error occurred, the folder [%s] does not contain any version-numbered child folders"), fxr_root.c_str());
return false;
}
pal::string_t max_ver_str = max_ver.as_str();
append_path(&fxr_root, max_ver_str.c_str());
trace::info(_X("Detected latest fxr version=[%s]..."), fxr_root.c_str());
if (library_exists_in_dir(fxr_root, LIBFXR_NAME, out_fxr_path))
{
trace::info(_X("Resolved fxr [%s]..."), out_fxr_path->c_str());
return true;
}
trace::error(_X("A fatal error occurred, the required library %s could not be found in [%s]"), LIBFXR_NAME, fxr_root.c_str());
return false;
}
}
bool fxr_resolver::try_get_path(const pal::string_t& root_path, pal::string_t* out_dotnet_root, pal::string_t* out_fxr_path)
{
#if defined(FEATURE_APPHOST) || defined(FEATURE_LIBHOST)
// For apphost and libhost, root_path is expected to be a directory.
// For libhost, it may be empty if app-local search is not desired (e.g. com/ijw/winrt hosts, nethost when no assembly path is specified)
// If a hostfxr exists in root_path, then assume self-contained.
if (root_path.length() > 0 && library_exists_in_dir(root_path, LIBFXR_NAME, out_fxr_path))
{
trace::info(_X("Resolved fxr [%s]..."), out_fxr_path->c_str());
out_dotnet_root->assign(root_path);
return true;
}
// For framework-dependent apps, use DOTNET_ROOT_<ARCH>
pal::string_t default_install_location;
pal::string_t dotnet_root_env_var_name;
if (get_dotnet_root_from_env(&dotnet_root_env_var_name, out_dotnet_root))
{
trace::info(_X("Using environment variable %s=[%s] as runtime location."), dotnet_root_env_var_name.c_str(), out_dotnet_root->c_str());
}
else
{
if (pal::get_dotnet_self_registered_dir(&default_install_location) || pal::get_default_installation_dir(&default_install_location))
{
trace::info(_X("Using global installation location [%s] as runtime location."), default_install_location.c_str());
out_dotnet_root->assign(default_install_location);
}
else
{
trace::error(_X("A fatal error occurred, the default install location cannot be obtained."));
return false;
}
}
pal::string_t fxr_dir = *out_dotnet_root;
append_path(&fxr_dir, _X("host"));
append_path(&fxr_dir, _X("fxr"));
if (!pal::directory_exists(fxr_dir))
{
if (default_install_location.empty())
{
pal::get_dotnet_self_registered_dir(&default_install_location);
}
if (default_install_location.empty())
{
pal::get_default_installation_dir(&default_install_location);
}
pal::string_t self_registered_config_location = pal::get_dotnet_self_registered_config_location();
pal::string_t self_registered_message = _X(" or register the runtime location in [") + self_registered_config_location + _X("]");
trace::error(_X("A fatal error occurred. The required library %s could not be found.\n"
"If this is a self-contained application, that library should exist in [%s].\n"
"If this is a framework-dependent application, install the runtime in the global location [%s] or use the %s environment variable to specify the runtime location%s."),
LIBFXR_NAME,
root_path.c_str(),
default_install_location.c_str(),
dotnet_root_env_var_name.c_str(),
self_registered_message.c_str());
trace::error(_X(""));
trace::error(_X("The .NET runtime can be found at:"));
trace::error(_X(" - %s&apphost_version=%s"), get_download_url().c_str(), _STRINGIFY(COMMON_HOST_PKG_VER));
return false;
}
return get_latest_fxr(std::move(fxr_dir), out_fxr_path);
#else // !FEATURE_APPHOST && !FEATURE_LIBHOST
// For non-apphost and non-libhost (i.e. muxer), root_path is expected to be the full path to the host
pal::string_t host_dir;
host_dir.assign(get_directory(root_path));
out_dotnet_root->assign(host_dir);
return fxr_resolver::try_get_path_from_dotnet_root(*out_dotnet_root, out_fxr_path);
#endif // !FEATURE_APPHOST && !FEATURE_LIBHOST
}
bool fxr_resolver::try_get_path_from_dotnet_root(const pal::string_t& dotnet_root, pal::string_t* out_fxr_path)
{
pal::string_t fxr_dir = dotnet_root;
append_path(&fxr_dir, _X("host"));
append_path(&fxr_dir, _X("fxr"));
if (!pal::directory_exists(fxr_dir))
{
trace::error(_X("A fatal error occurred. The folder [%s] does not exist"), fxr_dir.c_str());
return false;
}
return get_latest_fxr(std::move(fxr_dir), out_fxr_path);
}
bool fxr_resolver::try_get_existing_fxr(pal::dll_t* out_fxr, pal::string_t* out_fxr_path)
{
if (!pal::get_loaded_library(LIBFXR_NAME, "hostfxr_main", out_fxr, out_fxr_path))
return false;
trace::verbose(_X("Found previously loaded library %s [%s]."), LIBFXR_NAME, out_fxr_path->c_str());
return true;
}
| -1 |
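get_latest_fxr above boils down to: enumerate the version-named subdirectories under host/fxr, parse each name as a version, and keep the maximum. The sketch below shows just that selection step; SimpleVersion and the hard-coded directory names are illustrative, and unlike the real fx_ver_t it does not understand prerelease labels.

```cpp
#include <cstdio>
#include <optional>
#include <string>
#include <tuple>
#include <vector>

// Illustrative stand-in for fx_ver_t: major.minor.patch only.
struct SimpleVersion
{
    int major = -1, minor = -1, patch = -1;
    bool operator<(const SimpleVersion& other) const
    {
        return std::tie(major, minor, patch) < std::tie(other.major, other.minor, other.patch);
    }
};

// Parse "6.0.3"-style names; anything that does not yield three numbers is skipped,
// mirroring fx_ver_t::parse returning false for non-version folder names.
static std::optional<SimpleVersion> parse_version(const std::string& name)
{
    SimpleVersion v;
    if (std::sscanf(name.c_str(), "%d.%d.%d", &v.major, &v.minor, &v.patch) != 3)
        return std::nullopt;
    return v;
}

int main()
{
    // Pretend these came from pal::readdir_onlydirectories on <dotnet_root>/host/fxr.
    std::vector<std::string> dirs = {"3.1.32", "6.0.3", "6.0.14", "junk"};

    SimpleVersion max_ver; // starts below any parsed version
    std::string max_dir;
    for (const std::string& dir : dirs)
    {
        if (auto v = parse_version(dir); v && max_ver < *v)
        {
            max_ver = *v;
            max_dir = dir;
        }
    }

    if (max_dir.empty())
        std::puts("no version-numbered child folders found");
    else
        std::printf("latest hostfxr folder: %s\n", max_dir.c_str());
    return 0;
}
```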
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/utilcode/stgpoolreadonly.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// StgPoolReadOnly.cpp
//
//
// Read only pools are used to reduce the amount of data actually required in the database.
//
//*****************************************************************************
#include "stdafx.h" // Standard include.
#include <stgpool.h> // Our interface definitions.
#include "metadatatracker.h"
//
//
// StgPoolReadOnly
//
//
#if METADATATRACKER_ENABLED
MetaDataTracker *MetaDataTracker::m_MDTrackers = NULL;
BOOL MetaDataTracker::s_bEnabled = FALSE;
void (*MetaDataTracker::s_IBCLogMetaDataAccess)(const void *addr) = NULL;
void (*MetaDataTracker::s_IBCLogMetaDataSearch)(const void *result) = NULL;
#endif // METADATATRACKER_ENABLED
const BYTE StgPoolSeg::m_zeros[64] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
//*****************************************************************************
// Free any memory we allocated.
//*****************************************************************************
StgPoolReadOnly::~StgPoolReadOnly()
{
LIMITED_METHOD_CONTRACT;
}
//*****************************************************************************
// Init the pool from existing data.
//*****************************************************************************
HRESULT StgPoolReadOnly::InitOnMemReadOnly(// Return code.
void *pData, // Predefined data.
ULONG iSize) // Size of data.
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY);
}
CONTRACTL_END
// Make sure we aren't stomping anything and are properly initialized.
_ASSERTE(m_pSegData == m_zeros);
// Create case requires no further action.
if (pData == NULL)
return E_INVALIDARG;
// Keep m_zeros data pointer if there's no content of the pool
if (iSize != 0)
{
m_pSegData = reinterpret_cast<BYTE*>(pData);
}
m_cbSegSize = iSize;
m_cbSegNext = iSize;
return S_OK;
}
//*****************************************************************************
// Prepare to shut down or reinitialize.
//*****************************************************************************
void StgPoolReadOnly::Uninit()
{
LIMITED_METHOD_CONTRACT;
m_pSegData = (BYTE*)m_zeros;
m_pNextSeg = 0;
}
//*****************************************************************************
// Convert a string to UNICODE into the caller's buffer.
//*****************************************************************************
HRESULT StgPoolReadOnly::GetStringW( // Return code.
ULONG iOffset, // Offset of string in pool.
_Out_writes_(cchBuffer) LPWSTR szOut, // Output buffer for string.
int cchBuffer) // Size of output buffer.
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FAULT;
HRESULT hr;
LPCSTR pString; // The string in UTF8.
int iChars;
IfFailRet(GetString(iOffset, &pString));
iChars = ::WszMultiByteToWideChar(CP_UTF8, 0, pString, -1, szOut, cchBuffer);
if (iChars == 0)
return (BadError(HRESULT_FROM_NT(GetLastError())));
return S_OK;
}
//*****************************************************************************
// Return a pointer to a null terminated blob given an offset previously
// handed out by Addblob or Findblob.
//*****************************************************************************
HRESULT
StgPoolReadOnly::GetBlob(
UINT32 nOffset, // Offset of blob in pool.
MetaData::DataBlob *pData)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FORBID_FAULT;
HRESULT hr;
UINT32 cbBlobContentSize;
// This should not be a necessary special case. The zero byte at the
// start of the pool will code for a length of zero. We will return
// a pointer to the next length byte, but the caller should notice that
// the size is zero, and should not look at any bytes.
// [SL] Yes, but we don't need all further computations and checks if iOffset==0
if (nOffset == 0)
{
pData->Clear();
return S_OK;
}
// Is the offset within this heap?
if (!IsValidOffset(nOffset))
{
Debug_ReportError("Invalid blob offset.");
IfFailGo(CLDB_E_INDEX_NOTFOUND);
}
IfFailGo(GetDataReadOnly(nOffset, pData));
if (!pData->GetCompressedU(&cbBlobContentSize))
{
Debug_ReportError("Invalid blob - size compression.");
IfFailGo(COR_E_BADIMAGEFORMAT);
}
if (!pData->TruncateToExactSize(cbBlobContentSize))
{
Debug_ReportError("Invalid blob - reaches behind the end of data block.");
IfFailGo(COR_E_BADIMAGEFORMAT);
}
return S_OK;
ErrExit:
pData->Clear();
return hr;
} // StgPoolReadOnly::GetBlob
//*****************************************************************************
// code:StgPoolReadOnly::GetBlob specialization with inlined check for valid offsets to avoid redundant code:StgPoolReadOnly::GetDataReadOnly calls.
// code:StgPoolReadOnly::GetDataReadOnly is not cheap because it performs a binary lookup in hot metadata.
//*****************************************************************************
HRESULT
StgBlobPoolReadOnly::GetBlob(
UINT32 nOffset, // Offset of blob in pool.
MetaData::DataBlob *pData)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FORBID_FAULT;
HRESULT hr;
UINT32 cbBlobContentSize;
// This should not be a necessary special case. The zero byte at the
// start of the pool will code for a length of zero. We will return
// a pointer to the next length byte, but the caller should notice that
// the size is zero, and should not look at any bytes.
// [SL] Yes, but we don't need all further computations and checks if iOffset==0
if (nOffset == 0)
{
pData->Clear();
return S_OK;
}
if (m_pSegData == m_zeros)
{
Debug_ReportError("Invalid blob offset.");
IfFailGo(CLDB_E_INDEX_NOTFOUND);
}
IfFailGo(GetDataReadOnly(nOffset, pData));
if (!pData->GetCompressedU(&cbBlobContentSize))
{
Debug_ReportError("Invalid blob - size compression.");
IfFailGo(CLDB_E_INDEX_NOTFOUND);
}
if (!pData->TruncateToExactSize(cbBlobContentSize))
{
Debug_ReportError("Invalid blob - reaches behind the end of data block.");
IfFailGo(CLDB_E_INDEX_NOTFOUND);
}
return S_OK;
ErrExit:
pData->Clear();
return hr;
} // StgBlobPoolReadOnly::GetBlob
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// StgPoolReadOnly.cpp
//
//
// Read only pools are used to reduce the amount of data actually required in the database.
//
//*****************************************************************************
#include "stdafx.h" // Standard include.
#include <stgpool.h> // Our interface definitions.
#include "metadatatracker.h"
//
//
// StgPoolReadOnly
//
//
#if METADATATRACKER_ENABLED
MetaDataTracker *MetaDataTracker::m_MDTrackers = NULL;
BOOL MetaDataTracker::s_bEnabled = FALSE;
void (*MetaDataTracker::s_IBCLogMetaDataAccess)(const void *addr) = NULL;
void (*MetaDataTracker::s_IBCLogMetaDataSearch)(const void *result) = NULL;
#endif // METADATATRACKER_ENABLED
const BYTE StgPoolSeg::m_zeros[64] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
//*****************************************************************************
// Free any memory we allocated.
//*****************************************************************************
StgPoolReadOnly::~StgPoolReadOnly()
{
LIMITED_METHOD_CONTRACT;
}
//*****************************************************************************
// Init the pool from existing data.
//*****************************************************************************
HRESULT StgPoolReadOnly::InitOnMemReadOnly(// Return code.
void *pData, // Predefined data.
ULONG iSize) // Size of data.
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY);
}
CONTRACTL_END
// Make sure we aren't stomping anything and are properly initialized.
_ASSERTE(m_pSegData == m_zeros);
// Create case requires no further action.
if (pData == NULL)
return E_INVALIDARG;
// Keep m_zeros data pointer if there's no content of the pool
if (iSize != 0)
{
m_pSegData = reinterpret_cast<BYTE*>(pData);
}
m_cbSegSize = iSize;
m_cbSegNext = iSize;
return S_OK;
}
//*****************************************************************************
// Prepare to shut down or reinitialize.
//*****************************************************************************
void StgPoolReadOnly::Uninit()
{
LIMITED_METHOD_CONTRACT;
m_pSegData = (BYTE*)m_zeros;
m_pNextSeg = 0;
}
//*****************************************************************************
// Convert a string to UNICODE into the caller's buffer.
//*****************************************************************************
HRESULT StgPoolReadOnly::GetStringW( // Return code.
ULONG iOffset, // Offset of string in pool.
_Out_writes_(cchBuffer) LPWSTR szOut, // Output buffer for string.
int cchBuffer) // Size of output buffer.
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FAULT;
HRESULT hr;
LPCSTR pString; // The string in UTF8.
int iChars;
IfFailRet(GetString(iOffset, &pString));
iChars = ::WszMultiByteToWideChar(CP_UTF8, 0, pString, -1, szOut, cchBuffer);
if (iChars == 0)
return (BadError(HRESULT_FROM_NT(GetLastError())));
return S_OK;
}
//*****************************************************************************
// Return a pointer to a null terminated blob given an offset previously
// handed out by Addblob or Findblob.
//*****************************************************************************
HRESULT
StgPoolReadOnly::GetBlob(
UINT32 nOffset, // Offset of blob in pool.
MetaData::DataBlob *pData)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FORBID_FAULT;
HRESULT hr;
UINT32 cbBlobContentSize;
// This should not be a necessary special case. The zero byte at the
// start of the pool will code for a length of zero. We will return
// a pointer to the next length byte, but the caller should notice that
// the size is zero, and should not look at any bytes.
// [SL] Yes, but we don't need all further computations and checks if iOffset==0
if (nOffset == 0)
{
pData->Clear();
return S_OK;
}
// Is the offset within this heap?
if (!IsValidOffset(nOffset))
{
Debug_ReportError("Invalid blob offset.");
IfFailGo(CLDB_E_INDEX_NOTFOUND);
}
IfFailGo(GetDataReadOnly(nOffset, pData));
if (!pData->GetCompressedU(&cbBlobContentSize))
{
Debug_ReportError("Invalid blob - size compression.");
IfFailGo(COR_E_BADIMAGEFORMAT);
}
if (!pData->TruncateToExactSize(cbBlobContentSize))
{
Debug_ReportError("Invalid blob - reaches behind the end of data block.");
IfFailGo(COR_E_BADIMAGEFORMAT);
}
return S_OK;
ErrExit:
pData->Clear();
return hr;
} // StgPoolReadOnly::GetBlob
//*****************************************************************************
// code:StgPoolReadOnly::GetBlob specialization with inlined check for valid offsets to avoid redundant code:StgPoolReadOnly::GetDataReadOnly calls.
// code:StgPoolReadOnly::GetDataReadOnly is not cheap because it performs a binary lookup in hot metadata.
//*****************************************************************************
HRESULT
StgBlobPoolReadOnly::GetBlob(
UINT32 nOffset, // Offset of blob in pool.
MetaData::DataBlob *pData)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FORBID_FAULT;
HRESULT hr;
UINT32 cbBlobContentSize;
// This should not be a necessary special case. The zero byte at the
// start of the pool will code for a length of zero. We will return
// a pointer to the next length byte, but the caller should notice that
// the size is zero, and should not look at any bytes.
// [SL] Yes, but we don't need all further computations and checks if iOffset==0
if (nOffset == 0)
{
pData->Clear();
return S_OK;
}
if (m_pSegData == m_zeros)
{
Debug_ReportError("Invalid blob offset.");
IfFailGo(CLDB_E_INDEX_NOTFOUND);
}
IfFailGo(GetDataReadOnly(nOffset, pData));
if (!pData->GetCompressedU(&cbBlobContentSize))
{
Debug_ReportError("Invalid blob - size compression.");
IfFailGo(CLDB_E_INDEX_NOTFOUND);
}
if (!pData->TruncateToExactSize(cbBlobContentSize))
{
Debug_ReportError("Invalid blob - reaches behind the end of data block.");
IfFailGo(CLDB_E_INDEX_NOTFOUND);
}
return S_OK;
ErrExit:
pData->Clear();
return hr;
} // StgBlobPoolReadOnly::GetBlob
| -1 |
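GetBlob above relies on GetCompressedU to read the blob's length prefix, which uses the ECMA-335 compressed unsigned integer encoding: one byte for values up to 0x7F, two bytes (leading bits 10) for values up to 0x3FFF, and four bytes (leading bits 110) beyond that. The decoder below is a self-contained sketch of that format, not the runtime's DataBlob::GetCompressedU.

```cpp
#include <cstdint>
#include <cstdio>

// Decode an ECMA-335 compressed unsigned integer from 'data' (size 'len').
// On success, writes the value and the number of bytes consumed; returns false
// if the buffer is too short or the prefix bits are malformed.
static bool DecodeCompressedU(const uint8_t* data, size_t len, uint32_t* value, size_t* consumed)
{
    if (len == 0)
        return false;

    const uint8_t b0 = data[0];
    if ((b0 & 0x80) == 0x00) // 1-byte form: 0bbbbbbb
    {
        *value = b0;
        *consumed = 1;
        return true;
    }
    if ((b0 & 0xC0) == 0x80) // 2-byte form: 10bbbbbb bbbbbbbb (big-endian)
    {
        if (len < 2) return false;
        *value = (static_cast<uint32_t>(b0 & 0x3F) << 8) | data[1];
        *consumed = 2;
        return true;
    }
    if ((b0 & 0xE0) == 0xC0) // 4-byte form: 110bbbbb + three more bytes (big-endian)
    {
        if (len < 4) return false;
        *value = (static_cast<uint32_t>(b0 & 0x1F) << 24) |
                 (static_cast<uint32_t>(data[1]) << 16) |
                 (static_cast<uint32_t>(data[2]) << 8)  |
                  static_cast<uint32_t>(data[3]);
        *consumed = 4;
        return true;
    }
    return false; // 111xxxxx is not a valid length prefix
}

int main()
{
    // 0x83 0xE8 encodes 0x03E8 == 1000 in the 2-byte form.
    const uint8_t blobHeader[] = {0x83, 0xE8};
    uint32_t size = 0;
    size_t used = 0;
    if (DecodeCompressedU(blobHeader, sizeof(blobHeader), &size, &used))
        std::printf("blob content size: %u (header bytes: %zu)\n", size, used);
    return 0;
}
```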
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but a second or subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
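To make the register-query part of this concrete, here is a minimal hedged sketch (toy types and names invented for illustration, not the actual RyuJIT `GenTree`/`gtHasReg()` code): the key point is that a "does this node have a register?" style query on a multi-reg local must walk every field's register slot, so that a GC pointer held only in a later slot is still reported to the GC tracking sets.

```cpp
#include <vector>

constexpr int REG_NA_TOY = -1; // toy stand-in for "no register assigned"

struct MultiRegLocalToy
{
    std::vector<int> slotRegs; // register assigned to each promoted field, or REG_NA_TOY
};

// The buggy shape effectively consulted only slotRegs[0]; the fixed shape below
// considers every slot, which is what keeps later GC-pointer slots tracked.
bool HasAnyRegister(const MultiRegLocalToy& node)
{
    for (int reg : node.slotRegs)
    {
        if (reg != REG_NA_TOY)
        {
            return true;
        }
    }
    return false;
}
```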
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/nativeaot/Runtime/UniversalTransitionHelpers.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#include "CommonTypes.h"
#include "CommonMacros.h"
#include "PalRedhawkCommon.h"
#include "PalRedhawk.h"
#ifdef _DEBUG
#define TRASH_SAVED_ARGUMENT_REGISTERS
#endif
#ifdef TRASH_SAVED_ARGUMENT_REGISTERS
//
// Define tables of predictable distinguished values that RhpUniversalTransition can use to
// trash argument registers after they have been saved into the transition frame.
//
// Trashing these registers is a testability aid that makes it easier to detect bugs where
// the transition frame content is not correctly propagated to the eventual callee.
//
// In the absence of trashing, such bugs can become undetectable if the code that
// dispatches the call happens to never touch the impacted argument register (e.g., xmm3 on
// amd64 or d5 on arm32). In such a case, the original enregistered argument will flow
// unmodified into the eventual callee, obscuring the fact that the dispatcher failed to
// propagate the transition frame copy of this register.
//
// These tables are manually aligned as a conservative safeguard to ensure that the
// consumers can use arbitrary access widths without ever needing to worry about alignment.
// The comments in each table show the %d/%f renderings of each 32-bit value, plus the
// %I64d/%f rendering of the combined 64-bit value of each aligned pair of 32-bit values.
//
#define TRASH_VALUE_ALIGNMENT 16
EXTERN_C
DECLSPEC_ALIGN(TRASH_VALUE_ALIGNMENT)
const uint32_t RhpIntegerTrashValues[] = {
// Lo32 Hi32 Lo32 Hi32 Hi32:Lo32
// ----------- ----------- --------- --------- ------------------
0x07801001U, 0x07802002U, // (125833217, 125837314) (540467148372316161)
0x07803003U, 0x07804004U, // (125841411, 125845508) (540502341334347779)
0x07805005U, 0x07806006U, // (125849605, 125853702) (540537534296379397)
0x07807007U, 0x07808008U, // (125857799, 125861896) (540572727258411015)
0x07809009U, 0x0780a00aU, // (125865993, 125870090) (540607920220442633)
0x0780b00bU, 0x0780c00cU, // (125874187, 125878284) (540643113182474251)
0x0780d00dU, 0x0780e00eU, // (125882381, 125886478) (540678306144505869)
0x0780f00fU, 0x07810010U, // (125890575, 125894672) (540713499106537487)
};
EXTERN_C
DECLSPEC_ALIGN(TRASH_VALUE_ALIGNMENT)
const uint32_t RhpFpTrashValues[] = {
// Lo32 Hi32 Lo32 Hi32 Hi32:Lo32
// ----------- ----------- ------------------- ------------------- -------------------
0x42001001U, 0x42002002U, // (32.0156288146972660, 32.0312576293945310) (8657061952.00781440)
0x42003003U, 0x42004004U, // (32.0468864440917970, 32.0625152587890630) (8724187200.02344320)
0x42005005U, 0x42006006U, // (32.0781440734863280, 32.0937728881835940) (8791312448.03907200)
0x42007007U, 0x42008008U, // (32.1094017028808590, 32.1250305175781250) (8858437696.05470090)
0x42009009U, 0x4200a00aU, // (32.1406593322753910, 32.1562881469726560) (8925562944.07032970)
0x4200b00bU, 0x4200c00cU, // (32.1719169616699220, 32.1875457763671880) (8992688192.08595850)
0x4200d00dU, 0x4200e00eU, // (32.2031745910644530, 32.2188034057617190) (9059813440.10158730)
0x4200f00fU, 0x42010010U, // (32.2344322204589840, 32.2500610351562500) (9126938688.11721610)
};
#endif // TRASH_SAVED_ARGUMENT_REGISTERS
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#include "CommonTypes.h"
#include "CommonMacros.h"
#include "PalRedhawkCommon.h"
#include "PalRedhawk.h"
#ifdef _DEBUG
#define TRASH_SAVED_ARGUMENT_REGISTERS
#endif
#ifdef TRASH_SAVED_ARGUMENT_REGISTERS
//
// Define tables of predictable distinguished values that RhpUniversalTransition can use to
// trash argument registers after they have been saved into the transition frame.
//
// Trashing these registers is a testability aid that makes it easier to detect bugs where
// the transition frame content is not correctly propagated to the eventual callee.
//
// In the absence of trashing, such bugs can become undetectable if the code that
// dispatches the call happens to never touch the impacted argument register (e.g., xmm3 on
// amd64 or d5 on arm32). In such a case, the original enregistered argument will flow
// unmodified into the eventual callee, obscuring the fact that the dispatcher failed to
// propagate the transition frame copy of this register.
//
// These tables are manually aligned as a conservative safeguard to ensure that the
// consumers can use arbitrary access widths without ever needing to worry about alignment.
// The comments in each table show the %d/%f renderings of each 32-bit value, plus the
// %I64d/%f rendering of the combined 64-bit value of each aligned pair of 32-bit values.
//
#define TRASH_VALUE_ALIGNMENT 16
EXTERN_C
DECLSPEC_ALIGN(TRASH_VALUE_ALIGNMENT)
const uint32_t RhpIntegerTrashValues[] = {
// Lo32 Hi32 Lo32 Hi32 Hi32:Lo32
// ----------- ----------- --------- --------- ------------------
0x07801001U, 0x07802002U, // (125833217, 125837314) (540467148372316161)
0x07803003U, 0x07804004U, // (125841411, 125845508) (540502341334347779)
0x07805005U, 0x07806006U, // (125849605, 125853702) (540537534296379397)
0x07807007U, 0x07808008U, // (125857799, 125861896) (540572727258411015)
0x07809009U, 0x0780a00aU, // (125865993, 125870090) (540607920220442633)
0x0780b00bU, 0x0780c00cU, // (125874187, 125878284) (540643113182474251)
0x0780d00dU, 0x0780e00eU, // (125882381, 125886478) (540678306144505869)
0x0780f00fU, 0x07810010U, // (125890575, 125894672) (540713499106537487)
};
EXTERN_C
DECLSPEC_ALIGN(TRASH_VALUE_ALIGNMENT)
const uint32_t RhpFpTrashValues[] = {
// Lo32 Hi32 Lo32 Hi32 Hi32:Lo32
// ----------- ----------- ------------------- ------------------- -------------------
0x42001001U, 0x42002002U, // (32.0156288146972660, 32.0312576293945310) (8657061952.00781440)
0x42003003U, 0x42004004U, // (32.0468864440917970, 32.0625152587890630) (8724187200.02344320)
0x42005005U, 0x42006006U, // (32.0781440734863280, 32.0937728881835940) (8791312448.03907200)
0x42007007U, 0x42008008U, // (32.1094017028808590, 32.1250305175781250) (8858437696.05470090)
0x42009009U, 0x4200a00aU, // (32.1406593322753910, 32.1562881469726560) (8925562944.07032970)
0x4200b00bU, 0x4200c00cU, // (32.1719169616699220, 32.1875457763671880) (8992688192.08595850)
0x4200d00dU, 0x4200e00eU, // (32.2031745910644530, 32.2188034057617190) (9059813440.10158730)
0x4200f00fU, 0x42010010U, // (32.2344322204589840, 32.2500610351562500) (9126938688.11721610)
};
#endif // TRASH_SAVED_ARGUMENT_REGISTERS
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/vm/typectxt.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// typectxt.h
//
//
#ifndef _H_TYPECTXT
#define _H_TYPECTXT
//------------------------------------------------------------------------
// A signature type context gives the information necessary to interpret
// the ELEMENT_TYPE_VAR and ELEMENT_TYPE_MVAR elements of a regular
// metadata signature. These are usually stack allocated at appropriate
// points where the SigPointer objects are created, or are allocated
// inside a MetaSig (which are themselves normally stack allocated)
//
// They are normally passed as "const SigTypeContext *".
//------------------------------------------------------------------------
class SigTypeContext
{
public:
// Store pointers first and DWORDs second to ensure good packing on 64-bit
Instantiation m_classInst;
Instantiation m_methodInst;
// Default constructor for non-generic code
inline SigTypeContext()
{ WRAPPER_NO_CONTRACT; InitTypeContext(this); }
// Initialize a type context given instantiations.
inline SigTypeContext(Instantiation classInst, Instantiation methodInst)
{ WRAPPER_NO_CONTRACT; InitTypeContext(classInst, methodInst, this); }
// Initialize a type context from a MethodDesc. If this is a MethodDesc that gets
// shared between generic instantiations (e.g. one being jitted by a code-sharing JIT)
// and a null declaring Type is passed then the type context will
// be a representative context, not an exact one.
// This is sufficient for most purposes, e.g. GC and field layout, because
// these operations are "parametric", i.e. behave the same for all shared types.
//
// If declaringType is non-null, then the MethodDesc is assumed to be
// shared between generic classes, and the type handle is used to give the
// exact type context. The method should be one of the methods supported by the
// given type handle.
//
// If the method is a method in an array type then the type context will
// contain one item in the class instantiation corresponding to the
// element type of the array.
//
// Finally, exactMethodInst should be specified if md might represent a generic method definition,
// as type parameters are not always available off the method desc for generic method definitions without
// forcing a load. Typically the caller will use MethodDesc::LoadMethodInstantiation.
inline SigTypeContext(MethodDesc *md)
{ WRAPPER_NO_CONTRACT; InitTypeContext(md,this); }
inline SigTypeContext(MethodDesc *md, TypeHandle declaringType)
{ WRAPPER_NO_CONTRACT; InitTypeContext(md,declaringType,this); }
inline SigTypeContext(MethodDesc *md, TypeHandle declaringType, Instantiation exactMethodInst)
{ WRAPPER_NO_CONTRACT; InitTypeContext(md,declaringType,exactMethodInst,this); }
// This is similar to the one above except that exact
// instantiations are provided explicitly.
// This will only normally be used when the code is shared
// between generic instantiations and after fetching the
// exact instantiations from the stack.
//
inline SigTypeContext(MethodDesc *md, Instantiation exactClassInst, Instantiation exactMethodInst)
{ WRAPPER_NO_CONTRACT; InitTypeContext(md,exactClassInst,exactMethodInst,this); }
// Initialize a type context from a type handle. This is used when
// generating the type context for a
// any of the metadata in the class covered by the type handle apart from
// the metadata for any generic methods in the class.
// If the type handle satisfies th.IsNull() then the created type context
// will be empty.
inline SigTypeContext(TypeHandle th)
{ WRAPPER_NO_CONTRACT; InitTypeContext(th,this); }
inline SigTypeContext(FieldDesc *pFD, TypeHandle declaringType = TypeHandle())
{ WRAPPER_NO_CONTRACT; InitTypeContext(pFD,declaringType,this); }
    // Copy constructor - try not to use this. The C++ compiler is not doing a good job
    // of copy-constructor based code, and we've had perf regressions when using this too
    // much for these simple objects. Use an explicit call to InitTypeContext instead,
// or use GetOptionalTypeContext.
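    // Illustrative alternative (pExistingContext is a hypothetical SigTypeContext* you already hold):
    //     SigTypeContext localContext;
    //     SigTypeContext::InitTypeContext(pExistingContext, &localContext);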
inline SigTypeContext(const SigTypeContext &c)
{ WRAPPER_NO_CONTRACT; InitTypeContext(&c,this); }
    // Copy constructor from a possibly-NULL pointer.
inline SigTypeContext(const SigTypeContext *c)
{ WRAPPER_NO_CONTRACT; InitTypeContext(c,this); }
static void InitTypeContext(MethodDesc *md, SigTypeContext *pRes);
static void InitTypeContext(MethodDesc *md, TypeHandle declaringType, SigTypeContext *pRes);
static void InitTypeContext(MethodDesc *md, TypeHandle declaringType, Instantiation exactMethodInst, SigTypeContext *pRes);
static void InitTypeContext(MethodDesc *md, Instantiation exactClassInst, Instantiation exactMethodInst, SigTypeContext *pRes);
static void InitTypeContext(TypeHandle th, SigTypeContext *pRes);
static void InitTypeContext(FieldDesc *pFD, TypeHandle declaringType, SigTypeContext *pRes);
inline static void InitTypeContext(Instantiation classInst, Instantiation methodInst, SigTypeContext *pRes);
inline static void InitTypeContext(SigTypeContext *);
inline static void InitTypeContext(const SigTypeContext *c, SigTypeContext *pRes);
// These are allowed to return NULL if an empty type context is generated. The NULL value
// can then be passed around to represent the empty type context.
// pRes must be non-null.
// pRes must initially be zero-initialized, e.g. by the default SigTypeContext constructor.
static const SigTypeContext * GetOptionalTypeContext(MethodDesc *md, TypeHandle declaringType, SigTypeContext *pRes);
static const SigTypeContext * GetOptionalTypeContext(TypeHandle th, SigTypeContext *pRes);
    // SigTypeContexts are used as part of keys for various data structures indexed by instantiation
static BOOL Equal(const SigTypeContext *pCtx1, const SigTypeContext *pCtx2);
static BOOL IsValidTypeOnlyInstantiationOf(const SigTypeContext *pCtxTypicalMethodInstantiation, const SigTypeContext *pCtxTypeOnlyInstantiation);
BOOL IsEmpty() const { LIMITED_METHOD_CONTRACT; return m_classInst.IsEmpty() && m_methodInst.IsEmpty(); }
};
inline void SigTypeContext::InitTypeContext(SigTypeContext *pRes)
{
LIMITED_METHOD_DAC_CONTRACT;
}
inline void SigTypeContext::InitTypeContext(Instantiation classInst,
Instantiation methodInst,
SigTypeContext *pRes)
{
LIMITED_METHOD_CONTRACT;
pRes->m_classInst = classInst;
pRes->m_methodInst = methodInst;
}
// Copy constructor from a possibly-NULL pointer.
inline void SigTypeContext::InitTypeContext(const SigTypeContext *c,SigTypeContext *pRes)
{
LIMITED_METHOD_DAC_CONTRACT;
if (c)
{
pRes->m_classInst = c->m_classInst;
pRes->m_methodInst = c->m_methodInst;
}
else
{
pRes->m_classInst = Instantiation();
pRes->m_methodInst = Instantiation();
}
}
//------------------------------------------------------------------------
// Encapsulates pointers to code:SigTypeContext and code:Substitution chain
// that have been used to instantiate a generic type. The context is passed
// as "const InstantiationContext *" from code:SigPointer.GetTypeHandleThrowing
// down to code:TypeVarTypeDesc.SatisfiesConstraints where it is needed for
// instantiating constraints attached to type arguments.
//
// The reason why we need to pass these pointers down to the code that verifies
// that constraints are satisfied is the case when another type variable is
// substituted for a type variable and this argument is constrained by a generic
// type. In order to verify that constraints of the argument satisfy constraints
// of the parameter, the argument constraints must be instantiated in the same
// "instantiation context" as the original signature - and unfortunately this
// context cannot be extracted from the rest of the information that we have
// about the type that is being loaded.
//
// See code:TypeVarTypeDesc.SatisfiesConstraints for more details and an
// example scenario in which we are unable to verify constraints without this
// context.
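// A hypothetical C# shape where this matters:
//     class C<T> where T : IComparable<T> { }
//     class D<U> where U : IComparable<U> { void M() { object o = new C<U>(); } }
// Verifying that U can instantiate C requires instantiating T's constraint
// (IComparable<T> becomes IComparable<U>) in the context of D's signature.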
//------------------------------------------------------------------------
class InstantiationContext
{
public:
const SigTypeContext *m_pArgContext;
const Substitution *m_pSubstChain;
inline InstantiationContext(const SigTypeContext *pArgContext = NULL, const Substitution *pSubstChain = NULL)
{
LIMITED_METHOD_DAC_CONTRACT;
m_pArgContext = pArgContext;
m_pSubstChain = pSubstChain;
}
};
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// typectxt.h
//
//
#ifndef _H_TYPECTXT
#define _H_TYPECTXT
//------------------------------------------------------------------------
// A signature type context gives the information necessary to interpret
// the ELEMENT_TYPE_VAR and ELEMENT_TYPE_MVAR elements of a regular
// metadata signature. These are usually stack allocated at appropriate
// points where the SigPointer objects are created, or are allocated
// inside a MetaSig (which are themselves normally stack allocated)
//
// They are normally passed as "const SigTypeContext *".
//------------------------------------------------------------------------
class SigTypeContext
{
public:
// Store pointers first and DWORDs second to ensure good packing on 64-bit
Instantiation m_classInst;
Instantiation m_methodInst;
// Default constructor for non-generic code
inline SigTypeContext()
{ WRAPPER_NO_CONTRACT; InitTypeContext(this); }
// Initialize a type context given instantiations.
inline SigTypeContext(Instantiation classInst, Instantiation methodInst)
{ WRAPPER_NO_CONTRACT; InitTypeContext(classInst, methodInst, this); }
// Initialize a type context from a MethodDesc. If this is a MethodDesc that gets
// shared between generic instantiations (e.g. one being jitted by a code-sharing JIT)
// and a null declaring Type is passed then the type context will
// be a representative context, not an exact one.
// This is sufficient for most purposes, e.g. GC and field layout, because
// these operations are "parametric", i.e. behave the same for all shared types.
//
// If declaringType is non-null, then the MethodDesc is assumed to be
// shared between generic classes, and the type handle is used to give the
// exact type context. The method should be one of the methods supported by the
// given type handle.
//
// If the method is a method in an array type then the type context will
// contain one item in the class instantiation corresponding to the
// element type of the array.
//
// Finally, exactMethodInst should be specified if md might represent a generic method definition,
// as type parameters are not always available off the method desc for generic method definitions without
// forcing a load. Typically the caller will use MethodDesc::LoadMethodInstantiation.
inline SigTypeContext(MethodDesc *md)
{ WRAPPER_NO_CONTRACT; InitTypeContext(md,this); }
inline SigTypeContext(MethodDesc *md, TypeHandle declaringType)
{ WRAPPER_NO_CONTRACT; InitTypeContext(md,declaringType,this); }
inline SigTypeContext(MethodDesc *md, TypeHandle declaringType, Instantiation exactMethodInst)
{ WRAPPER_NO_CONTRACT; InitTypeContext(md,declaringType,exactMethodInst,this); }
// This is similar to the one above except that exact
// instantiations are provided explicitly.
// This will only normally be used when the code is shared
// between generic instantiations and after fetching the
// exact instantiations from the stack.
//
inline SigTypeContext(MethodDesc *md, Instantiation exactClassInst, Instantiation exactMethodInst)
{ WRAPPER_NO_CONTRACT; InitTypeContext(md,exactClassInst,exactMethodInst,this); }
// Initialize a type context from a type handle. This is used when
// generating the type context for a
// any of the metadata in the class covered by the type handle apart from
// the metadata for any generic methods in the class.
// If the type handle satisfies th.IsNull() then the created type context
// will be empty.
inline SigTypeContext(TypeHandle th)
{ WRAPPER_NO_CONTRACT; InitTypeContext(th,this); }
inline SigTypeContext(FieldDesc *pFD, TypeHandle declaringType = TypeHandle())
{ WRAPPER_NO_CONTRACT; InitTypeContext(pFD,declaringType,this); }
    // Copy constructor - try not to use this. The C++ compiler is not doing a good job
    // of copy-constructor based code, and we've had perf regressions when using this too
    // much for these simple objects. Use an explicit call to InitTypeContext instead,
// or use GetOptionalTypeContext.
inline SigTypeContext(const SigTypeContext &c)
{ WRAPPER_NO_CONTRACT; InitTypeContext(&c,this); }
    // Copy constructor from a possibly-NULL pointer.
inline SigTypeContext(const SigTypeContext *c)
{ WRAPPER_NO_CONTRACT; InitTypeContext(c,this); }
static void InitTypeContext(MethodDesc *md, SigTypeContext *pRes);
static void InitTypeContext(MethodDesc *md, TypeHandle declaringType, SigTypeContext *pRes);
static void InitTypeContext(MethodDesc *md, TypeHandle declaringType, Instantiation exactMethodInst, SigTypeContext *pRes);
static void InitTypeContext(MethodDesc *md, Instantiation exactClassInst, Instantiation exactMethodInst, SigTypeContext *pRes);
static void InitTypeContext(TypeHandle th, SigTypeContext *pRes);
static void InitTypeContext(FieldDesc *pFD, TypeHandle declaringType, SigTypeContext *pRes);
inline static void InitTypeContext(Instantiation classInst, Instantiation methodInst, SigTypeContext *pRes);
inline static void InitTypeContext(SigTypeContext *);
inline static void InitTypeContext(const SigTypeContext *c, SigTypeContext *pRes);
// These are allowed to return NULL if an empty type context is generated. The NULL value
// can then be passed around to represent the empty type context.
// pRes must be non-null.
// pRes must initially be zero-initialized, e.g. by the default SigTypeContext constructor.
static const SigTypeContext * GetOptionalTypeContext(MethodDesc *md, TypeHandle declaringType, SigTypeContext *pRes);
static const SigTypeContext * GetOptionalTypeContext(TypeHandle th, SigTypeContext *pRes);
    // SigTypeContexts are used as part of keys for various data structures indexed by instantiation
static BOOL Equal(const SigTypeContext *pCtx1, const SigTypeContext *pCtx2);
static BOOL IsValidTypeOnlyInstantiationOf(const SigTypeContext *pCtxTypicalMethodInstantiation, const SigTypeContext *pCtxTypeOnlyInstantiation);
BOOL IsEmpty() const { LIMITED_METHOD_CONTRACT; return m_classInst.IsEmpty() && m_methodInst.IsEmpty(); }
};
inline void SigTypeContext::InitTypeContext(SigTypeContext *pRes)
{
LIMITED_METHOD_DAC_CONTRACT;
}
inline void SigTypeContext::InitTypeContext(Instantiation classInst,
Instantiation methodInst,
SigTypeContext *pRes)
{
LIMITED_METHOD_CONTRACT;
pRes->m_classInst = classInst;
pRes->m_methodInst = methodInst;
}
// Copy constructor from a possibly-NULL pointer.
inline void SigTypeContext::InitTypeContext(const SigTypeContext *c,SigTypeContext *pRes)
{
LIMITED_METHOD_DAC_CONTRACT;
if (c)
{
pRes->m_classInst = c->m_classInst;
pRes->m_methodInst = c->m_methodInst;
}
else
{
pRes->m_classInst = Instantiation();
pRes->m_methodInst = Instantiation();
}
}
//------------------------------------------------------------------------
// Encapsulates pointers to code:SigTypeContext and code:Substitution chain
// that have been used to instantiate a generic type. The context is passed
// as "const InstantiationContext *" from code:SigPointer.GetTypeHandleThrowing
// down to code:TypeVarTypeDesc.SatisfiesConstraints where it is needed for
// instantiating constraints attached to type arguments.
//
// The reason why we need to pass these pointers down to the code that verifies
// that constraints are satisfied is the case when another type variable is
// substituted for a type variable and this argument is constrained by a generic
// type. In order to verify that constraints of the argument satisfy constraints
// of the parameter, the argument constraints must be instantiated in the same
// "instantiation context" as the original signature - and unfortunately this
// context cannot be extracted from the rest of the information that we have
// about the type that is being loaded.
//
// See code:TypeVarTypeDesc.SatisfiesConstraints for more details and an
// example scenario in which we are unable to verify constraints without this
// context.
//------------------------------------------------------------------------
class InstantiationContext
{
public:
const SigTypeContext *m_pArgContext;
const Substitution *m_pSubstChain;
inline InstantiationContext(const SigTypeContext *pArgContext = NULL, const Substitution *pSubstChain = NULL)
{
LIMITED_METHOD_DAC_CONTRACT;
m_pArgContext = pArgContext;
m_pSubstChain = pSubstChain;
}
};
#endif
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/debug/ee/s390x/primitives.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#include "stdafx.h"
#include "threads.h"
#include "../../shared/s390x/primitives.cpp"
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#include "stdafx.h"
#include "threads.h"
#include "../../shared/s390x/primitives.cpp"
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/debug/ee/i386/x86walker.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: x86walker.cpp
//
//
// x86 instruction decoding/stepping logic
//
//*****************************************************************************
#include "stdafx.h"
#include "walker.h"
#include "frames.h"
#include "openum.h"
#ifdef TARGET_X86
//
// The x86 walker is currently pretty minimal. It only recognizes call and return opcodes, plus a few jumps. The rest
// is treated as unknown.
//
void NativeWalker::Decode()
{
const BYTE *ip = m_ip;
m_type = WALK_UNKNOWN;
m_skipIP = NULL;
m_nextIP = NULL;
LOG((LF_CORDB, LL_INFO100000, "NW:Decode: m_ip 0x%x\n", m_ip));
//
// Skip instruction prefixes
//
do
{
switch (*ip)
{
// Segment overrides
case 0x26: // ES
case 0x2E: // CS
case 0x36: // SS
case 0x3E: // DS
case 0x64: // FS
case 0x65: // GS
// Size overrides
case 0x66: // Operand-Size
case 0x67: // Address-Size
// Lock
case 0xf0:
// String REP prefixes
case 0xf1:
case 0xf2: // REPNE/REPNZ
case 0xf3:
LOG((LF_CORDB, LL_INFO10000, "NW:Decode: prefix:%0.2x ", *ip));
ip++;
continue;
default:
break;
}
} while (0);
// Read the opcode
m_opcode = *ip++;
LOG((LF_CORDB, LL_INFO100000, "NW:Decode: ip 0x%x, m_opcode:%0.2x\n", ip, m_opcode));
if (m_opcode == 0xcc)
{
m_opcode = DebuggerController::GetPatchedOpcode(m_ip);
LOG((LF_CORDB, LL_INFO100000, "NW:Decode after patch look up: m_opcode:%0.2x\n", m_opcode));
}
// Analyze what we can of the opcode
switch (m_opcode)
{
case 0xff:
{
BYTE modrm = *ip++;
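            // ModR/M layout: mod = addressing mode (bits 7:6), reg = opcode extension selecting the CALL/JMP form (bits 5:3), rm = base register or memory operand (bits 2:0).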
BYTE mod = (modrm & 0xC0) >> 6;
BYTE reg = (modrm & 0x38) >> 3;
BYTE rm = (modrm & 0x07);
BYTE *result = 0;
WORD displace = 0;
if ((reg != 2) && (reg != 3) && (reg != 4) && (reg != 5)) {
//
// This is not a CALL or JMP instruction, return, unknown.
//
return;
}
if (m_registers != NULL)
{
// Only try to decode registers if we actually have reg sets.
switch (mod) {
case 0:
case 1:
case 2:
if (rm == 4) {
//
// Get values from the SIB byte
//
BYTE ss = (*ip & 0xC0) >> 6;
BYTE index = (*ip & 0x38) >> 3;
BYTE base = (*ip & 0x7);
ip++;
//
// Get starting value
//
if ((mod == 0) && (base == 5)) {
result = 0;
} else {
result = (BYTE *)(size_t)GetRegisterValue(base);
}
//
// Add in the [index]
//
if (index != 0x4) {
result = result + (GetRegisterValue(index) << ss);
}
//
// Finally add in the offset
//
if (mod == 0) {
if (base == 5) {
result = result + *((unsigned int *)ip);
displace = 7;
} else {
displace = 3;
}
} else if (mod == 1) {
result = result + *((char *)ip);
displace = 4;
} else { // == 2
result = result + *((unsigned int *)ip);
displace = 7;
}
} else {
//
// Get the value we need from the register.
//
if ((mod == 0) && (rm == 5)) {
result = 0;
} else {
result = (BYTE *)GetRegisterValue(rm);
}
if (mod == 0) {
if (rm == 5) {
result = result + *((unsigned int *)ip);
displace = 6;
} else {
displace = 2;
}
} else if (mod == 1) {
result = result + *((char *)ip);
displace = 3;
} else { // == 2
result = result + *((unsigned int *)ip);
displace = 6;
}
}
//
// Now dereference thru the result to get the resulting IP.
//
// If result is bad, then this means we can't predict what the nextIP will be.
// That's ok - we just leave m_nextIp=NULL. We can still provide callers
// with the proper walk-type.
// In practice, this shouldn't happen unless the jit emits bad opcodes.
if (result != NULL)
{
result = (BYTE *)(*((unsigned int *)result));
}
break;
case 3:
default:
result = (BYTE *)GetRegisterValue(rm);
displace = 2;
break;
}
} // have registers
if ((reg == 2) || (reg == 3)) {
m_type = WALK_CALL;
} else if ((reg == 4) || (reg == 5)) {
m_type = WALK_BRANCH;
} else {
break;
}
if (m_registers != NULL)
{
m_nextIP = result;
m_skipIP = m_ip + displace;
}
break;
} // end of 0xFF case
case 0xe8:
{
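            // E8 rel32: near relative call. ip already points past the opcode, so the target is the end of this 5-byte instruction (ip + 4) plus the 32-bit displacement.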
m_type = WALK_CALL;
UINT32 disp = *((UINT32*)ip);
m_nextIP = ip + 4 + disp;
m_skipIP = ip + 4;
break;
}
case 0xe9:
{
m_type = WALK_BRANCH;
INT32 disp = *((INT32*)ip);
m_nextIP = ip + 4 + disp;
m_skipIP = ip + 4;
break;
}
case 0x9a:
m_type = WALK_CALL;
m_nextIP = (BYTE*) *((UINT32*)ip);
m_skipIP = ip + 4;
break;
case 0xc2:
case 0xc3:
case 0xca:
case 0xcb:
m_type = WALK_RETURN;
break;
default:
break;
}
}
//
// Given a regdisplay and a register number, return the value of the register.
//
DWORD NativeWalker::GetRegisterValue(int registerNumber)
{
// If we're going to decode a register, then we'd better have a valid register set.
PREFIX_ASSUME(m_registers != NULL);
switch (registerNumber)
{
case 0:
return *m_registers->GetEaxLocation();
break;
case 1:
return *m_registers->GetEcxLocation();
break;
case 2:
return *m_registers->GetEdxLocation();
break;
case 3:
return *m_registers->GetEbxLocation();
break;
case 4:
return m_registers->SP;
break;
case 5:
return GetRegdisplayFP(m_registers);
break;
case 6:
return *m_registers->GetEsiLocation();
break;
case 7:
return *m_registers->GetEdiLocation();
break;
default:
_ASSERTE(!"Invalid register number!");
}
return 0;
}
// static
void NativeWalker::DecodeInstructionForPatchSkip(const BYTE *address, InstructionAttribute * pInstrAttrib)
{
//
// Skip instruction prefixes
//
LOG((LF_CORDB, LL_INFO10000, "Patch decode: "));
if (pInstrAttrib == NULL)
return;
const BYTE * origAddr = address;
do
{
switch (*address)
{
// Segment overrides
case 0x26: // ES
case 0x2E: // CS
case 0x36: // SS
case 0x3E: // DS
case 0x64: // FS
case 0x65: // GS
// Size overrides
case 0x66: // Operand-Size
case 0x67: // Address-Size
// Lock
case 0xf0:
// String REP prefixes
case 0xf2: // REPNE/REPNZ
case 0xf3:
LOG((LF_CORDB, LL_INFO10000, "prefix:%0.2x ", *address));
address++;
continue;
default:
break;
}
} while (0);
// There can be at most 4 prefixes.
_ASSERTE(((address - origAddr) <= 4));
//
// Look at opcode to tell if it's a call or an
// absolute branch.
//
pInstrAttrib->Reset();
// Note that we only care about m_cbInstr, m_cbDisp, and m_dwOffsetToDisp for relative branches
// (either call or jump instructions).
switch (*address)
{
case 0xEA: // JMP far
case 0xC2: // RET
case 0xC3: // RET N
pInstrAttrib->m_fIsAbsBranch = true;
LOG((LF_CORDB, LL_INFO10000, "ABS:%0.2x\n", *address));
break;
case 0xE8: // CALL relative
pInstrAttrib->m_fIsCall = true;
pInstrAttrib->m_fIsRelBranch = true;
LOG((LF_CORDB, LL_INFO10000, "CALL REL:%0.2x\n", *address));
address += 1;
pInstrAttrib->m_cbDisp = 4;
break;
case 0xC8: // ENTER
pInstrAttrib->m_fIsCall = true;
pInstrAttrib->m_fIsAbsBranch = true;
LOG((LF_CORDB, LL_INFO10000, "CALL ABS:%0.2x\n", *address));
break;
case 0xFF: // CALL/JMP modr/m
//
// Read opcode modifier from modr/m
//
switch ((address[1]&0x38)>>3)
{
case 2:
case 3:
pInstrAttrib->m_fIsCall = true;
FALLTHROUGH;
case 4:
case 5:
pInstrAttrib->m_fIsAbsBranch = true;
}
LOG((LF_CORDB, LL_INFO10000, "CALL/JMP modr/m:%0.2x\n", *address));
break;
case 0x9A: // CALL ptr16:32
pInstrAttrib->m_fIsCall = true;
pInstrAttrib->m_fIsAbsBranch = true;
break;
case 0xEB: // JMP rel8
pInstrAttrib->m_fIsRelBranch = true;
address += 1;
pInstrAttrib->m_cbDisp = 1;
break;
case 0xE9: // JMP rel32
pInstrAttrib->m_fIsRelBranch = true;
address += 1;
pInstrAttrib->m_cbDisp = 4;
break;
case 0x0F: // Jcc (conditional jump)
        // If the second opcode byte is between 0x80 and 0x8F, then it's a conditional jump.
// Conditional jumps are always relative.
if ((address[1] & 0xF0) == 0x80)
{
pInstrAttrib->m_fIsCond = true;
pInstrAttrib->m_fIsRelBranch = true;
address += 2; // 2-byte opcode
pInstrAttrib->m_cbDisp = 4;
}
break;
case 0x70:
case 0x71:
case 0x72:
case 0x73:
case 0x74:
case 0x75:
case 0x76:
case 0x77:
case 0x78:
case 0x79:
case 0x7A:
case 0x7B:
case 0x7C:
case 0x7D:
case 0x7E:
case 0x7F: // Jcc (conditional jump)
case 0xE3: // JCXZ/JECXZ (jump on CX/ECX zero)
pInstrAttrib->m_fIsCond = true;
pInstrAttrib->m_fIsRelBranch = true;
address += 1;
pInstrAttrib->m_cbDisp = 1;
break;
default:
LOG((LF_CORDB, LL_INFO10000, "NORMAL:%0.2x\n", *address));
}
// Get additional information for relative branches.
if (pInstrAttrib->m_fIsRelBranch)
{
_ASSERTE(pInstrAttrib->m_cbDisp != 0);
pInstrAttrib->m_dwOffsetToDisp = (address - origAddr);
// Relative jump and call instructions don't use the SIB byte, and there is no immediate value.
// So the instruction size is just the offset to the displacement plus the size of the displacement.
pInstrAttrib->m_cbInstr = pInstrAttrib->m_dwOffsetToDisp + pInstrAttrib->m_cbDisp;
}
}
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: x86walker.cpp
//
//
// x86 instruction decoding/stepping logic
//
//*****************************************************************************
#include "stdafx.h"
#include "walker.h"
#include "frames.h"
#include "openum.h"
#ifdef TARGET_X86
//
// The x86 walker is currently pretty minimal. It only recognizes call and return opcodes, plus a few jumps. The rest
// is treated as unknown.
//
void NativeWalker::Decode()
{
const BYTE *ip = m_ip;
m_type = WALK_UNKNOWN;
m_skipIP = NULL;
m_nextIP = NULL;
LOG((LF_CORDB, LL_INFO100000, "NW:Decode: m_ip 0x%x\n", m_ip));
//
// Skip instruction prefixes
//
do
{
switch (*ip)
{
// Segment overrides
case 0x26: // ES
case 0x2E: // CS
case 0x36: // SS
case 0x3E: // DS
case 0x64: // FS
case 0x65: // GS
// Size overrides
case 0x66: // Operand-Size
case 0x67: // Address-Size
// Lock
case 0xf0:
// String REP prefixes
case 0xf1:
case 0xf2: // REPNE/REPNZ
case 0xf3:
LOG((LF_CORDB, LL_INFO10000, "NW:Decode: prefix:%0.2x ", *ip));
ip++;
continue;
default:
break;
}
} while (0);
// Read the opcode
m_opcode = *ip++;
LOG((LF_CORDB, LL_INFO100000, "NW:Decode: ip 0x%x, m_opcode:%0.2x\n", ip, m_opcode));
if (m_opcode == 0xcc)
{
m_opcode = DebuggerController::GetPatchedOpcode(m_ip);
LOG((LF_CORDB, LL_INFO100000, "NW:Decode after patch look up: m_opcode:%0.2x\n", m_opcode));
}
// Analyze what we can of the opcode
switch (m_opcode)
{
case 0xff:
{
BYTE modrm = *ip++;
BYTE mod = (modrm & 0xC0) >> 6;
BYTE reg = (modrm & 0x38) >> 3;
BYTE rm = (modrm & 0x07);
BYTE *result = 0;
WORD displace = 0;
if ((reg != 2) && (reg != 3) && (reg != 4) && (reg != 5)) {
//
// This is not a CALL or JMP instruction, return, unknown.
//
return;
}
if (m_registers != NULL)
{
// Only try to decode registers if we actually have reg sets.
switch (mod) {
case 0:
case 1:
case 2:
if (rm == 4) {
//
// Get values from the SIB byte
//
BYTE ss = (*ip & 0xC0) >> 6;
BYTE index = (*ip & 0x38) >> 3;
BYTE base = (*ip & 0x7);
ip++;
//
// Get starting value
//
if ((mod == 0) && (base == 5)) {
result = 0;
} else {
result = (BYTE *)(size_t)GetRegisterValue(base);
}
//
// Add in the [index]
//
if (index != 0x4) {
result = result + (GetRegisterValue(index) << ss);
}
//
// Finally add in the offset
//
if (mod == 0) {
if (base == 5) {
result = result + *((unsigned int *)ip);
displace = 7;
} else {
displace = 3;
}
} else if (mod == 1) {
result = result + *((char *)ip);
displace = 4;
} else { // == 2
result = result + *((unsigned int *)ip);
displace = 7;
}
} else {
//
// Get the value we need from the register.
//
if ((mod == 0) && (rm == 5)) {
result = 0;
} else {
result = (BYTE *)GetRegisterValue(rm);
}
if (mod == 0) {
if (rm == 5) {
result = result + *((unsigned int *)ip);
displace = 6;
} else {
displace = 2;
}
} else if (mod == 1) {
result = result + *((char *)ip);
displace = 3;
} else { // == 2
result = result + *((unsigned int *)ip);
displace = 6;
}
}
//
// Now dereference thru the result to get the resulting IP.
//
// If result is bad, then this means we can't predict what the nextIP will be.
// That's ok - we just leave m_nextIp=NULL. We can still provide callers
// with the proper walk-type.
// In practice, this shouldn't happen unless the jit emits bad opcodes.
if (result != NULL)
{
result = (BYTE *)(*((unsigned int *)result));
}
break;
case 3:
default:
result = (BYTE *)GetRegisterValue(rm);
displace = 2;
break;
}
} // have registers
if ((reg == 2) || (reg == 3)) {
m_type = WALK_CALL;
} else if ((reg == 4) || (reg == 5)) {
m_type = WALK_BRANCH;
} else {
break;
}
if (m_registers != NULL)
{
m_nextIP = result;
m_skipIP = m_ip + displace;
}
break;
} // end of 0xFF case
case 0xe8:
{
m_type = WALK_CALL;
UINT32 disp = *((UINT32*)ip);
m_nextIP = ip + 4 + disp;
m_skipIP = ip + 4;
break;
}
case 0xe9:
{
m_type = WALK_BRANCH;
INT32 disp = *((INT32*)ip);
m_nextIP = ip + 4 + disp;
m_skipIP = ip + 4;
break;
}
case 0x9a:
m_type = WALK_CALL;
m_nextIP = (BYTE*) *((UINT32*)ip);
m_skipIP = ip + 4;
break;
case 0xc2:
case 0xc3:
case 0xca:
case 0xcb:
m_type = WALK_RETURN;
break;
default:
break;
}
}
//
// Given a regdisplay and a register number, return the value of the register.
//
DWORD NativeWalker::GetRegisterValue(int registerNumber)
{
// If we're going to decode a register, then we'd better have a valid register set.
PREFIX_ASSUME(m_registers != NULL);
switch (registerNumber)
{
case 0:
return *m_registers->GetEaxLocation();
break;
case 1:
return *m_registers->GetEcxLocation();
break;
case 2:
return *m_registers->GetEdxLocation();
break;
case 3:
return *m_registers->GetEbxLocation();
break;
case 4:
return m_registers->SP;
break;
case 5:
return GetRegdisplayFP(m_registers);
break;
case 6:
return *m_registers->GetEsiLocation();
break;
case 7:
return *m_registers->GetEdiLocation();
break;
default:
_ASSERTE(!"Invalid register number!");
}
return 0;
}
// static
void NativeWalker::DecodeInstructionForPatchSkip(const BYTE *address, InstructionAttribute * pInstrAttrib)
{
//
// Skip instruction prefixes
//
LOG((LF_CORDB, LL_INFO10000, "Patch decode: "));
if (pInstrAttrib == NULL)
return;
const BYTE * origAddr = address;
do
{
switch (*address)
{
// Segment overrides
case 0x26: // ES
case 0x2E: // CS
case 0x36: // SS
case 0x3E: // DS
case 0x64: // FS
case 0x65: // GS
// Size overrides
case 0x66: // Operand-Size
case 0x67: // Address-Size
// Lock
case 0xf0:
// String REP prefixes
case 0xf2: // REPNE/REPNZ
case 0xf3:
LOG((LF_CORDB, LL_INFO10000, "prefix:%0.2x ", *address));
address++;
continue;
default:
break;
}
} while (0);
// There can be at most 4 prefixes.
_ASSERTE(((address - origAddr) <= 4));
//
// Look at opcode to tell if it's a call or an
// absolute branch.
//
pInstrAttrib->Reset();
// Note that we only care about m_cbInstr, m_cbDisp, and m_dwOffsetToDisp for relative branches
// (either call or jump instructions).
switch (*address)
{
case 0xEA: // JMP far
case 0xC2: // RET
case 0xC3: // RET N
pInstrAttrib->m_fIsAbsBranch = true;
LOG((LF_CORDB, LL_INFO10000, "ABS:%0.2x\n", *address));
break;
case 0xE8: // CALL relative
pInstrAttrib->m_fIsCall = true;
pInstrAttrib->m_fIsRelBranch = true;
LOG((LF_CORDB, LL_INFO10000, "CALL REL:%0.2x\n", *address));
address += 1;
pInstrAttrib->m_cbDisp = 4;
break;
case 0xC8: // ENTER
pInstrAttrib->m_fIsCall = true;
pInstrAttrib->m_fIsAbsBranch = true;
LOG((LF_CORDB, LL_INFO10000, "CALL ABS:%0.2x\n", *address));
break;
case 0xFF: // CALL/JMP modr/m
//
// Read opcode modifier from modr/m
//
switch ((address[1]&0x38)>>3)
{
case 2:
case 3:
pInstrAttrib->m_fIsCall = true;
FALLTHROUGH;
case 4:
case 5:
pInstrAttrib->m_fIsAbsBranch = true;
}
LOG((LF_CORDB, LL_INFO10000, "CALL/JMP modr/m:%0.2x\n", *address));
break;
case 0x9A: // CALL ptr16:32
pInstrAttrib->m_fIsCall = true;
pInstrAttrib->m_fIsAbsBranch = true;
break;
case 0xEB: // JMP rel8
pInstrAttrib->m_fIsRelBranch = true;
address += 1;
pInstrAttrib->m_cbDisp = 1;
break;
case 0xE9: // JMP rel32
pInstrAttrib->m_fIsRelBranch = true;
address += 1;
pInstrAttrib->m_cbDisp = 4;
break;
case 0x0F: // Jcc (conditional jump)
        // If the second opcode byte is between 0x80 and 0x8F, then it's a conditional jump.
// Conditional jumps are always relative.
if ((address[1] & 0xF0) == 0x80)
{
pInstrAttrib->m_fIsCond = true;
pInstrAttrib->m_fIsRelBranch = true;
address += 2; // 2-byte opcode
pInstrAttrib->m_cbDisp = 4;
}
break;
case 0x70:
case 0x71:
case 0x72:
case 0x73:
case 0x74:
case 0x75:
case 0x76:
case 0x77:
case 0x78:
case 0x79:
case 0x7A:
case 0x7B:
case 0x7C:
case 0x7D:
case 0x7E:
case 0x7F: // Jcc (conditional jump)
case 0xE3: // JCXZ/JECXZ (jump on CX/ECX zero)
pInstrAttrib->m_fIsCond = true;
pInstrAttrib->m_fIsRelBranch = true;
address += 1;
pInstrAttrib->m_cbDisp = 1;
break;
default:
LOG((LF_CORDB, LL_INFO10000, "NORMAL:%0.2x\n", *address));
}
// Get additional information for relative branches.
if (pInstrAttrib->m_fIsRelBranch)
{
_ASSERTE(pInstrAttrib->m_cbDisp != 0);
pInstrAttrib->m_dwOffsetToDisp = (address - origAddr);
// Relative jump and call instructions don't use the SIB byte, and there is no immediate value.
// So the instruction size is just the offset to the displacement plus the size of the displacement.
pInstrAttrib->m_cbInstr = pInstrAttrib->m_dwOffsetToDisp + pInstrAttrib->m_cbDisp;
}
}
#endif
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/c_runtime/swscanf/test8/test8.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test8.c
**
** Purpose: Tests swscanf with unsigned numbers
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../swscanf.h"
PALTEST(c_runtime_swscanf_test8_paltest_swscanf_test8, "c_runtime/swscanf/test8/paltest_swscanf_test8")
{
    int n65535 = 65535; /* Workaround for compiler strictness */
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
DoNumTest(convert("1234d"), convert("%u"), 1234);
DoNumTest(convert("1234d"), convert("%2u"), 12);
DoNumTest(convert("-1"), convert("%u"), -1);
DoNumTest(convert("0x1234"), convert("%u"), 0);
DoNumTest(convert("012"), convert("%u"), 12);
DoShortNumTest(convert("-1"), convert("%hu"), n65535);
DoShortNumTest(convert("65536"), convert("%hu"), 0);
DoNumTest(convert("-1"), convert("%lu"), -1);
DoNumTest(convert("65536"), convert("%lu"), 65536);
DoNumTest(convert("-1"), convert("%Lu"), -1);
DoNumTest(convert("65536"), convert("%Lu"), 65536);
DoI64NumTest(convert("4294967296"), convert("%I64u"), I64(4294967296));
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test8.c
**
** Purpose: Tests swscanf with unsigned numbers
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../swscanf.h"
PALTEST(c_runtime_swscanf_test8_paltest_swscanf_test8, "c_runtime/swscanf/test8/paltest_swscanf_test8")
{
    int n65535 = 65535; /* Workaround for compiler strictness */
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
DoNumTest(convert("1234d"), convert("%u"), 1234);
DoNumTest(convert("1234d"), convert("%2u"), 12);
DoNumTest(convert("-1"), convert("%u"), -1);
DoNumTest(convert("0x1234"), convert("%u"), 0);
DoNumTest(convert("012"), convert("%u"), 12);
DoShortNumTest(convert("-1"), convert("%hu"), n65535);
DoShortNumTest(convert("65536"), convert("%hu"), 0);
DoNumTest(convert("-1"), convert("%lu"), -1);
DoNumTest(convert("65536"), convert("%lu"), 65536);
DoNumTest(convert("-1"), convert("%Lu"), -1);
DoNumTest(convert("65536"), convert("%Lu"), 65536);
DoI64NumTest(convert("4294967296"), convert("%I64u"), I64(4294967296));
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
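Roughly, the shape of that change is sketched below; this is a hypothetical, self-contained illustration with invented types, not the actual `GenTree`/`gtHasReg()` code:

```cpp
typedef unsigned regNumber;
const regNumber REG_NA = 0xFFFFFFFFu;   // "no register assigned"

// Toy stand-in for a multi-reg GT_STORE_LCL_VAR node: each slot of the local
// may independently live in a register or on the stack.
struct MultiRegLclSketch
{
    unsigned  regCount;
    regNumber regs[4];

    unsigned GetRegCount() const { return regCount; }

    // Old behaviour: only the first slot decided whether the node "has a register",
    // so an enregistered non-first slot holding a GC ref was never reported.
    bool HasRegFirstSlotOnly() const { return regs[0] != REG_NA; }

    // Multi-reg-aware behaviour: any enregistered slot counts.
    bool HasRegAnySlot() const
    {
        for (unsigned i = 0; i < regCount; i++)
        {
            if (regs[i] != REG_NA)
            {
                return true;
            }
        }
        return false;
    }
};

int main()
{
    // First slot spilled to the stack, second slot enregistered (and holding a GC ref).
    MultiRegLclSketch node = { 2, { REG_NA, 3, REG_NA, REG_NA } };

    bool oldAnswer = node.HasRegFirstSlotOnly();   // false: the GC ref's register is missed
    bool newAnswer = node.HasRegAnySlot();         // true:  the register gets GC-tracked
    return (!oldAnswer && newAnswer) ? 0 : 1;
}
```

The real JIT logic is more involved; the point here is only the per-slot loop, which is what lets a GC ref living in a non-first slot reach the GC tracking sets.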
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Used `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Used `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/vm/comsynchronizable.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Header: COMSynchronizable.cpp
**
** Purpose: Native methods on System.SynchronizableObject
** and its subclasses.
**
**
===========================================================*/
#include "common.h"
#include <object.h>
#include "threads.h"
#include "excep.h"
#include "vars.hpp"
#include "field.h"
#include "comsynchronizable.h"
#include "dbginterface.h"
#include "comdelegate.h"
#include "eeconfig.h"
#include "callhelpers.h"
#include "appdomain.hpp"
#include "appdomain.inl"
#ifndef TARGET_UNIX
#include "utilcode.h"
#endif
// For the following helpers, we make no attempt to synchronize. The app developer
// is responsible for managing their own race conditions.
//
// Note: if the internal Thread is NULL, this implies that the exposed object has
// finalized and then been resurrected.
static inline BOOL ThreadNotStarted(Thread *t)
{
WRAPPER_NO_CONTRACT;
return (t && t->IsUnstarted() && !t->HasValidThreadHandle());
}
static inline BOOL ThreadIsRunning(Thread *t)
{
WRAPPER_NO_CONTRACT;
return (t &&
(t->m_State & (Thread::TS_ReportDead|Thread::TS_Dead)) == 0 &&
(t->HasValidThreadHandle()));
}
static inline BOOL ThreadIsDead(Thread *t)
{
WRAPPER_NO_CONTRACT;
return (t == 0 || t->IsDead());
}
// Map our exposed notion of thread priorities into the enumeration that NT uses.
static INT32 MapToNTPriority(INT32 ours)
{
CONTRACTL
{
GC_NOTRIGGER;
THROWS;
MODE_ANY;
}
CONTRACTL_END;
INT32 NTPriority = 0;
switch (ours)
{
case ThreadNative::PRIORITY_LOWEST:
NTPriority = THREAD_PRIORITY_LOWEST;
break;
case ThreadNative::PRIORITY_BELOW_NORMAL:
NTPriority = THREAD_PRIORITY_BELOW_NORMAL;
break;
case ThreadNative::PRIORITY_NORMAL:
NTPriority = THREAD_PRIORITY_NORMAL;
break;
case ThreadNative::PRIORITY_ABOVE_NORMAL:
NTPriority = THREAD_PRIORITY_ABOVE_NORMAL;
break;
case ThreadNative::PRIORITY_HIGHEST:
NTPriority = THREAD_PRIORITY_HIGHEST;
break;
default:
COMPlusThrow(kArgumentOutOfRangeException, W("Argument_InvalidFlag"));
}
return NTPriority;
}
// Map to our exposed notion of thread priorities from the enumeration that NT uses.
INT32 MapFromNTPriority(INT32 NTPriority)
{
LIMITED_METHOD_CONTRACT;
INT32 ours = 0;
if (NTPriority <= THREAD_PRIORITY_LOWEST)
{
// managed code does not support IDLE. Map it to PRIORITY_LOWEST.
ours = ThreadNative::PRIORITY_LOWEST;
}
else if (NTPriority >= THREAD_PRIORITY_HIGHEST)
{
ours = ThreadNative::PRIORITY_HIGHEST;
}
else if (NTPriority == THREAD_PRIORITY_BELOW_NORMAL)
{
ours = ThreadNative::PRIORITY_BELOW_NORMAL;
}
else if (NTPriority == THREAD_PRIORITY_NORMAL)
{
ours = ThreadNative::PRIORITY_NORMAL;
}
else if (NTPriority == THREAD_PRIORITY_ABOVE_NORMAL)
{
ours = ThreadNative::PRIORITY_ABOVE_NORMAL;
}
else
{
_ASSERTE (!"not supported priority");
ours = ThreadNative::PRIORITY_NORMAL;
}
return ours;
}
void ThreadNative::KickOffThread_Worker(LPVOID ptr)
{
CONTRACTL
{
GC_TRIGGERS;
THROWS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
KickOffThread_Args *pKickOffArgs = (KickOffThread_Args *) ptr;
pKickOffArgs->retVal = 0;
PREPARE_NONVIRTUAL_CALLSITE(METHOD__THREAD__START_CALLBACK);
DECLARE_ARGHOLDER_ARRAY(args, 1);
args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(GetThread()->GetExposedObjectRaw());
CALL_MANAGED_METHOD_NORET(args);
}
// Helper to avoid two EX_TRY/EX_CATCH blocks in one function
static void PulseAllHelper(Thread* pThread)
{
CONTRACTL
{
GC_TRIGGERS;
DISABLED(NOTHROW);
MODE_COOPERATIVE;
}
CONTRACTL_END;
EX_TRY
{
// GetExposedObject() will either throw, or we have a valid object. Note
// that we re-acquire it each time, since it may move during calls.
pThread->GetExposedObject()->EnterObjMonitor();
pThread->GetExposedObject()->PulseAll();
pThread->GetExposedObject()->LeaveObjMonitor();
}
EX_CATCH
{
// just keep going...
}
EX_END_CATCH(SwallowAllExceptions)
}
// When an exposed thread is started by Win32, this is where it starts.
ULONG WINAPI ThreadNative::KickOffThread(void* pass)
{
CONTRACTL
{
GC_TRIGGERS;
THROWS;
MODE_ANY;
}
CONTRACTL_END;
Thread* pThread = (Thread*)pass;
_ASSERTE(pThread != NULL);
if (pThread->HasStarted())
{
// Do not swallow the unhandled exception here
//
// Fire ETW event to correlate with the thread that created current thread
if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, ThreadRunning))
FireEtwThreadRunning(pThread, GetClrInstanceId());
// We have a sticky problem here.
//
// Under some circumstances, the context of 'this' doesn't match the context
// of the thread. Today this can only happen if the thread is marked for an
// STA. If so, the delegate that is stored in the object may not be directly
// suitable for invocation. Instead, we need to call through a proxy so that
// the correct context transitions occur.
//
// All the changes occur inside HasStarted(), which will switch this thread
// over to a brand new STA as necessary. We have to notice this happening, so
// we can adjust the delegate we are going to invoke on.
_ASSERTE(GetThread() == pThread); // Now that it's started
KickOffThread_Args args;
args.share = NULL;
args.pThread = pThread;
ManagedThreadBase::KickOff(KickOffThread_Worker, &args);
PulseAllHelper(pThread);
GCX_PREEMP_NO_DTOR();
pThread->ClearThreadCPUGroupAffinity();
DestroyThread(pThread);
}
return 0;
}
extern "C" void QCALLTYPE ThreadNative_Start(QCall::ThreadHandle thread, int threadStackSize, int priority, PCWSTR pThreadName)
{
QCALL_CONTRACT;
BEGIN_QCALL;
ThreadNative::Start(thread, threadStackSize, priority, pThreadName);
END_QCALL;
}
void ThreadNative::Start(Thread* pNewThread, int threadStackSize, int priority, PCWSTR pThreadName)
{
_ASSERTE(pNewThread != NULL);
// Is the thread already started? You can't restart a thread.
if (!ThreadNotStarted(pNewThread))
{
COMPlusThrow(kThreadStateException, W("ThreadState_AlreadyStarted"));
}
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Attempt to eagerly set the apartment state during thread startup.
Thread::ApartmentState as = pNewThread->GetExplicitApartment();
if (as == Thread::AS_Unknown)
{
pNewThread->SetApartment(Thread::AS_InMTA);
}
#endif
pNewThread->IncExternalCount();
// Fire an ETW event to mark the current thread as the launcher of the new thread
if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, ThreadCreating))
FireEtwThreadCreating(pNewThread, GetClrInstanceId());
// As soon as we create the new thread, it is eligible for suspension, etc.
// So it gets transitioned to cooperative mode before this call returns to
// us. It is our duty to start it running immediately, so that GC isn't blocked.
BOOL success = pNewThread->CreateNewThread(
threadStackSize /* 0 stackSize override*/,
KickOffThread, pNewThread, pThreadName);
if (!success)
{
pNewThread->DecExternalCount(FALSE);
COMPlusThrowOM();
}
// After we have established the thread handle, we can check m_Priority.
// This ordering is required to eliminate the race condition on setting the
// priority of a thread just as it starts up.
pNewThread->SetThreadPriority(MapToNTPriority(priority));
pNewThread->ChooseThreadCPUGroupAffinity();
FastInterlockOr((ULONG *) &pNewThread->m_State, Thread::TS_LegalToJoin);
DWORD ret = pNewThread->StartThread();
// When running under a user mode native debugger there is a race
// between the moment we've created the thread (in CreateNewThread) and
// the moment we resume it (in StartThread); the debugger may receive
// the "ct" (create thread) notification, and it will attempt to
// suspend/resume all threads in the process. Now imagine the debugger
// resumes this thread first, and only later does it try to resume the
// newly created thread. In these conditions our call to ResumeThread
// may come before the debugger's call to ResumeThread actually causing
// ret to equal 2.
// We cannot use IsDebuggerPresent() in the condition below because the
// debugger may have been detached between the time it got the notification
// and the moment we execute the test below.
_ASSERTE(ret == 1 || ret == 2);
// Synchronize with HasStarted.
YIELD_WHILE (!pNewThread->HasThreadState(Thread::TS_FailStarted) &&
pNewThread->HasThreadState(Thread::TS_Unstarted));
if (pNewThread->HasThreadState(Thread::TS_FailStarted))
{
GCX_COOP();
PulseAllHelper(pNewThread);
pNewThread->HandleThreadStartupFailure();
}
}
// Note that you can manipulate the priority of a thread that hasn't started yet,
// or one that is running. But you get an exception if you manipulate the priority
// of a thread that has died.
FCIMPL1(INT32, ThreadNative::GetPriority, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
// validate the handle
if (ThreadIsDead(pThisUNSAFE->GetInternal()))
FCThrowRes(kThreadStateException, W("ThreadState_Dead_Priority"));
return pThisUNSAFE->m_Priority;
}
FCIMPLEND
FCIMPL2(void, ThreadNative::SetPriority, ThreadBaseObject* pThisUNSAFE, INT32 iPriority)
{
FCALL_CONTRACT;
int priority;
Thread *thread;
THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
HELPER_METHOD_FRAME_BEGIN_1(pThis);
if (pThis==NULL)
{
COMPlusThrow(kNullReferenceException, W("NullReference_This"));
}
// translate the priority (validating as well)
priority = MapToNTPriority(iPriority); // can throw; needs a frame
// validate the thread
thread = pThis->GetInternal();
if (ThreadIsDead(thread))
{
COMPlusThrow(kThreadStateException, W("ThreadState_Dead_Priority"));
}
INT32 oldPriority = pThis->m_Priority;
    // Eliminate the race condition by establishing m_Priority before we check whether
// the thread is running. See ThreadNative::Start() for the other half.
pThis->m_Priority = iPriority;
if (!thread->SetThreadPriority(priority))
{
pThis->m_Priority = oldPriority;
COMPlusThrow(kThreadStateException, W("ThreadState_SetPriorityFailed"));
}
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
// This service can be called on unstarted and dead threads. For unstarted ones, the
// next wait will be interrupted. For dead ones, this service quietly does nothing.
FCIMPL1(void, ThreadNative::Interrupt, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
Thread *thread = pThisUNSAFE->GetInternal();
if (thread == 0)
FCThrowExVoid(kThreadStateException, IDS_EE_THREAD_CANNOT_GET, NULL, NULL, NULL);
HELPER_METHOD_FRAME_BEGIN_0();
thread->UserInterrupt(Thread::TI_Interrupt);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
FCIMPL1(FC_BOOL_RET, ThreadNative::IsAlive, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
THREADBASEREF thisRef(pThisUNSAFE);
BOOL ret = false;
// Keep managed Thread object alive, since the native object's
// lifetime is tied to the managed object's finalizer. And with
// resurrection, it may be possible to get a dangling pointer here -
// consider both protecting thisRef and setting the managed object's
// Thread* to NULL in the GC's ScanForFinalization method.
HELPER_METHOD_FRAME_BEGIN_RET_1(thisRef);
Thread *thread = thisRef->GetInternal();
if (thread == 0)
COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
ret = ThreadIsRunning(thread);
HELPER_METHOD_POLL();
HELPER_METHOD_FRAME_END();
FC_RETURN_BOOL(ret);
}
FCIMPLEND
FCIMPL2(FC_BOOL_RET, ThreadNative::Join, ThreadBaseObject* pThisUNSAFE, INT32 Timeout)
{
FCALL_CONTRACT;
BOOL retVal = FALSE;
THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
HELPER_METHOD_FRAME_BEGIN_RET_1(pThis);
if (pThis==NULL)
COMPlusThrow(kNullReferenceException, W("NullReference_This"));
// validate the timeout
if ((Timeout < 0) && (Timeout != INFINITE_TIMEOUT))
COMPlusThrowArgumentOutOfRange(W("millisecondsTimeout"), W("ArgumentOutOfRange_NeedNonNegOrNegative1"));
retVal = DoJoin(pThis, Timeout);
HELPER_METHOD_FRAME_END();
FC_RETURN_BOOL(retVal);
}
FCIMPLEND
#undef Sleep
FCIMPL1(void, ThreadNative::Sleep, INT32 iTime)
{
FCALL_CONTRACT;
HELPER_METHOD_FRAME_BEGIN_0();
GetThread()->UserSleep(iTime);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
#define Sleep(dwMilliseconds) Dont_Use_Sleep(dwMilliseconds)
extern "C" void QCALLTYPE ThreadNative_UninterruptibleSleep0()
{
QCALL_CONTRACT;
BEGIN_QCALL;
ClrSleepEx(0, false);
END_QCALL;
}
FCIMPL1(INT32, ThreadNative::GetManagedThreadId, ThreadBaseObject* th) {
FCALL_CONTRACT;
FC_GC_POLL_NOT_NEEDED();
if (th == NULL)
FCThrow(kNullReferenceException);
return th->GetManagedThreadId();
}
FCIMPLEND
NOINLINE static Object* GetCurrentThreadHelper()
{
FCALL_CONTRACT;
FC_INNER_PROLOG(ThreadNative::GetCurrentThread);
OBJECTREF refRetVal = NULL;
HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, refRetVal);
refRetVal = GetThread()->GetExposedObject();
HELPER_METHOD_FRAME_END();
FC_INNER_EPILOG();
return OBJECTREFToObject(refRetVal);
}
FCIMPL0(Object*, ThreadNative::GetCurrentThread)
{
FCALL_CONTRACT;
OBJECTHANDLE ExposedObject = GetThread()->m_ExposedObject;
_ASSERTE(ExposedObject != 0); //Thread's constructor always initializes its GCHandle
Object* result = *((Object**) ExposedObject);
if (result != 0)
return result;
FC_INNER_RETURN(Object*, GetCurrentThreadHelper());
}
FCIMPLEND
extern "C" UINT64 QCALLTYPE ThreadNative_GetCurrentOSThreadId()
{
QCALL_CONTRACT;
// The Windows API GetCurrentThreadId returns a 32-bit integer thread ID.
// On some non-Windows platforms (e.g. OSX), the thread ID is a 64-bit value.
// We special case the API for non-Windows to get the 64-bit value and zero-extend
// the Windows value to return a single data type on all platforms.
UINT64 threadId;
BEGIN_QCALL;
#ifndef TARGET_UNIX
threadId = (UINT64) GetCurrentThreadId();
#else
threadId = (UINT64) PAL_GetCurrentOSThreadId();
#endif
END_QCALL;
return threadId;
}
FCIMPL1(void, ThreadNative::Initialize, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
HELPER_METHOD_FRAME_BEGIN_1(pThis);
_ASSERTE(pThis != NULL);
_ASSERTE(pThis->m_InternalThread == NULL);
// if we don't have an internal Thread object associated with this exposed object,
// now is our first opportunity to create one.
Thread *unstarted = SetupUnstartedThread();
PREFIX_ASSUME(unstarted != NULL);
if (GetThread()->GetDomain()->IgnoreUnhandledExceptions())
{
unstarted->SetThreadStateNC(Thread::TSNC_IgnoreUnhandledExceptions);
}
pThis->SetInternal(unstarted);
pThis->SetManagedThreadId(unstarted->GetThreadId());
unstarted->SetExposedObject(pThis);
// Initialize the thread priority to normal.
pThis->SetPriority(ThreadNative::PRIORITY_NORMAL);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
// Set whether or not this is a background thread.
FCIMPL2(void, ThreadNative::SetBackground, ThreadBaseObject* pThisUNSAFE, CLR_BOOL isBackground)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
// validate the thread
Thread *thread = pThisUNSAFE->GetInternal();
if (ThreadIsDead(thread))
FCThrowResVoid(kThreadStateException, W("ThreadState_Dead_State"));
HELPER_METHOD_FRAME_BEGIN_0();
thread->SetBackground(isBackground);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
// Return whether or not this is a background thread.
FCIMPL1(FC_BOOL_RET, ThreadNative::IsBackground, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
// validate the thread
Thread *thread = pThisUNSAFE->GetInternal();
if (ThreadIsDead(thread))
FCThrowRes(kThreadStateException, W("ThreadState_Dead_State"));
FC_RETURN_BOOL(thread->IsBackground());
}
FCIMPLEND
// Deliver the state of the thread as a consistent set of bits.
// This copied in VM\EEDbgInterfaceImpl.h's
// CorDebugUserState GetUserState( Thread *pThread )
// , so propagate changes to both functions
FCIMPL1(INT32, ThreadNative::GetThreadState, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
INT32 res = 0;
Thread::ThreadState state;
if (pThisUNSAFE==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
// validate the thread. Failure here implies that the thread was finalized
// and then resurrected.
Thread *thread = pThisUNSAFE->GetInternal();
if (!thread)
FCThrowEx(kThreadStateException, IDS_EE_THREAD_CANNOT_GET, NULL, NULL, NULL);
HELPER_METHOD_FRAME_BEGIN_RET_0();
// grab a snapshot
state = thread->GetSnapshotState();
if (state & Thread::TS_Background)
res |= ThreadBackground;
if (state & Thread::TS_Unstarted)
res |= ThreadUnstarted;
// Don't report a StopRequested if the thread has actually stopped.
if (state & Thread::TS_Dead)
{
res |= ThreadStopped;
}
else
{
if (state & Thread::TS_AbortRequested)
res |= ThreadAbortRequested;
}
if (state & Thread::TS_Interruptible)
res |= ThreadWaitSleepJoin;
HELPER_METHOD_POLL();
HELPER_METHOD_FRAME_END();
return res;
}
FCIMPLEND
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Indicate whether the thread will host an STA (this may fail if the thread has
// already been made part of the MTA, use GetApartmentState or the return state
// from this routine to check for this).
FCIMPL2(INT32, ThreadNative::SetApartmentState, ThreadBaseObject* pThisUNSAFE, INT32 iState)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
INT32 retVal = ApartmentUnknown;
BOOL ok = TRUE;
THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
HELPER_METHOD_FRAME_BEGIN_RET_1(pThis);
// Translate state input. ApartmentUnknown is not an acceptable input state.
// Throw an exception here rather than pass it through to the internal
// routine, which asserts.
Thread::ApartmentState state = Thread::AS_Unknown;
if (iState == ApartmentSTA)
state = Thread::AS_InSTA;
else if (iState == ApartmentMTA)
state = Thread::AS_InMTA;
else if (iState == ApartmentUnknown)
state = Thread::AS_Unknown;
else
COMPlusThrow(kArgumentOutOfRangeException, W("ArgumentOutOfRange_Enum"));
Thread *thread = pThis->GetInternal();
if (!thread)
COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
{
pThis->EnterObjMonitor();
// We can only change the apartment if the thread is unstarted or
// running, and if it's running we have to be in the thread's
// context.
if ((!ThreadNotStarted(thread) && !ThreadIsRunning(thread)) ||
(!ThreadNotStarted(thread) && (GetThread() != thread)))
ok = FALSE;
else
{
EX_TRY
{
state = thread->SetApartment(state);
}
EX_CATCH
{
pThis->LeaveObjMonitor();
EX_RETHROW;
}
EX_END_CATCH_UNREACHABLE;
}
pThis->LeaveObjMonitor();
}
// Now it's safe to throw exceptions again.
if (!ok)
COMPlusThrow(kThreadStateException);
// Translate state back into external form
if (state == Thread::AS_InSTA)
retVal = ApartmentSTA;
else if (state == Thread::AS_InMTA)
retVal = ApartmentMTA;
else if (state == Thread::AS_Unknown)
retVal = ApartmentUnknown;
else
_ASSERTE(!"Invalid state returned from SetApartment");
HELPER_METHOD_FRAME_END();
return retVal;
}
FCIMPLEND
// Return whether the thread hosts an STA, is a member of the MTA or is not
// currently initialized for COM.
FCIMPL1(INT32, ThreadNative::GetApartmentState, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
INT32 retVal = 0;
THREADBASEREF refThis = (THREADBASEREF) ObjectToOBJECTREF(pThisUNSAFE);
HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
if (refThis == NULL)
{
COMPlusThrow(kNullReferenceException, W("NullReference_This"));
}
Thread* thread = refThis->GetInternal();
if (ThreadIsDead(thread))
{
COMPlusThrow(kThreadStateException, W("ThreadState_Dead_State"));
}
Thread::ApartmentState state = thread->GetApartment();
#ifdef FEATURE_COMINTEROP
if (state == Thread::AS_Unknown)
{
// If the CLR hasn't started COM yet, start it up and attempt the call again.
// We do this in order to minimize the number of situations under which we return
// ApartmentState.Unknown to our callers.
if (!g_fComStarted)
{
EnsureComStarted();
state = thread->GetApartment();
}
}
#endif // FEATURE_COMINTEROP
// Translate state into external form
retVal = ApartmentUnknown;
if (state == Thread::AS_InSTA)
{
retVal = ApartmentSTA;
}
else if (state == Thread::AS_InMTA)
{
retVal = ApartmentMTA;
}
else if (state == Thread::AS_Unknown)
{
retVal = ApartmentUnknown;
}
else
{
_ASSERTE(!"Invalid state returned from GetApartment");
}
HELPER_METHOD_FRAME_END();
return retVal;
}
FCIMPLEND
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
void ReleaseThreadExternalCount(Thread * pThread)
{
WRAPPER_NO_CONTRACT;
pThread->DecExternalCount(FALSE);
}
typedef Holder<Thread *, DoNothing, ReleaseThreadExternalCount> ThreadExternalCountHolder;
// Wait for the thread to die
BOOL ThreadNative::DoJoin(THREADBASEREF DyingThread, INT32 timeout)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(DyingThread != NULL);
PRECONDITION((timeout >= 0) || (timeout == INFINITE_TIMEOUT));
}
CONTRACTL_END;
Thread * DyingInternal = DyingThread->GetInternal();
// Validate the handle. It's valid to Join a thread that's not running -- so
// long as it was once started.
if (DyingInternal == 0 ||
!(DyingInternal->m_State & Thread::TS_LegalToJoin))
{
COMPlusThrow(kThreadStateException, W("ThreadState_NotStarted"));
}
// Don't grab the handle until we know it has started, to eliminate the race
// condition.
if (ThreadIsDead(DyingInternal) || !DyingInternal->HasValidThreadHandle())
return TRUE;
DWORD dwTimeOut32 = (timeout == INFINITE_TIMEOUT
? INFINITE
: (DWORD) timeout);
// There is a race here. DyingThread is going to close its thread handle.
// If we grab the handle and then DyingThread closes it, we will wait forever
// in DoAppropriateWait.
int RefCount = DyingInternal->IncExternalCount();
if (RefCount == 1)
{
// !!! We resurrect the Thread Object.
// !!! We will keep the Thread ref count to be 1 so that we will not try
// !!! to destroy the Thread Object again.
// !!! Do not call DecExternalCount here!
_ASSERTE (!DyingInternal->HasValidThreadHandle());
return TRUE;
}
ThreadExternalCountHolder dyingInternalHolder(DyingInternal);
if (!DyingInternal->HasValidThreadHandle())
{
return TRUE;
}
GCX_PREEMP();
DWORD rv = DyingInternal->JoinEx(dwTimeOut32, (WaitMode)(WaitMode_Alertable/*alertable*/|WaitMode_InDeadlock));
switch(rv)
{
case WAIT_OBJECT_0:
return TRUE;
case WAIT_TIMEOUT:
break;
case WAIT_FAILED:
if(!DyingInternal->HasValidThreadHandle())
return TRUE;
break;
default:
_ASSERTE(!"This return code is not understood \n");
break;
}
return FALSE;
}
// If the exposed object is created after-the-fact, for an existing thread, we call
// InitExisting on it. This is the other "construction", as opposed to SetDelegate.
void ThreadBaseObject::InitExisting()
{
CONTRACTL
{
GC_NOTRIGGER;
NOTHROW;
MODE_COOPERATIVE;
}
CONTRACTL_END;
Thread *pThread = GetInternal();
_ASSERTE (pThread);
switch (pThread->GetThreadPriority())
{
case THREAD_PRIORITY_LOWEST:
case THREAD_PRIORITY_IDLE:
m_Priority = ThreadNative::PRIORITY_LOWEST;
break;
case THREAD_PRIORITY_BELOW_NORMAL:
m_Priority = ThreadNative::PRIORITY_BELOW_NORMAL;
break;
case THREAD_PRIORITY_NORMAL:
m_Priority = ThreadNative::PRIORITY_NORMAL;
break;
case THREAD_PRIORITY_ABOVE_NORMAL:
m_Priority = ThreadNative::PRIORITY_ABOVE_NORMAL;
break;
case THREAD_PRIORITY_HIGHEST:
case THREAD_PRIORITY_TIME_CRITICAL:
m_Priority = ThreadNative::PRIORITY_HIGHEST;
break;
case THREAD_PRIORITY_ERROR_RETURN:
_ASSERTE(FALSE);
m_Priority = ThreadNative::PRIORITY_NORMAL;
break;
default:
m_Priority = ThreadNative::PRIORITY_NORMAL;
break;
}
}
FCIMPL1(void, ThreadNative::Finalize, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
// This function is intentionally blank.
// See comment in code:MethodTable::CallFinalizer.
_ASSERTE (!"Should not be called");
FCUnique(0x21);
}
FCIMPLEND
#ifdef FEATURE_COMINTEROP
FCIMPL1(void, ThreadNative::DisableComObjectEagerCleanup, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
_ASSERTE(pThisUNSAFE != NULL);
VALIDATEOBJECT(pThisUNSAFE);
Thread *pThread = pThisUNSAFE->GetInternal();
HELPER_METHOD_FRAME_BEGIN_0();
if (pThread == NULL)
COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
pThread->SetDisableComObjectEagerCleanup();
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
#endif //FEATURE_COMINTEROP
extern "C" void QCALLTYPE ThreadNative_InformThreadNameChange(QCall::ThreadHandle thread, LPCWSTR name, INT32 len)
{
QCALL_CONTRACT;
BEGIN_QCALL;
ThreadNative::InformThreadNameChange(thread, name, len);
END_QCALL;
}
void ThreadNative::InformThreadNameChange(Thread* pThread, LPCWSTR name, INT32 len)
{
    // On Windows 10 Creators Update and later, also set the unmanaged thread name. It shows up in ETW traces and debuggers,
    // which is very helpful as more and more threads get a meaningful name.
    // The name also shows up on Linux, in gdb and similar tools.
if (len > 0 && name != NULL && pThread->GetThreadHandle() != INVALID_HANDLE_VALUE)
{
SetThreadName(pThread->GetThreadHandle(), name);
}
#ifdef PROFILING_SUPPORTED
{
BEGIN_PROFILER_CALLBACK(CORProfilerTrackThreads());
if (name == NULL)
{
(&g_profControlBlock)->ThreadNameChanged((ThreadID)pThread, 0, NULL);
}
else
{
(&g_profControlBlock)->ThreadNameChanged((ThreadID)pThread, len, (WCHAR*)name);
}
END_PROFILER_CALLBACK();
}
#endif // PROFILING_SUPPORTED
#ifdef DEBUGGING_SUPPORTED
if (CORDebuggerAttached())
{
_ASSERTE(NULL != g_pDebugInterface);
g_pDebugInterface->NameChangeEvent(NULL, pThread);
}
#endif // DEBUGGING_SUPPORTED
}
extern "C" UINT64 QCALLTYPE ThreadNative_GetProcessDefaultStackSize()
{
QCALL_CONTRACT;
SIZE_T reserve = 0;
SIZE_T commit = 0;
BEGIN_QCALL;
if (!Thread::GetProcessDefaultStackSize(&reserve, &commit))
reserve = 1024 * 1024;
END_QCALL;
return (UINT64)reserve;
}
FCIMPL1(FC_BOOL_RET, ThreadNative::IsThreadpoolThread, ThreadBaseObject* thread)
{
FCALL_CONTRACT;
if (thread==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
Thread *pThread = thread->GetInternal();
if (pThread == NULL)
FCThrowRes(kThreadStateException, W("ThreadState_Dead_State"));
BOOL ret = pThread->IsThreadPoolThread();
FC_GC_POLL_RET();
FC_RETURN_BOOL(ret);
}
FCIMPLEND
FCIMPL1(void, ThreadNative::SetIsThreadpoolThread, ThreadBaseObject* thread)
{
FCALL_CONTRACT;
if (thread == NULL)
FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
Thread *pThread = thread->GetInternal();
if (pThread == NULL)
FCThrowResVoid(kThreadStateException, W("ThreadState_Dead_State"));
pThread->SetIsThreadPoolThread();
}
FCIMPLEND
FCIMPL0(INT32, ThreadNative::GetOptimalMaxSpinWaitsPerSpinIteration)
{
FCALL_CONTRACT;
return (INT32)YieldProcessorNormalization::GetOptimalMaxNormalizedYieldsPerSpinIteration();
}
FCIMPLEND
FCIMPL1(void, ThreadNative::SpinWait, int iterations)
{
FCALL_CONTRACT;
if (iterations <= 0)
{
return;
}
//
// If we're not going to spin for long, it's ok to remain in cooperative mode.
// The threshold is determined by the cost of entering preemptive mode; if we're
// spinning for less than that number of cycles, then switching to preemptive
// mode won't help a GC start any faster.
//
if (iterations <= 100000)
{
YieldProcessorNormalized(iterations);
return;
}
//
// Too many iterations; better switch to preemptive mode to avoid stalling a GC.
//
HELPER_METHOD_FRAME_BEGIN_NOPOLL();
GCX_PREEMP();
YieldProcessorNormalized(iterations);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
extern "C" BOOL QCALLTYPE ThreadNative_YieldThread()
{
QCALL_CONTRACT;
BOOL ret = FALSE;
BEGIN_QCALL
ret = __SwitchToThread(0, CALLER_LIMITS_SPINNING);
END_QCALL
return ret;
}
FCIMPL0(INT32, ThreadNative::GetCurrentProcessorNumber)
{
FCALL_CONTRACT;
#ifndef TARGET_UNIX
PROCESSOR_NUMBER proc_no_cpu_group;
GetCurrentProcessorNumberEx(&proc_no_cpu_group);
return (proc_no_cpu_group.Group << 6) | proc_no_cpu_group.Number;
#else
return ::GetCurrentProcessorNumber();
#endif //!TARGET_UNIX
}
FCIMPLEND;
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Header: COMSynchronizable.cpp
**
** Purpose: Native methods on System.SynchronizableObject
** and its subclasses.
**
**
===========================================================*/
#include "common.h"
#include <object.h>
#include "threads.h"
#include "excep.h"
#include "vars.hpp"
#include "field.h"
#include "comsynchronizable.h"
#include "dbginterface.h"
#include "comdelegate.h"
#include "eeconfig.h"
#include "callhelpers.h"
#include "appdomain.hpp"
#include "appdomain.inl"
#ifndef TARGET_UNIX
#include "utilcode.h"
#endif
// For the following helpers, we make no attempt to synchronize. The app developer
// is responsible for managing their own race conditions.
//
// Note: if the internal Thread is NULL, this implies that the exposed object has
// finalized and then been resurrected.
static inline BOOL ThreadNotStarted(Thread *t)
{
WRAPPER_NO_CONTRACT;
return (t && t->IsUnstarted() && !t->HasValidThreadHandle());
}
static inline BOOL ThreadIsRunning(Thread *t)
{
WRAPPER_NO_CONTRACT;
return (t &&
(t->m_State & (Thread::TS_ReportDead|Thread::TS_Dead)) == 0 &&
(t->HasValidThreadHandle()));
}
static inline BOOL ThreadIsDead(Thread *t)
{
WRAPPER_NO_CONTRACT;
return (t == 0 || t->IsDead());
}
// Map our exposed notion of thread priorities into the enumeration that NT uses.
static INT32 MapToNTPriority(INT32 ours)
{
CONTRACTL
{
GC_NOTRIGGER;
THROWS;
MODE_ANY;
}
CONTRACTL_END;
INT32 NTPriority = 0;
switch (ours)
{
case ThreadNative::PRIORITY_LOWEST:
NTPriority = THREAD_PRIORITY_LOWEST;
break;
case ThreadNative::PRIORITY_BELOW_NORMAL:
NTPriority = THREAD_PRIORITY_BELOW_NORMAL;
break;
case ThreadNative::PRIORITY_NORMAL:
NTPriority = THREAD_PRIORITY_NORMAL;
break;
case ThreadNative::PRIORITY_ABOVE_NORMAL:
NTPriority = THREAD_PRIORITY_ABOVE_NORMAL;
break;
case ThreadNative::PRIORITY_HIGHEST:
NTPriority = THREAD_PRIORITY_HIGHEST;
break;
default:
COMPlusThrow(kArgumentOutOfRangeException, W("Argument_InvalidFlag"));
}
return NTPriority;
}
// Map to our exposed notion of thread priorities from the enumeration that NT uses.
INT32 MapFromNTPriority(INT32 NTPriority)
{
LIMITED_METHOD_CONTRACT;
INT32 ours = 0;
if (NTPriority <= THREAD_PRIORITY_LOWEST)
{
// managed code does not support IDLE. Map it to PRIORITY_LOWEST.
ours = ThreadNative::PRIORITY_LOWEST;
}
else if (NTPriority >= THREAD_PRIORITY_HIGHEST)
{
ours = ThreadNative::PRIORITY_HIGHEST;
}
else if (NTPriority == THREAD_PRIORITY_BELOW_NORMAL)
{
ours = ThreadNative::PRIORITY_BELOW_NORMAL;
}
else if (NTPriority == THREAD_PRIORITY_NORMAL)
{
ours = ThreadNative::PRIORITY_NORMAL;
}
else if (NTPriority == THREAD_PRIORITY_ABOVE_NORMAL)
{
ours = ThreadNative::PRIORITY_ABOVE_NORMAL;
}
else
{
_ASSERTE (!"not supported priority");
ours = ThreadNative::PRIORITY_NORMAL;
}
return ours;
}
void ThreadNative::KickOffThread_Worker(LPVOID ptr)
{
CONTRACTL
{
GC_TRIGGERS;
THROWS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
KickOffThread_Args *pKickOffArgs = (KickOffThread_Args *) ptr;
pKickOffArgs->retVal = 0;
PREPARE_NONVIRTUAL_CALLSITE(METHOD__THREAD__START_CALLBACK);
DECLARE_ARGHOLDER_ARRAY(args, 1);
args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(GetThread()->GetExposedObjectRaw());
CALL_MANAGED_METHOD_NORET(args);
}
// Helper to avoid two EX_TRY/EX_CATCH blocks in one function
static void PulseAllHelper(Thread* pThread)
{
CONTRACTL
{
GC_TRIGGERS;
DISABLED(NOTHROW);
MODE_COOPERATIVE;
}
CONTRACTL_END;
EX_TRY
{
// GetExposedObject() will either throw, or we have a valid object. Note
// that we re-acquire it each time, since it may move during calls.
pThread->GetExposedObject()->EnterObjMonitor();
pThread->GetExposedObject()->PulseAll();
pThread->GetExposedObject()->LeaveObjMonitor();
}
EX_CATCH
{
// just keep going...
}
EX_END_CATCH(SwallowAllExceptions)
}
// When an exposed thread is started by Win32, this is where it starts.
ULONG WINAPI ThreadNative::KickOffThread(void* pass)
{
CONTRACTL
{
GC_TRIGGERS;
THROWS;
MODE_ANY;
}
CONTRACTL_END;
Thread* pThread = (Thread*)pass;
_ASSERTE(pThread != NULL);
if (pThread->HasStarted())
{
// Do not swallow the unhandled exception here
//
// Fire ETW event to correlate with the thread that created current thread
if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, ThreadRunning))
FireEtwThreadRunning(pThread, GetClrInstanceId());
// We have a sticky problem here.
//
// Under some circumstances, the context of 'this' doesn't match the context
// of the thread. Today this can only happen if the thread is marked for an
// STA. If so, the delegate that is stored in the object may not be directly
// suitable for invocation. Instead, we need to call through a proxy so that
// the correct context transitions occur.
//
// All the changes occur inside HasStarted(), which will switch this thread
// over to a brand new STA as necessary. We have to notice this happening, so
// we can adjust the delegate we are going to invoke on.
_ASSERTE(GetThread() == pThread); // Now that it's started
KickOffThread_Args args;
args.share = NULL;
args.pThread = pThread;
ManagedThreadBase::KickOff(KickOffThread_Worker, &args);
PulseAllHelper(pThread);
GCX_PREEMP_NO_DTOR();
pThread->ClearThreadCPUGroupAffinity();
DestroyThread(pThread);
}
return 0;
}
extern "C" void QCALLTYPE ThreadNative_Start(QCall::ThreadHandle thread, int threadStackSize, int priority, PCWSTR pThreadName)
{
QCALL_CONTRACT;
BEGIN_QCALL;
ThreadNative::Start(thread, threadStackSize, priority, pThreadName);
END_QCALL;
}
void ThreadNative::Start(Thread* pNewThread, int threadStackSize, int priority, PCWSTR pThreadName)
{
_ASSERTE(pNewThread != NULL);
// Is the thread already started? You can't restart a thread.
if (!ThreadNotStarted(pNewThread))
{
COMPlusThrow(kThreadStateException, W("ThreadState_AlreadyStarted"));
}
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Attempt to eagerly set the apartment state during thread startup.
Thread::ApartmentState as = pNewThread->GetExplicitApartment();
if (as == Thread::AS_Unknown)
{
pNewThread->SetApartment(Thread::AS_InMTA);
}
#endif
pNewThread->IncExternalCount();
// Fire an ETW event to mark the current thread as the launcher of the new thread
if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, ThreadCreating))
FireEtwThreadCreating(pNewThread, GetClrInstanceId());
// As soon as we create the new thread, it is eligible for suspension, etc.
// So it gets transitioned to cooperative mode before this call returns to
// us. It is our duty to start it running immediately, so that GC isn't blocked.
BOOL success = pNewThread->CreateNewThread(
threadStackSize /* 0 stackSize override*/,
KickOffThread, pNewThread, pThreadName);
if (!success)
{
pNewThread->DecExternalCount(FALSE);
COMPlusThrowOM();
}
// After we have established the thread handle, we can check m_Priority.
// This ordering is required to eliminate the race condition on setting the
// priority of a thread just as it starts up.
pNewThread->SetThreadPriority(MapToNTPriority(priority));
pNewThread->ChooseThreadCPUGroupAffinity();
FastInterlockOr((ULONG *) &pNewThread->m_State, Thread::TS_LegalToJoin);
DWORD ret = pNewThread->StartThread();
// When running under a user mode native debugger there is a race
// between the moment we've created the thread (in CreateNewThread) and
// the moment we resume it (in StartThread); the debugger may receive
// the "ct" (create thread) notification, and it will attempt to
// suspend/resume all threads in the process. Now imagine the debugger
// resumes this thread first, and only later does it try to resume the
// newly created thread. In these conditions our call to ResumeThread
// may come before the debugger's call to ResumeThread actually causing
// ret to equal 2.
// We cannot use IsDebuggerPresent() in the condition below because the
// debugger may have been detached between the time it got the notification
// and the moment we execute the test below.
_ASSERTE(ret == 1 || ret == 2);
// Synchronize with HasStarted.
YIELD_WHILE (!pNewThread->HasThreadState(Thread::TS_FailStarted) &&
pNewThread->HasThreadState(Thread::TS_Unstarted));
if (pNewThread->HasThreadState(Thread::TS_FailStarted))
{
GCX_COOP();
PulseAllHelper(pNewThread);
pNewThread->HandleThreadStartupFailure();
}
}
// Note that you can manipulate the priority of a thread that hasn't started yet,
// or one that is running. But you get an exception if you manipulate the priority
// of a thread that has died.
FCIMPL1(INT32, ThreadNative::GetPriority, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
// validate the handle
if (ThreadIsDead(pThisUNSAFE->GetInternal()))
FCThrowRes(kThreadStateException, W("ThreadState_Dead_Priority"));
return pThisUNSAFE->m_Priority;
}
FCIMPLEND
FCIMPL2(void, ThreadNative::SetPriority, ThreadBaseObject* pThisUNSAFE, INT32 iPriority)
{
FCALL_CONTRACT;
int priority;
Thread *thread;
THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
HELPER_METHOD_FRAME_BEGIN_1(pThis);
if (pThis==NULL)
{
COMPlusThrow(kNullReferenceException, W("NullReference_This"));
}
// translate the priority (validating as well)
priority = MapToNTPriority(iPriority); // can throw; needs a frame
// validate the thread
thread = pThis->GetInternal();
if (ThreadIsDead(thread))
{
COMPlusThrow(kThreadStateException, W("ThreadState_Dead_Priority"));
}
INT32 oldPriority = pThis->m_Priority;
    // Eliminate the race condition by establishing m_Priority before we check whether
// the thread is running. See ThreadNative::Start() for the other half.
pThis->m_Priority = iPriority;
if (!thread->SetThreadPriority(priority))
{
pThis->m_Priority = oldPriority;
COMPlusThrow(kThreadStateException, W("ThreadState_SetPriorityFailed"));
}
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
// This service can be called on unstarted and dead threads. For unstarted ones, the
// next wait will be interrupted. For dead ones, this service quietly does nothing.
FCIMPL1(void, ThreadNative::Interrupt, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
Thread *thread = pThisUNSAFE->GetInternal();
if (thread == 0)
FCThrowExVoid(kThreadStateException, IDS_EE_THREAD_CANNOT_GET, NULL, NULL, NULL);
HELPER_METHOD_FRAME_BEGIN_0();
thread->UserInterrupt(Thread::TI_Interrupt);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
FCIMPL1(FC_BOOL_RET, ThreadNative::IsAlive, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
THREADBASEREF thisRef(pThisUNSAFE);
BOOL ret = false;
// Keep managed Thread object alive, since the native object's
// lifetime is tied to the managed object's finalizer. And with
// resurrection, it may be possible to get a dangling pointer here -
// consider both protecting thisRef and setting the managed object's
// Thread* to NULL in the GC's ScanForFinalization method.
HELPER_METHOD_FRAME_BEGIN_RET_1(thisRef);
Thread *thread = thisRef->GetInternal();
if (thread == 0)
COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
ret = ThreadIsRunning(thread);
HELPER_METHOD_POLL();
HELPER_METHOD_FRAME_END();
FC_RETURN_BOOL(ret);
}
FCIMPLEND
FCIMPL2(FC_BOOL_RET, ThreadNative::Join, ThreadBaseObject* pThisUNSAFE, INT32 Timeout)
{
FCALL_CONTRACT;
BOOL retVal = FALSE;
THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
HELPER_METHOD_FRAME_BEGIN_RET_1(pThis);
if (pThis==NULL)
COMPlusThrow(kNullReferenceException, W("NullReference_This"));
// validate the timeout
if ((Timeout < 0) && (Timeout != INFINITE_TIMEOUT))
COMPlusThrowArgumentOutOfRange(W("millisecondsTimeout"), W("ArgumentOutOfRange_NeedNonNegOrNegative1"));
retVal = DoJoin(pThis, Timeout);
HELPER_METHOD_FRAME_END();
FC_RETURN_BOOL(retVal);
}
FCIMPLEND
#undef Sleep
FCIMPL1(void, ThreadNative::Sleep, INT32 iTime)
{
FCALL_CONTRACT;
HELPER_METHOD_FRAME_BEGIN_0();
GetThread()->UserSleep(iTime);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
#define Sleep(dwMilliseconds) Dont_Use_Sleep(dwMilliseconds)
extern "C" void QCALLTYPE ThreadNative_UninterruptibleSleep0()
{
QCALL_CONTRACT;
BEGIN_QCALL;
ClrSleepEx(0, false);
END_QCALL;
}
FCIMPL1(INT32, ThreadNative::GetManagedThreadId, ThreadBaseObject* th) {
FCALL_CONTRACT;
FC_GC_POLL_NOT_NEEDED();
if (th == NULL)
FCThrow(kNullReferenceException);
return th->GetManagedThreadId();
}
FCIMPLEND
NOINLINE static Object* GetCurrentThreadHelper()
{
FCALL_CONTRACT;
FC_INNER_PROLOG(ThreadNative::GetCurrentThread);
OBJECTREF refRetVal = NULL;
HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, refRetVal);
refRetVal = GetThread()->GetExposedObject();
HELPER_METHOD_FRAME_END();
FC_INNER_EPILOG();
return OBJECTREFToObject(refRetVal);
}
FCIMPL0(Object*, ThreadNative::GetCurrentThread)
{
FCALL_CONTRACT;
OBJECTHANDLE ExposedObject = GetThread()->m_ExposedObject;
_ASSERTE(ExposedObject != 0); //Thread's constructor always initializes its GCHandle
Object* result = *((Object**) ExposedObject);
if (result != 0)
return result;
FC_INNER_RETURN(Object*, GetCurrentThreadHelper());
}
FCIMPLEND
extern "C" UINT64 QCALLTYPE ThreadNative_GetCurrentOSThreadId()
{
QCALL_CONTRACT;
// The Windows API GetCurrentThreadId returns a 32-bit integer thread ID.
// On some non-Windows platforms (e.g. OSX), the thread ID is a 64-bit value.
// We special case the API for non-Windows to get the 64-bit value and zero-extend
// the Windows value to return a single data type on all platforms.
UINT64 threadId;
BEGIN_QCALL;
#ifndef TARGET_UNIX
threadId = (UINT64) GetCurrentThreadId();
#else
threadId = (UINT64) PAL_GetCurrentOSThreadId();
#endif
END_QCALL;
return threadId;
}
FCIMPL1(void, ThreadNative::Initialize, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
HELPER_METHOD_FRAME_BEGIN_1(pThis);
_ASSERTE(pThis != NULL);
_ASSERTE(pThis->m_InternalThread == NULL);
// if we don't have an internal Thread object associated with this exposed object,
// now is our first opportunity to create one.
Thread *unstarted = SetupUnstartedThread();
PREFIX_ASSUME(unstarted != NULL);
if (GetThread()->GetDomain()->IgnoreUnhandledExceptions())
{
unstarted->SetThreadStateNC(Thread::TSNC_IgnoreUnhandledExceptions);
}
pThis->SetInternal(unstarted);
pThis->SetManagedThreadId(unstarted->GetThreadId());
unstarted->SetExposedObject(pThis);
// Initialize the thread priority to normal.
pThis->SetPriority(ThreadNative::PRIORITY_NORMAL);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
// Set whether or not this is a background thread.
FCIMPL2(void, ThreadNative::SetBackground, ThreadBaseObject* pThisUNSAFE, CLR_BOOL isBackground)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
// validate the thread
Thread *thread = pThisUNSAFE->GetInternal();
if (ThreadIsDead(thread))
FCThrowResVoid(kThreadStateException, W("ThreadState_Dead_State"));
HELPER_METHOD_FRAME_BEGIN_0();
thread->SetBackground(isBackground);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
// Return whether or not this is a background thread.
FCIMPL1(FC_BOOL_RET, ThreadNative::IsBackground, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
// validate the thread
Thread *thread = pThisUNSAFE->GetInternal();
if (ThreadIsDead(thread))
FCThrowRes(kThreadStateException, W("ThreadState_Dead_State"));
FC_RETURN_BOOL(thread->IsBackground());
}
FCIMPLEND
// Deliver the state of the thread as a consistent set of bits.
// This copied in VM\EEDbgInterfaceImpl.h's
// CorDebugUserState GetUserState( Thread *pThread )
// , so propagate changes to both functions
FCIMPL1(INT32, ThreadNative::GetThreadState, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
INT32 res = 0;
Thread::ThreadState state;
if (pThisUNSAFE==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
// validate the thread. Failure here implies that the thread was finalized
// and then resurrected.
Thread *thread = pThisUNSAFE->GetInternal();
if (!thread)
FCThrowEx(kThreadStateException, IDS_EE_THREAD_CANNOT_GET, NULL, NULL, NULL);
HELPER_METHOD_FRAME_BEGIN_RET_0();
// grab a snapshot
state = thread->GetSnapshotState();
if (state & Thread::TS_Background)
res |= ThreadBackground;
if (state & Thread::TS_Unstarted)
res |= ThreadUnstarted;
// Don't report a StopRequested if the thread has actually stopped.
if (state & Thread::TS_Dead)
{
res |= ThreadStopped;
}
else
{
if (state & Thread::TS_AbortRequested)
res |= ThreadAbortRequested;
}
if (state & Thread::TS_Interruptible)
res |= ThreadWaitSleepJoin;
HELPER_METHOD_POLL();
HELPER_METHOD_FRAME_END();
return res;
}
FCIMPLEND
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Indicate whether the thread will host an STA (this may fail if the thread has
// already been made part of the MTA, use GetApartmentState or the return state
// from this routine to check for this).
FCIMPL2(INT32, ThreadNative::SetApartmentState, ThreadBaseObject* pThisUNSAFE, INT32 iState)
{
FCALL_CONTRACT;
if (pThisUNSAFE==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
INT32 retVal = ApartmentUnknown;
BOOL ok = TRUE;
THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
HELPER_METHOD_FRAME_BEGIN_RET_1(pThis);
// Translate state input. ApartmentUnknown is not an acceptable input state.
// Throw an exception here rather than pass it through to the internal
// routine, which asserts.
Thread::ApartmentState state = Thread::AS_Unknown;
if (iState == ApartmentSTA)
state = Thread::AS_InSTA;
else if (iState == ApartmentMTA)
state = Thread::AS_InMTA;
else if (iState == ApartmentUnknown)
state = Thread::AS_Unknown;
else
COMPlusThrow(kArgumentOutOfRangeException, W("ArgumentOutOfRange_Enum"));
Thread *thread = pThis->GetInternal();
if (!thread)
COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
{
pThis->EnterObjMonitor();
// We can only change the apartment if the thread is unstarted or
// running, and if it's running we have to be in the thread's
// context.
if ((!ThreadNotStarted(thread) && !ThreadIsRunning(thread)) ||
(!ThreadNotStarted(thread) && (GetThread() != thread)))
ok = FALSE;
else
{
EX_TRY
{
state = thread->SetApartment(state);
}
EX_CATCH
{
pThis->LeaveObjMonitor();
EX_RETHROW;
}
EX_END_CATCH_UNREACHABLE;
}
pThis->LeaveObjMonitor();
}
// Now it's safe to throw exceptions again.
if (!ok)
COMPlusThrow(kThreadStateException);
// Translate state back into external form
if (state == Thread::AS_InSTA)
retVal = ApartmentSTA;
else if (state == Thread::AS_InMTA)
retVal = ApartmentMTA;
else if (state == Thread::AS_Unknown)
retVal = ApartmentUnknown;
else
_ASSERTE(!"Invalid state returned from SetApartment");
HELPER_METHOD_FRAME_END();
return retVal;
}
FCIMPLEND
// Return whether the thread hosts an STA, is a member of the MTA or is not
// currently initialized for COM.
FCIMPL1(INT32, ThreadNative::GetApartmentState, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
INT32 retVal = 0;
THREADBASEREF refThis = (THREADBASEREF) ObjectToOBJECTREF(pThisUNSAFE);
HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
if (refThis == NULL)
{
COMPlusThrow(kNullReferenceException, W("NullReference_This"));
}
Thread* thread = refThis->GetInternal();
if (ThreadIsDead(thread))
{
COMPlusThrow(kThreadStateException, W("ThreadState_Dead_State"));
}
Thread::ApartmentState state = thread->GetApartment();
#ifdef FEATURE_COMINTEROP
if (state == Thread::AS_Unknown)
{
// If the CLR hasn't started COM yet, start it up and attempt the call again.
// We do this in order to minimize the number of situations under which we return
// ApartmentState.Unknown to our callers.
if (!g_fComStarted)
{
EnsureComStarted();
state = thread->GetApartment();
}
}
#endif // FEATURE_COMINTEROP
// Translate state into external form
retVal = ApartmentUnknown;
if (state == Thread::AS_InSTA)
{
retVal = ApartmentSTA;
}
else if (state == Thread::AS_InMTA)
{
retVal = ApartmentMTA;
}
else if (state == Thread::AS_Unknown)
{
retVal = ApartmentUnknown;
}
else
{
_ASSERTE(!"Invalid state returned from GetApartment");
}
HELPER_METHOD_FRAME_END();
return retVal;
}
FCIMPLEND
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
void ReleaseThreadExternalCount(Thread * pThread)
{
WRAPPER_NO_CONTRACT;
pThread->DecExternalCount(FALSE);
}
typedef Holder<Thread *, DoNothing, ReleaseThreadExternalCount> ThreadExternalCountHolder;
// Wait for the thread to die
BOOL ThreadNative::DoJoin(THREADBASEREF DyingThread, INT32 timeout)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(DyingThread != NULL);
PRECONDITION((timeout >= 0) || (timeout == INFINITE_TIMEOUT));
}
CONTRACTL_END;
Thread * DyingInternal = DyingThread->GetInternal();
// Validate the handle. It's valid to Join a thread that's not running -- so
// long as it was once started.
if (DyingInternal == 0 ||
!(DyingInternal->m_State & Thread::TS_LegalToJoin))
{
COMPlusThrow(kThreadStateException, W("ThreadState_NotStarted"));
}
// Don't grab the handle until we know it has started, to eliminate the race
// condition.
if (ThreadIsDead(DyingInternal) || !DyingInternal->HasValidThreadHandle())
return TRUE;
DWORD dwTimeOut32 = (timeout == INFINITE_TIMEOUT
? INFINITE
: (DWORD) timeout);
// There is a race here. DyingThread is going to close its thread handle.
// If we grab the handle and then DyingThread closes it, we will wait forever
// in DoAppropriateWait.
int RefCount = DyingInternal->IncExternalCount();
if (RefCount == 1)
{
// !!! We resurrect the Thread Object.
// !!! We will keep the Thread ref count to be 1 so that we will not try
// !!! to destroy the Thread Object again.
// !!! Do not call DecExternalCount here!
_ASSERTE (!DyingInternal->HasValidThreadHandle());
return TRUE;
}
ThreadExternalCountHolder dyingInternalHolder(DyingInternal);
if (!DyingInternal->HasValidThreadHandle())
{
return TRUE;
}
GCX_PREEMP();
DWORD rv = DyingInternal->JoinEx(dwTimeOut32, (WaitMode)(WaitMode_Alertable/*alertable*/|WaitMode_InDeadlock));
switch(rv)
{
case WAIT_OBJECT_0:
return TRUE;
case WAIT_TIMEOUT:
break;
case WAIT_FAILED:
if(!DyingInternal->HasValidThreadHandle())
return TRUE;
break;
default:
_ASSERTE(!"This return code is not understood \n");
break;
}
return FALSE;
}
// If the exposed object is created after-the-fact, for an existing thread, we call
// InitExisting on it. This is the other "construction", as opposed to SetDelegate.
void ThreadBaseObject::InitExisting()
{
CONTRACTL
{
GC_NOTRIGGER;
NOTHROW;
MODE_COOPERATIVE;
}
CONTRACTL_END;
Thread *pThread = GetInternal();
_ASSERTE (pThread);
switch (pThread->GetThreadPriority())
{
case THREAD_PRIORITY_LOWEST:
case THREAD_PRIORITY_IDLE:
m_Priority = ThreadNative::PRIORITY_LOWEST;
break;
case THREAD_PRIORITY_BELOW_NORMAL:
m_Priority = ThreadNative::PRIORITY_BELOW_NORMAL;
break;
case THREAD_PRIORITY_NORMAL:
m_Priority = ThreadNative::PRIORITY_NORMAL;
break;
case THREAD_PRIORITY_ABOVE_NORMAL:
m_Priority = ThreadNative::PRIORITY_ABOVE_NORMAL;
break;
case THREAD_PRIORITY_HIGHEST:
case THREAD_PRIORITY_TIME_CRITICAL:
m_Priority = ThreadNative::PRIORITY_HIGHEST;
break;
case THREAD_PRIORITY_ERROR_RETURN:
_ASSERTE(FALSE);
m_Priority = ThreadNative::PRIORITY_NORMAL;
break;
default:
m_Priority = ThreadNative::PRIORITY_NORMAL;
break;
}
}
FCIMPL1(void, ThreadNative::Finalize, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
// This function is intentionally blank.
// See comment in code:MethodTable::CallFinalizer.
_ASSERTE (!"Should not be called");
FCUnique(0x21);
}
FCIMPLEND
#ifdef FEATURE_COMINTEROP
FCIMPL1(void, ThreadNative::DisableComObjectEagerCleanup, ThreadBaseObject* pThisUNSAFE)
{
FCALL_CONTRACT;
_ASSERTE(pThisUNSAFE != NULL);
VALIDATEOBJECT(pThisUNSAFE);
Thread *pThread = pThisUNSAFE->GetInternal();
HELPER_METHOD_FRAME_BEGIN_0();
if (pThread == NULL)
COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
pThread->SetDisableComObjectEagerCleanup();
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
#endif //FEATURE_COMINTEROP
extern "C" void QCALLTYPE ThreadNative_InformThreadNameChange(QCall::ThreadHandle thread, LPCWSTR name, INT32 len)
{
QCALL_CONTRACT;
BEGIN_QCALL;
ThreadNative::InformThreadNameChange(thread, name, len);
END_QCALL;
}
void ThreadNative::InformThreadNameChange(Thread* pThread, LPCWSTR name, INT32 len)
{
    // On Windows 10 Creators Update and later, also set the unmanaged thread name. It shows up in ETW traces and debuggers,
    // which is very helpful as more and more threads get a meaningful name.
    // The name also shows up on Linux, in gdb and similar tools.
if (len > 0 && name != NULL && pThread->GetThreadHandle() != INVALID_HANDLE_VALUE)
{
SetThreadName(pThread->GetThreadHandle(), name);
}
#ifdef PROFILING_SUPPORTED
{
BEGIN_PROFILER_CALLBACK(CORProfilerTrackThreads());
if (name == NULL)
{
(&g_profControlBlock)->ThreadNameChanged((ThreadID)pThread, 0, NULL);
}
else
{
(&g_profControlBlock)->ThreadNameChanged((ThreadID)pThread, len, (WCHAR*)name);
}
END_PROFILER_CALLBACK();
}
#endif // PROFILING_SUPPORTED
#ifdef DEBUGGING_SUPPORTED
if (CORDebuggerAttached())
{
_ASSERTE(NULL != g_pDebugInterface);
g_pDebugInterface->NameChangeEvent(NULL, pThread);
}
#endif // DEBUGGING_SUPPORTED
}
extern "C" UINT64 QCALLTYPE ThreadNative_GetProcessDefaultStackSize()
{
QCALL_CONTRACT;
SIZE_T reserve = 0;
SIZE_T commit = 0;
BEGIN_QCALL;
if (!Thread::GetProcessDefaultStackSize(&reserve, &commit))
reserve = 1024 * 1024;
END_QCALL;
return (UINT64)reserve;
}
FCIMPL1(FC_BOOL_RET, ThreadNative::IsThreadpoolThread, ThreadBaseObject* thread)
{
FCALL_CONTRACT;
if (thread==NULL)
FCThrowRes(kNullReferenceException, W("NullReference_This"));
Thread *pThread = thread->GetInternal();
if (pThread == NULL)
FCThrowRes(kThreadStateException, W("ThreadState_Dead_State"));
BOOL ret = pThread->IsThreadPoolThread();
FC_GC_POLL_RET();
FC_RETURN_BOOL(ret);
}
FCIMPLEND
FCIMPL1(void, ThreadNative::SetIsThreadpoolThread, ThreadBaseObject* thread)
{
FCALL_CONTRACT;
if (thread == NULL)
FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
Thread *pThread = thread->GetInternal();
if (pThread == NULL)
FCThrowResVoid(kThreadStateException, W("ThreadState_Dead_State"));
pThread->SetIsThreadPoolThread();
}
FCIMPLEND
FCIMPL0(INT32, ThreadNative::GetOptimalMaxSpinWaitsPerSpinIteration)
{
FCALL_CONTRACT;
return (INT32)YieldProcessorNormalization::GetOptimalMaxNormalizedYieldsPerSpinIteration();
}
FCIMPLEND
FCIMPL1(void, ThreadNative::SpinWait, int iterations)
{
FCALL_CONTRACT;
if (iterations <= 0)
{
return;
}
//
// If we're not going to spin for long, it's ok to remain in cooperative mode.
// The threshold is determined by the cost of entering preemptive mode; if we're
// spinning for less than that number of cycles, then switching to preemptive
// mode won't help a GC start any faster.
//
if (iterations <= 100000)
{
YieldProcessorNormalized(iterations);
return;
}
//
// Too many iterations; better switch to preemptive mode to avoid stalling a GC.
//
HELPER_METHOD_FRAME_BEGIN_NOPOLL();
GCX_PREEMP();
YieldProcessorNormalized(iterations);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
extern "C" BOOL QCALLTYPE ThreadNative_YieldThread()
{
QCALL_CONTRACT;
BOOL ret = FALSE;
BEGIN_QCALL
ret = __SwitchToThread(0, CALLER_LIMITS_SPINNING);
END_QCALL
return ret;
}
FCIMPL0(INT32, ThreadNative::GetCurrentProcessorNumber)
{
FCALL_CONTRACT;
#ifndef TARGET_UNIX
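// Windows identifies a CPU as a (group, number) pair, and a group holds at most 64 logical processors,
// so the group index is packed above the 6-bit processor number, e.g. group 1, processor 5 -> (1 << 6) | 5 = 69.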
PROCESSOR_NUMBER proc_no_cpu_group;
GetCurrentProcessorNumberEx(&proc_no_cpu_group);
return (proc_no_cpu_group.Group << 6) | proc_no_cpu_group.Number;
#else
return ::GetCurrentProcessorNumber();
#endif //!TARGET_UNIX
}
FCIMPLEND;
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
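To make the failure mode concrete, here is a minimal illustrative sketch (not code from this PR) of the per-slot bookkeeping the description implies; the helper names follow RyuJIT conventions but are used here as assumptions for illustration, not quotations from the change:
// Hypothetical walk over the register slots of a multi-reg GT_STORE_LCL_VAR; names are illustrative.
for (unsigned i = 0; i < regCount; i++)
{
    regNumber reg = store->GetRegByIndex(i); // REG_NA when the i'th field stays on the stack
    if (reg == REG_NA)
    {
        continue; // unenregistered slot: nothing to mark in the register GC sets
    }
    var_types slotType = store->GetRegTypeByIndex(i);
    if (varTypeIsGC(slotType))
    {
        // This must run even when slot 0 was not enregistered; skipping it is exactly
        // the under-reported register lifetime described above.
        gcInfo.gcMarkRegPtrVal(reg, slotType);
    }
}
Relatedly, gcMarkRegSetNpt (cleanup item 1) takes a register mask, so a correct call passes genRegMask(reg) rather than the bare regNumber.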
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/inc/rt/poppack.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// ===========================================================================
// File: poppack.h
//
// ===========================================================================
/*
Abstract:
This file turns packing of structures off. (That is, it enables
automatic alignment of structure fields.) An include file is needed
because various compilers do this in different ways.
poppack.h is the complement to pshpack?.h. An inclusion of poppack.h
MUST ALWAYS be preceded by an inclusion of one of pshpack?.h, in one-to-one
correspondence.
For Microsoft compatible compilers, this file uses the pop option
to the pack pragma so that it can restore the previous saved by the
pshpack?.h include file.
*/
#if ! (defined(lint) || defined(RC_INVOKED))
#if ( _MSC_VER >= 800 && !defined(_M_I86)) || defined(_PUSHPOP_SUPPORTED)
#pragma warning(disable:4103)
#if !(defined( MIDL_PASS )) || defined( __midl )
#pragma pack(pop)
#else
#pragma pack()
#endif
#else
#pragma pack()
#endif
#endif // ! (defined(lint) || defined(RC_INVOKED))
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// ===========================================================================
// File: poppack.h
//
// ===========================================================================
/*
Abstract:
This file turns packing of structures off. (That is, it enables
automatic alignment of structure fields.) An include file is needed
because various compilers do this in different ways.
poppack.h is the complement to pshpack?.h. An inclusion of poppack.h
MUST ALWAYS be preceded by an inclusion of one of pshpack?.h, in one-to-one
correspondence.
For Microsoft compatible compilers, this file uses the pop option
to the pack pragma so that it can restore the previous saved by the
pshpack?.h include file.
*/
#if ! (defined(lint) || defined(RC_INVOKED))
#if ( _MSC_VER >= 800 && !defined(_M_I86)) || defined(_PUSHPOP_SUPPORTED)
#pragma warning(disable:4103)
#if !(defined( MIDL_PASS )) || defined( __midl )
#pragma pack(pop)
#else
#pragma pack()
#endif
#else
#pragma pack()
#endif
#endif // ! (defined(lint) || defined(RC_INVOKED))
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/tools/metainfo/mdobj.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <ctype.h>
#include <crtdbg.h>
#include "mdinfo.h"
#ifndef STRING_BUFFER_LEN
#define STRING_BUFFER_LEN 4096
#endif
#define OBJ_EXT ".obj"
#define OBJ_EXT_W W(".obj")
#define OBJ_EXT_LEN 4
#define LIB_EXT ".lib"
#define LIB_EXT_W W(".lib")
#define LIB_EXT_LEN 4
extern IMetaDataDispenserEx *g_pDisp;
extern DWORD g_ValModuleType;
// This function is copied from peparse.c file. Making this static, so we won't end up with
// duplicate definitions causing confusion.
static const char g_szCORMETA[] = ".cormeta";
static HRESULT FindObjMetaData(PVOID pImage, PVOID *ppMetaData, long *pcbMetaData)
{
IMAGE_FILE_HEADER *pImageHdr; // Header for the .obj file.
IMAGE_SECTION_HEADER *pSectionHdr; // Section header.
WORD i; // Loop control.
// Get a pointer to the header and the first section.
pImageHdr = (IMAGE_FILE_HEADER *) pImage;
pSectionHdr = (IMAGE_SECTION_HEADER *)(pImageHdr + 1);
// Avoid confusion.
*ppMetaData = NULL;
*pcbMetaData = 0;
// Walk each section looking for .cormeta.
for (i=0; i<VAL16(pImageHdr->NumberOfSections); i++, pSectionHdr++)
{
// Simple comparison to section name.
if (strcmp((const char *) pSectionHdr->Name, g_szCORMETA) == 0)
{
*pcbMetaData = VAL32(pSectionHdr->SizeOfRawData);
*ppMetaData = (void *) ((UINT_PTR)pImage + VAL32(pSectionHdr->PointerToRawData));
break;
}
}
// Check for errors.
if (*ppMetaData == NULL || *pcbMetaData == 0)
return (E_FAIL);
return (S_OK);
}
// This function returns the address to the MapView of file and file size.
void GetMapViewOfFile(_In_ WCHAR *szFile, PBYTE *ppbMap, DWORD *pdwFileSize)
{
HANDLE hMapFile;
DWORD dwHighSize;
HANDLE hFile = WszCreateFile(szFile,
GENERIC_READ,
FILE_SHARE_READ,
NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN,
NULL);
if (hFile == INVALID_HANDLE_VALUE)
MDInfo::Error("CreateFileA failed!");
*pdwFileSize = GetFileSize(hFile, &dwHighSize);
if ((*pdwFileSize == 0xFFFFFFFF) && (GetLastError() != NO_ERROR))
{
CloseHandle(hFile);
MDInfo::Error("GetFileSize failed!");
}
_ASSERTE(dwHighSize == 0);
hMapFile = CreateFileMappingW(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
CloseHandle(hFile);
if (!hMapFile)
MDInfo::Error("CreateFileMappingW failed!");
*ppbMap = (PBYTE) MapViewOfFile(hMapFile, FILE_MAP_READ, 0, 0, 0);
CloseHandle(hMapFile);
if (!*ppbMap)
MDInfo::Error("MapViewOfFile failed!");
} // void GetMapViewOfFile()
// This function skips a member given the pointer to the member header
// and returns a pointer to the next header.
PBYTE SkipMember(PBYTE pbMapAddress)
{
PIMAGE_ARCHIVE_MEMBER_HEADER pMemHdr;
ULONG ulMemSize;
int j;
pMemHdr = (PIMAGE_ARCHIVE_MEMBER_HEADER)pbMapAddress;
// Get size of the member.
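// The Size field is space-padded decimal ASCII in the archive member header, e.g. "1234      " parses as 1234.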
ulMemSize = 0;
for (j = 0; j < 10; j++)
{
if (pMemHdr->Size[j] < '0' || pMemHdr->Size[j] > '9')
break;
else
ulMemSize = ulMemSize * 10 + pMemHdr->Size[j] - '0';
}
// Skip past the header.
pbMapAddress += IMAGE_SIZEOF_ARCHIVE_MEMBER_HDR + ulMemSize;
// Find the next even address if the current one is not even.
if ((ULONG_PTR)pbMapAddress % 2)
pbMapAddress++;
return pbMapAddress;
} // void SkipMember()
// This function returns the name of the given Obj. If the name fits in the header,
// szBuf will be filled in and returned from the function. Otherwise a pointer into the long
// names section is returned.
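// For example, an inline name is stored as "foo.obj/" and terminated by '/', while "/172" means the real
// (long) name starts at byte offset 172 of the long-names ("//") member.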
char *GetNameOfObj(PBYTE pbLongNames, PIMAGE_ARCHIVE_MEMBER_HEADER pMemHdr, char szBuf[17])
{
if (pMemHdr->Name[0] == '/')
{
ULONG ulOffset = 0;
// Long Names section must exist if the .obj file name starts with '/'.
_ASSERTE(pbLongNames &&
"Corrupt archive file - .obj file name in the header starts with "
"'/' but no long names section present in the archive file.");
// Calculate the offset into the long names section.
for (int j = 1; j < 16; j++)
{
if (pMemHdr->Name[j] < '0' || pMemHdr->Name[j] > '9')
break;
else
ulOffset = ulOffset * 10 + pMemHdr->Name[j] - '0';
}
return (char *)(pbLongNames + ulOffset);
}
else
{
int j;
for (j = 0; j < 16; j++)
if ((szBuf[j] = pMemHdr->Name[j]) == '/')
break;
szBuf[j] = '\0';
return szBuf;
}
} // char *GetNameOfObj()
// DisplayArchive() function
//
// Opens the .LIB file, and displays the metadata in the specified object files.
void DisplayArchive(_In_z_ WCHAR* szFile, ULONG DumpFilter, _In_opt_z_ WCHAR* szObjName, strPassBackFn pDisplayString)
{
PBYTE pbMapAddress;
PBYTE pbStartAddress;
PBYTE pbLongNameAddress;
PIMAGE_ARCHIVE_MEMBER_HEADER pMemHdr;
DWORD dwFileSize;
PVOID pvMetaData;
char *szName;
WCHAR wzName[1024];
char szBuf[17];
long cbMetaData;
int i;
HRESULT hr;
char szString[1024];
GetMapViewOfFile(szFile, &pbMapAddress, &dwFileSize);
pbStartAddress = pbMapAddress;
// Verify and skip archive signature.
if (dwFileSize < IMAGE_ARCHIVE_START_SIZE ||
strncmp((char *)pbMapAddress, IMAGE_ARCHIVE_START, IMAGE_ARCHIVE_START_SIZE))
{
MDInfo::Error("Bad file format - archive signature mis-match!");
}
pbMapAddress += IMAGE_ARCHIVE_START_SIZE;
// Skip linker member 1, linker member 2.
for (i = 0; i < 2; i++)
pbMapAddress = SkipMember(pbMapAddress);
// Save address of the long name member and skip it if there exists one.
pMemHdr = (PIMAGE_ARCHIVE_MEMBER_HEADER)pbMapAddress;
if (pMemHdr->Name[0] == '/' && pMemHdr->Name[1] == '/')
{
pbLongNameAddress = pbMapAddress + IMAGE_SIZEOF_ARCHIVE_MEMBER_HDR;
pbMapAddress = SkipMember(pbMapAddress);
}
else
pbLongNameAddress = 0;
pDisplayString ("\n");
// Get the MetaData for each object file and display it.
while (DWORD(pbMapAddress - pbStartAddress) < dwFileSize)
{
if((szName = GetNameOfObj(pbLongNameAddress, (PIMAGE_ARCHIVE_MEMBER_HEADER)pbMapAddress, szBuf))!=NULL)
{
if (Wsz_mbstowcs(wzName, szName, 1024) == -1)
MDInfo::Error("Conversion from Multi-Byte to Wide-Char failed.");
// Display metadata only for object files.
// If szObjName is specified, display metadata only for that one object file.
if (!_stricmp(&szName[strlen(szName) - OBJ_EXT_LEN], OBJ_EXT) &&
(!szObjName || !_wcsicmp(szObjName, wzName)))
{
// Try to find the MetaData section in the current object file.
hr = FindObjMetaData(pbMapAddress+IMAGE_SIZEOF_ARCHIVE_MEMBER_HDR, &pvMetaData, &cbMetaData);
if (SUCCEEDED(hr))
{
sprintf_s (szString,1024,"MetaData for object file %s:\n", szName);
pDisplayString(szString);
MDInfo archiveInfo(g_pDisp,
(PBYTE)pvMetaData,
cbMetaData,
pDisplayString,
DumpFilter);
archiveInfo.DisplayMD();
}
else
{
sprintf_s(szString,1024,"MetaData not found for object file %s!\n\n", szName);
pDisplayString(szString);
}
}
}
// Skip past the object file.
pbMapAddress = SkipMember(pbMapAddress);
}
UnmapViewOfFile(pbStartAddress);
} // void DisplayArchive()
// DisplayFile() function
//
// Opens the meta data content of a .EXE, .CLB, .CLASS, .TLB, .DLL or .LIB file, and
// calls RawDisplay()
void DisplayFile(_In_z_ WCHAR* szFile, BOOL isFile, ULONG DumpFilter, _In_opt_z_ WCHAR* szObjName, strPassBackFn pDisplayString)
{
// Open the emit scope
// We need to make sure this file isn't too long. Checking _MAX_PATH is probably safe, but since we have a much
// larger buffer, we might as well use it all.
if (wcslen(szFile) > 1000)
return;
WCHAR szScope[1024];
char szString[1024];
if (isFile)
{
wcscpy_s(szScope, 1024, W("file:"));
wcscat_s(szScope, 1024, szFile);
}
else
wcscpy_s(szScope, 1024, szFile);
// print bar that separates different files
pDisplayString("////////////////////////////////////////////////////////////////\n");
WCHAR rcFname[_MAX_FNAME], rcExt[_MAX_EXT];
_wsplitpath_s(szFile, NULL, 0, NULL, 0, rcFname, _MAX_FNAME, rcExt, _MAX_EXT);
sprintf_s(szString,1024,"\nFile %S%S: \n",rcFname, rcExt);
pDisplayString(szString);
if (DumpFilter & MDInfo::dumpValidate)
{
if (!_wcsicmp(rcExt, OBJ_EXT_W) || !_wcsicmp(rcExt, LIB_EXT_W))
g_ValModuleType = ValidatorModuleTypeObj;
else
g_ValModuleType = ValidatorModuleTypePE;
}
if (!_wcsicmp(rcExt, LIB_EXT_W))
DisplayArchive(szFile, DumpFilter, szObjName, pDisplayString);
else
{
MDInfo metaDataInfo(g_pDisp, szScope, pDisplayString, DumpFilter);
metaDataInfo.DisplayMD();
}
} // void DisplayFile()
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <ctype.h>
#include <crtdbg.h>
#include "mdinfo.h"
#ifndef STRING_BUFFER_LEN
#define STRING_BUFFER_LEN 4096
#endif
#define OBJ_EXT ".obj"
#define OBJ_EXT_W W(".obj")
#define OBJ_EXT_LEN 4
#define LIB_EXT ".lib"
#define LIB_EXT_W W(".lib")
#define LIB_EXT_LEN 4
extern IMetaDataDispenserEx *g_pDisp;
extern DWORD g_ValModuleType;
// This function is copied from peparse.c file. Making this static, so we won't end up with
// duplicate definitions causing confusion.
static const char g_szCORMETA[] = ".cormeta";
static HRESULT FindObjMetaData(PVOID pImage, PVOID *ppMetaData, long *pcbMetaData)
{
IMAGE_FILE_HEADER *pImageHdr; // Header for the .obj file.
IMAGE_SECTION_HEADER *pSectionHdr; // Section header.
WORD i; // Loop control.
// Get a pointer to the header and the first section.
pImageHdr = (IMAGE_FILE_HEADER *) pImage;
pSectionHdr = (IMAGE_SECTION_HEADER *)(pImageHdr + 1);
// Avoid confusion.
*ppMetaData = NULL;
*pcbMetaData = 0;
// Walk each section looking for .cormeta.
for (i=0; i<VAL16(pImageHdr->NumberOfSections); i++, pSectionHdr++)
{
// Simple comparison to section name.
if (strcmp((const char *) pSectionHdr->Name, g_szCORMETA) == 0)
{
*pcbMetaData = VAL32(pSectionHdr->SizeOfRawData);
*ppMetaData = (void *) ((UINT_PTR)pImage + VAL32(pSectionHdr->PointerToRawData));
break;
}
}
// Check for errors.
if (*ppMetaData == NULL || *pcbMetaData == 0)
return (E_FAIL);
return (S_OK);
}
// This function returns the address to the MapView of file and file size.
void GetMapViewOfFile(_In_ WCHAR *szFile, PBYTE *ppbMap, DWORD *pdwFileSize)
{
HANDLE hMapFile;
DWORD dwHighSize;
HANDLE hFile = WszCreateFile(szFile,
GENERIC_READ,
FILE_SHARE_READ,
NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN,
NULL);
if (hFile == INVALID_HANDLE_VALUE)
MDInfo::Error("CreateFileA failed!");
*pdwFileSize = GetFileSize(hFile, &dwHighSize);
if ((*pdwFileSize == 0xFFFFFFFF) && (GetLastError() != NO_ERROR))
{
CloseHandle(hFile);
MDInfo::Error("GetFileSize failed!");
}
_ASSERTE(dwHighSize == 0);
hMapFile = CreateFileMappingW(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
CloseHandle(hFile);
if (!hMapFile)
MDInfo::Error("CreateFileMappingW failed!");
*ppbMap = (PBYTE) MapViewOfFile(hMapFile, FILE_MAP_READ, 0, 0, 0);
CloseHandle(hMapFile);
if (!*ppbMap)
MDInfo::Error("MapViewOfFile failed!");
} // void GetMapViewOfFile()
// This function skips a member given the pointer to the member header
// and returns a pointer to the next header.
PBYTE SkipMember(PBYTE pbMapAddress)
{
PIMAGE_ARCHIVE_MEMBER_HEADER pMemHdr;
ULONG ulMemSize;
int j;
pMemHdr = (PIMAGE_ARCHIVE_MEMBER_HEADER)pbMapAddress;
// Get size of the member.
ulMemSize = 0;
for (j = 0; j < 10; j++)
{
if (pMemHdr->Size[j] < '0' || pMemHdr->Size[j] > '9')
break;
else
ulMemSize = ulMemSize * 10 + pMemHdr->Size[j] - '0';
}
// Skip past the header.
pbMapAddress += IMAGE_SIZEOF_ARCHIVE_MEMBER_HDR + ulMemSize;
// Find the next even address if the current one is not even.
if ((ULONG_PTR)pbMapAddress % 2)
pbMapAddress++;
return pbMapAddress;
} // void SkipMember()
// This function returns the name of the given Obj. If the name fits in the header,
// szBuf will be filled in and returned from the function. Otherwise a pointer into the long
// names section is returned.
char *GetNameOfObj(PBYTE pbLongNames, PIMAGE_ARCHIVE_MEMBER_HEADER pMemHdr, char szBuf[17])
{
if (pMemHdr->Name[0] == '/')
{
ULONG ulOffset = 0;
// Long Names section must exist if the .obj file name starts with '/'.
_ASSERTE(pbLongNames &&
"Corrupt archive file - .obj file name in the header starts with "
"'/' but no long names section present in the archive file.");
// Calculate the offset into the long names section.
for (int j = 1; j < 16; j++)
{
if (pMemHdr->Name[j] < '0' || pMemHdr->Name[j] > '9')
break;
else
ulOffset = ulOffset * 10 + pMemHdr->Name[j] - '0';
}
return (char *)(pbLongNames + ulOffset);
}
else
{
int j;
for (j = 0; j < 16; j++)
if ((szBuf[j] = pMemHdr->Name[j]) == '/')
break;
szBuf[j] = '\0';
return szBuf;
}
} // char *GetNameOfObj()
// DisplayArchive() function
//
// Opens the .LIB file, and displays the metadata in the specified object files.
void DisplayArchive(_In_z_ WCHAR* szFile, ULONG DumpFilter, _In_opt_z_ WCHAR* szObjName, strPassBackFn pDisplayString)
{
PBYTE pbMapAddress;
PBYTE pbStartAddress;
PBYTE pbLongNameAddress;
PIMAGE_ARCHIVE_MEMBER_HEADER pMemHdr;
DWORD dwFileSize;
PVOID pvMetaData;
char *szName;
WCHAR wzName[1024];
char szBuf[17];
long cbMetaData;
int i;
HRESULT hr;
char szString[1024];
GetMapViewOfFile(szFile, &pbMapAddress, &dwFileSize);
pbStartAddress = pbMapAddress;
// Verify and skip archive signature.
if (dwFileSize < IMAGE_ARCHIVE_START_SIZE ||
strncmp((char *)pbMapAddress, IMAGE_ARCHIVE_START, IMAGE_ARCHIVE_START_SIZE))
{
MDInfo::Error("Bad file format - archive signature mis-match!");
}
pbMapAddress += IMAGE_ARCHIVE_START_SIZE;
// Skip linker member 1, linker member 2.
for (i = 0; i < 2; i++)
pbMapAddress = SkipMember(pbMapAddress);
// Save address of the long name member and skip it if there exists one.
pMemHdr = (PIMAGE_ARCHIVE_MEMBER_HEADER)pbMapAddress;
if (pMemHdr->Name[0] == '/' && pMemHdr->Name[1] == '/')
{
pbLongNameAddress = pbMapAddress + IMAGE_SIZEOF_ARCHIVE_MEMBER_HDR;
pbMapAddress = SkipMember(pbMapAddress);
}
else
pbLongNameAddress = 0;
pDisplayString ("\n");
// Get the MetaData for each object file and display it.
while (DWORD(pbMapAddress - pbStartAddress) < dwFileSize)
{
if((szName = GetNameOfObj(pbLongNameAddress, (PIMAGE_ARCHIVE_MEMBER_HEADER)pbMapAddress, szBuf))!=NULL)
{
if (Wsz_mbstowcs(wzName, szName, 1024) == -1)
MDInfo::Error("Conversion from Multi-Byte to Wide-Char failed.");
// Display metadata only for object files.
// If szObjName is specified, display metadata only for that one object file.
if (!_stricmp(&szName[strlen(szName) - OBJ_EXT_LEN], OBJ_EXT) &&
(!szObjName || !_wcsicmp(szObjName, wzName)))
{
// Try to find the MetaData section in the current object file.
hr = FindObjMetaData(pbMapAddress+IMAGE_SIZEOF_ARCHIVE_MEMBER_HDR, &pvMetaData, &cbMetaData);
if (SUCCEEDED(hr))
{
sprintf_s (szString,1024,"MetaData for object file %s:\n", szName);
pDisplayString(szString);
MDInfo archiveInfo(g_pDisp,
(PBYTE)pvMetaData,
cbMetaData,
pDisplayString,
DumpFilter);
archiveInfo.DisplayMD();
}
else
{
sprintf_s(szString,1024,"MetaData not found for object file %s!\n\n", szName);
pDisplayString(szString);
}
}
}
// Skip past the object file.
pbMapAddress = SkipMember(pbMapAddress);
}
UnmapViewOfFile(pbStartAddress);
} // void DisplayArchive()
// DisplayFile() function
//
// Opens the meta data content of a .EXE, .CLB, .CLASS, .TLB, .DLL or .LIB file, and
// calls RawDisplay()
void DisplayFile(_In_z_ WCHAR* szFile, BOOL isFile, ULONG DumpFilter, _In_opt_z_ WCHAR* szObjName, strPassBackFn pDisplayString)
{
// Open the emit scope
// We need to make sure this file isn't too long. Checking _MAX_PATH is probably safe, but since we have a much
// larger buffer, we might as well use it all.
if (wcslen(szFile) > 1000)
return;
WCHAR szScope[1024];
char szString[1024];
if (isFile)
{
wcscpy_s(szScope, 1024, W("file:"));
wcscat_s(szScope, 1024, szFile);
}
else
wcscpy_s(szScope, 1024, szFile);
// print bar that separates different files
pDisplayString("////////////////////////////////////////////////////////////////\n");
WCHAR rcFname[_MAX_FNAME], rcExt[_MAX_EXT];
_wsplitpath_s(szFile, NULL, 0, NULL, 0, rcFname, _MAX_FNAME, rcExt, _MAX_EXT);
sprintf_s(szString,1024,"\nFile %S%S: \n",rcFname, rcExt);
pDisplayString(szString);
if (DumpFilter & MDInfo::dumpValidate)
{
if (!_wcsicmp(rcExt, OBJ_EXT_W) || !_wcsicmp(rcExt, LIB_EXT_W))
g_ValModuleType = ValidatorModuleTypeObj;
else
g_ValModuleType = ValidatorModuleTypePE;
}
if (!_wcsicmp(rcExt, LIB_EXT_W))
DisplayArchive(szFile, DumpFilter, szObjName, pDisplayString);
else
{
MDInfo metaDataInfo(g_pDisp, szScope, pDisplayString, DumpFilter);
metaDataInfo.DisplayMD();
}
} // void DisplayFile()
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/pal_specific/PAL_errno/test1/PAL_errno.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================
**
** Source: pal_errno.c
**
** Purpose: Positive test of the PAL_errno API.
** call PAL_errno to retrieve the pointer to
** the per-thread errno value.
**
**
**============================================================*/
#include <palsuite.h>
PALTEST(pal_specific_PAL_errno_test1_paltest_pal_errno_test1, "pal_specific/PAL_errno/test1/paltest_pal_errno_test1")
{
int err;
FILE *pFile = NULL;
/*Initialize the PAL environment*/
err = PAL_Initialize(argc, argv);
if( 0 != err)
{
return FAIL;
}
/*Try to open a not-exist file to read to generate an error*/
pFile = fopen( "no_exist_file_name", "r" );
if( NULL != pFile )
{
Trace("\nFailed to call fopen to open a not exist for reading, "
"an error is expected, but no error occurred\n");
if( EOF == fclose( pFile ) )
{
Trace("\nFailed to call fclose to close a file stream\n");
}
Fail( "Test failed! fopen() Should not have worked!" );
}
/*retrieve the per-thread error value pointer*/
if( 2 != errno )
{
Fail("\nFailed to call PAL_errno API, this value is not correct."
" The correct value is ENOENT[2] ( No such file or directory.).\n");
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================
**
** Source: pal_errno.c
**
** Purpose: Positive test of the PAL_errno API.
** call PAL_errno to retrieve the pointer to
** the per-thread errno value.
**
**
**============================================================*/
#include <palsuite.h>
PALTEST(pal_specific_PAL_errno_test1_paltest_pal_errno_test1, "pal_specific/PAL_errno/test1/paltest_pal_errno_test1")
{
int err;
FILE *pFile = NULL;
/*Initialize the PAL environment*/
err = PAL_Initialize(argc, argv);
if( 0 != err)
{
return FAIL;
}
/*Try to open a not-exist file to read to generate an error*/
pFile = fopen( "no_exist_file_name", "r" );
if( NULL != pFile )
{
Trace("\nFailed to call fopen to open a not exist for reading, "
"an error is expected, but no error occurred\n");
if( EOF == fclose( pFile ) )
{
Trace("\nFailed to call fclose to close a file stream\n");
}
Fail( "Test failed! fopen() Should not have worked!" );
}
/*retrieve the per-thread error value pointer*/
if( 2 != errno )
{
Fail("\nFailed to call PAL_errno API, this value is not correct."
" The correct value is ENOENT[2] ( No such file or directory.).\n");
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/inc/bundle.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*****************************************************************************
** **
** bundle.h - Information about applications bundled as a single-file **
** **
*****************************************************************************/
#ifndef _BUNDLE_H_
#define _BUNDLE_H_
#include <sstring.h>
#include "coreclrhost.h"
class Bundle;
struct BundleFileLocation
{
INT64 Size;
INT64 Offset;
INT64 UncompresedSize;
BundleFileLocation()
{
LIMITED_METHOD_CONTRACT;
Size = 0;
Offset = 0;
UncompresedSize = 0;
}
static BundleFileLocation Invalid() { LIMITED_METHOD_CONTRACT; return BundleFileLocation(); }
const SString &Path() const;
bool IsValid() const { LIMITED_METHOD_CONTRACT; return Offset != 0; }
};
class Bundle
{
public:
Bundle(LPCSTR bundlePath, BundleProbeFn *probe);
BundleFileLocation Probe(const SString& path, bool pathIsBundleRelative = false) const;
const SString &Path() const { LIMITED_METHOD_CONTRACT; return m_path; }
const SString &BasePath() const { LIMITED_METHOD_CONTRACT; return m_basePath; }
static Bundle* AppBundle; // The BundleInfo for the current app, initialized by coreclr_initialize.
static bool AppIsBundle() { LIMITED_METHOD_CONTRACT; return AppBundle != nullptr; }
static BundleFileLocation ProbeAppBundle(const SString& path, bool pathIsBundleRelative = false);
private:
SString m_path; // The path to single-file executable
BundleProbeFn *m_probe;
SString m_basePath; // The prefix to denote a path within the bundle
COUNT_T m_basePathLength;
};
#endif // _BUNDLE_H_
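A hedged usage sketch (not from this repository's sources) of how a host or loader might consult the bundle before falling back to the file system; OpenFromBundle and OpenFromDisk are hypothetical placeholders and path is assumed to be an SString:
// Probe the single-file bundle first; a zero Offset (IsValid() == false) means the asset is not bundled.
BundleFileLocation loc = Bundle::ProbeAppBundle(path, /* pathIsBundleRelative */ true);
if (loc.IsValid())
{
    // The asset's bytes live inside the single-file host at [Offset, Offset + Size).
    OpenFromBundle(Bundle::AppBundle->Path(), loc.Offset, loc.Size);
}
else
{
    OpenFromDisk(path);
}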
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*****************************************************************************
** **
** bundle.h - Information about applications bundled as a single-file **
** **
*****************************************************************************/
#ifndef _BUNDLE_H_
#define _BUNDLE_H_
#include <sstring.h>
#include "coreclrhost.h"
class Bundle;
struct BundleFileLocation
{
INT64 Size;
INT64 Offset;
INT64 UncompresedSize;
BundleFileLocation()
{
LIMITED_METHOD_CONTRACT;
Size = 0;
Offset = 0;
UncompresedSize = 0;
}
static BundleFileLocation Invalid() { LIMITED_METHOD_CONTRACT; return BundleFileLocation(); }
const SString &Path() const;
bool IsValid() const { LIMITED_METHOD_CONTRACT; return Offset != 0; }
};
class Bundle
{
public:
Bundle(LPCSTR bundlePath, BundleProbeFn *probe);
BundleFileLocation Probe(const SString& path, bool pathIsBundleRelative = false) const;
const SString &Path() const { LIMITED_METHOD_CONTRACT; return m_path; }
const SString &BasePath() const { LIMITED_METHOD_CONTRACT; return m_basePath; }
static Bundle* AppBundle; // The BundleInfo for the current app, initialized by coreclr_initialize.
static bool AppIsBundle() { LIMITED_METHOD_CONTRACT; return AppBundle != nullptr; }
static BundleFileLocation ProbeAppBundle(const SString& path, bool pathIsBundleRelative = false);
private:
SString m_path; // The path to single-file executable
BundleProbeFn *m_probe;
SString m_basePath; // The prefix to denote a path within the bundle
COUNT_T m_basePathLength;
};
#endif // _BUNDLE_H_
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/vm/peimage.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// --------------------------------------------------------------------------------
// PEImage.cpp
//
// --------------------------------------------------------------------------------
#include "common.h"
#include "peimage.h"
#include "eeconfig.h"
#include <objbase.h>
#include "eventtrace.h"
#include "peimagelayout.inl"
#ifndef DACCESS_COMPILE
CrstStatic PEImage::s_hashLock;
PtrHashMap *PEImage::s_Images = NULL;
CrstStatic PEImage::s_ijwHashLock;
PtrHashMap *PEImage::s_ijwFixupDataHash;
/* static */
void PEImage::Startup()
{
CONTRACT_VOID
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
POSTCONDITION(CheckStartup());
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACT_END;
if (CheckStartup())
RETURN;
s_hashLock.Init(CrstPEImage, (CrstFlags)(CRST_REENTRANCY|CRST_TAKEN_DURING_SHUTDOWN));
LockOwner lock = { &s_hashLock, IsOwnerOfCrst };
s_Images = ::new PtrHashMap;
s_Images->Init(CompareImage, FALSE, &lock);
s_ijwHashLock.Init(CrstIJWHash, CRST_REENTRANCY);
LockOwner ijwLock = { &s_ijwHashLock, IsOwnerOfCrst };
s_ijwFixupDataHash = ::new PtrHashMap;
s_ijwFixupDataHash->Init(CompareIJWDataBase, FALSE, &ijwLock);
RETURN;
}
/* static */
CHECK PEImage::CheckStartup()
{
WRAPPER_NO_CONTRACT;
CHECK(s_Images != NULL);
CHECK_OK;
}
CHECK PEImage::CheckILFormat()
{
WRAPPER_NO_CONTRACT;
CHECK(GetOrCreateLayout(PEImageLayout::LAYOUT_ANY)->CheckILFormat());
CHECK_OK;
};
// PEImage is always unique on CoreCLR so a simple pointer check is sufficient in PEImage::Equals
CHECK PEImage::CheckUniqueInstance()
{
CHECK(GetPath().IsEmpty() || m_bInHashMap);
CHECK_OK;
}
PEImage::~PEImage()
{
CONTRACTL
{
PRECONDITION(CheckStartup());
PRECONDITION(m_refCount == 0);
DESTRUCTOR_CHECK;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
GCX_PREEMP();
if (m_pLayoutLock)
delete m_pLayoutLock;
if(m_hFile!=INVALID_HANDLE_VALUE)
CloseHandle(m_hFile);
for (unsigned int i=0;i<ARRAY_SIZE(m_pLayouts);i++)
{
if (m_pLayouts[i]!=NULL)
m_pLayouts[i]->Release();
}
if (m_pMDImport)
m_pMDImport->Release();
if(m_pNativeMDImport)
m_pNativeMDImport->Release();
#ifdef METADATATRACKER_ENABLED
if (m_pMDTracker != NULL)
m_pMDTracker->Deactivate();
#endif // METADATATRACKER_ENABLED
}
/* static */
BOOL PEImage::CompareIJWDataBase(UPTR base, UPTR mapping)
{
CONTRACTL{
PRECONDITION(CheckStartup());
PRECONDITION(CheckPointer((BYTE *)(base << 1)));
PRECONDITION(CheckPointer((IJWFixupData *)mapping));
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
} CONTRACTL_END;
return ((BYTE *)(base << 1) == ((IJWFixupData*)mapping)->GetBase());
}
ULONG PEImage::Release()
{
CONTRACTL
{
DESTRUCTOR_CHECK;
NOTHROW;
MODE_ANY;
FORBID_FAULT;
}
CONTRACTL_END;
CONTRACT_VIOLATION(FaultViolation|ThrowsViolation);
COUNT_T result = 0;
{
// Use scoping to hold the hash lock
CrstHolder holder(&s_hashLock);
// Decrement and check the refcount - if we hit 0, remove it from the hash and delete it.
result=FastInterlockDecrement(&m_refCount);
if (result == 0 )
{
LOG((LF_LOADER, LL_INFO100, "PEImage: Closing Image %S\n", (LPCWSTR) m_path));
if(m_bInHashMap)
{
PEImageLocator locator(this);
PEImage* deleted = (PEImage *)s_Images->DeleteValue(GetPathHash(), &locator);
_ASSERTE(deleted == this);
}
}
}
// This needs to be done outside of the hash lock, since this can call FreeLibrary,
// which can cause _CorDllMain to be executed, which can cause the hash lock to be
// taken again because we need to release the IJW fixup data in another PEImage hash.
if (result == 0)
delete this;
return result;
}
/* static */
CHECK PEImage::CheckCanonicalFullPath(const SString &path)
{
CONTRACT_CHECK
{
PRECONDITION(CheckValue(path));
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACT_CHECK_END;
CCHECK_START
{
// This is not intended to be an exhaustive test, just to provide a sanity check
SString::CIterator i = path.Begin();
SString sNetworkPathPrefix(SString::Literal, W("\\\\"));
if (path.Skip(i, sNetworkPathPrefix))
{
// Network path
}
else if (iswalpha(*i))
{
// Drive path
i++;
SString sDrivePath(SString::Literal, ":\\");
CCHECK(path.Skip(i, sDrivePath));
}
else
{
CCHECK_FAIL("Not a full path");
}
while (i != path.End())
{
// Check for multiple slashes
if(*i != '\\')
{
// Check for . or ..
SString sParentDir(SString::Ascii, "..");
SString sCurrentDir(SString::Ascii, ".");
if ((path.Skip(i, sParentDir) || path.Skip(i, sCurrentDir))
&& (path.Match(i, '\\')))
{
CCHECK_FAIL("Illegal . or ..");
}
if (!path.Find(i, '\\'))
break;
}
i++;
}
}
CCHECK_END;
CHECK_OK;
}
/* static */
BOOL PEImage::CompareImage(UPTR u1, UPTR u2)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
// This is the input to the lookup
PEImageLocator *pLocator = (PEImageLocator *) (u1<<1);
// This is the value stored in the table
PEImage *pImage = (PEImage *) u2;
if (pLocator->m_bIsInBundle != pImage->IsInBundle())
{
return FALSE;
}
BOOL ret = FALSE;
HRESULT hr;
EX_TRY
{
SString path(SString::Literal, pLocator->m_pPath);
#ifdef FEATURE_CASE_SENSITIVE_FILESYSTEM
if (pImage->GetPath().Equals(path))
#else
if (pImage->GetPath().EqualsCaseInsensitive(path))
#endif
{
ret = TRUE;
}
}
EX_CATCH_HRESULT(hr); //<TODO>ignores failure!</TODO>
return ret;
}
BOOL PEImage::Equals(PEImage *pImage)
{
CONTRACTL
{
INSTANCE_CHECK;
PRECONDITION(CheckPointer(pImage));
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
// PEImage is always unique on CoreCLR so a simple pointer check is sufficient
_ASSERTE(CheckUniqueInstance());
_ASSERTE(pImage->CheckUniqueInstance());
return dac_cast<TADDR>(pImage) == dac_cast<TADDR>(this);
}
IMDInternalImport* PEImage::GetMDImport()
{
WRAPPER_NO_CONTRACT;
if (!m_pMDImport)
OpenMDImport();
return m_pMDImport;
}
IMDInternalImport* PEImage::GetNativeMDImport(BOOL loadAllowed)
{
CONTRACTL
{
INSTANCE_CHECK;
PRECONDITION(HasReadyToRunHeader());
if (loadAllowed) GC_TRIGGERS; else GC_NOTRIGGER;
if (loadAllowed) THROWS; else NOTHROW;
if (loadAllowed) INJECT_FAULT(COMPlusThrowOM()); else FORBID_FAULT;
MODE_ANY;
}
CONTRACTL_END;
if (m_pNativeMDImport == NULL)
{
if (loadAllowed)
OpenNativeMDImport();
else
return NULL;
}
_ASSERTE(m_pNativeMDImport);
return m_pNativeMDImport;
}
void PEImage::OpenNativeMDImport()
{
CONTRACTL
{
INSTANCE_CHECK;
PRECONDITION(HasReadyToRunHeader());
GC_TRIGGERS;
THROWS;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
if (m_pNativeMDImport==NULL)
{
IMDInternalImport* m_pNewImport;
COUNT_T cMeta=0;
const void* pMeta=GetNativeManifestMetadata(&cMeta);
if(pMeta==NULL)
return;
IfFailThrow(GetMetaDataInternalInterface((void *) pMeta,
cMeta,
ofRead,
IID_IMDInternalImport,
(void **) &m_pNewImport));
if(FastInterlockCompareExchangePointer(&m_pNativeMDImport, m_pNewImport, NULL))
m_pNewImport->Release();
}
_ASSERTE(m_pNativeMDImport);
}
void PEImage::OpenMDImport()
{
CONTRACTL
{
INSTANCE_CHECK;
PRECONDITION(HasCorHeader());
PRECONDITION(HasContents());
GC_TRIGGERS;
THROWS;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
if (m_pMDImport==NULL)
{
IMDInternalImport* m_pNewImport;
const void* pMeta=NULL;
COUNT_T cMeta=0;
if(HasNTHeaders() && HasCorHeader())
pMeta=GetMetadata(&cMeta);
if(pMeta==NULL)
return;
#if METADATATRACKER_ENABLED
m_pMDTracker = MetaDataTracker::GetOrCreateMetaDataTracker((BYTE *)pMeta,
cMeta,
GetPath().GetUnicode());
#endif // METADATATRACKER_ENABLED
IfFailThrow(GetMetaDataInternalInterface((void *) pMeta,
cMeta,
ofRead,
IID_IMDInternalImport,
(void **) &m_pNewImport));
if(FastInterlockCompareExchangePointer(&m_pMDImport, m_pNewImport, NULL))
{
m_pNewImport->Release();
}
else
{
// grab the module name. This information is only used for dac. But we need to get
// it when module is instantiated in the managed process. The module name is stored
// in Metadata's module table in UTF8. Convert it to unicode.
//
if (m_path.IsEmpty())
{
// No need to check error here since this info is only used by DAC when inspecting
// dump file.
//
LPCSTR strModuleName;
IfFailThrow(m_pMDImport->GetScopeProps(&strModuleName, NULL));
m_sModuleFileNameHintUsedByDac.SetUTF8(strModuleName);
m_sModuleFileNameHintUsedByDac.Normalize();
}
}
}
_ASSERTE(m_pMDImport);
}
void PEImage::GetMVID(GUID *pMvid)
{
CONTRACTL
{
INSTANCE_CHECK;
PRECONDITION(CheckPointer(pMvid));
PRECONDITION(HasCorHeader());
PRECONDITION(HasContents());
GC_TRIGGERS;
THROWS;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
IfFailThrow(GetMDImport()->GetScopeProps(NULL, pMvid));
#ifdef _DEBUG
COUNT_T cMeta;
const void *pMeta = GetMetadata(&cMeta);
GUID MvidDEBUG;
if (pMeta == NULL)
ThrowHR(COR_E_BADIMAGEFORMAT);
SafeComHolder<IMDInternalImport> pMDImport;
IfFailThrow(GetMetaDataInternalInterface((void *) pMeta,
cMeta,
ofRead,
IID_IMDInternalImport,
(void **) &pMDImport));
pMDImport->GetScopeProps(NULL, &MvidDEBUG);
_ASSERTE(memcmp(pMvid, &MvidDEBUG, sizeof(GUID)) == 0);
#endif // _DEBUG
}
void DECLSPEC_NORETURN PEImage::ThrowFormat(HRESULT hrError)
{
CONTRACTL
{
GC_TRIGGERS;
THROWS;
MODE_ANY;
}
CONTRACTL_END;
EEFileLoadException::Throw(m_path, hrError);
}
//may outlive PEImage
PEImage::IJWFixupData::IJWFixupData(void *pBase)
: m_lock(CrstIJWFixupData),
m_base(pBase), m_flags(0), m_DllThunkHeap(NULL), m_iNextFixup(0), m_iNextMethod(0)
{
WRAPPER_NO_CONTRACT;
}
PEImage::IJWFixupData::~IJWFixupData()
{
WRAPPER_NO_CONTRACT;
if (m_DllThunkHeap)
delete m_DllThunkHeap;
}
// Self-initializing accessor for m_DllThunkHeap
LoaderHeap *PEImage::IJWFixupData::GetThunkHeap()
{
CONTRACT(LoaderHeap *)
{
INSTANCE_CHECK;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM());
POSTCONDITION(CheckPointer(RETVAL));
}
CONTRACT_END
if (!m_DllThunkHeap)
{
LoaderHeap *pNewHeap = new LoaderHeap(VIRTUAL_ALLOC_RESERVE_GRANULARITY, // DWORD dwReserveBlockSize
0, // DWORD dwCommitBlockSize
ThunkHeapStubManager::g_pManager->GetRangeList(),
TRUE); // BOOL fMakeExecutable
if (FastInterlockCompareExchangePointer((PVOID*)&m_DllThunkHeap, (VOID*)pNewHeap, (VOID*)0) != 0)
{
delete pNewHeap;
}
}
RETURN m_DllThunkHeap;
}
void PEImage::IJWFixupData::MarkMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod)
{
LIMITED_METHOD_CONTRACT;
// supports only sequential fixup/method
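// e.g. if the next expected position is (fixup 2, method 5), the only legal calls are (2, 5) or (3, 0)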
_ASSERTE((iFixup == m_iNextFixup + 1 && iMethod == 0) || //first method of the next fixup or
(iFixup == m_iNextFixup && iMethod == m_iNextMethod)); //the method that was next to fixup
m_iNextFixup = iFixup;
m_iNextMethod = iMethod + 1;
}
BOOL PEImage::IJWFixupData::IsMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod)
{
LIMITED_METHOD_CONTRACT;
if (iFixup < m_iNextFixup)
return TRUE;
if (iFixup > m_iNextFixup)
return FALSE;
if (iMethod < m_iNextMethod)
return TRUE;
return FALSE;
}
/*static */
PTR_LoaderHeap PEImage::GetDllThunkHeap(void *pBase)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
return GetIJWData(pBase)->GetThunkHeap();
}
/* static */
PEImage::IJWFixupData *PEImage::GetIJWData(void *pBase)
{
CONTRACTL{
THROWS;
GC_TRIGGERS;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM(););
} CONTRACTL_END
// Take the IJW hash lock
CrstHolder hashLockHolder(&s_ijwHashLock);
// Try to find the data
IJWFixupData *pData = (IJWFixupData *)s_ijwFixupDataHash->LookupValue((UPTR)pBase, pBase);
// No data, must create
if ((UPTR)pData == (UPTR)INVALIDENTRY)
{
pData = new IJWFixupData(pBase);
s_ijwFixupDataHash->InsertValue((UPTR)pBase, pData);
}
// Return the new data
return (pData);
}
/* static */
void PEImage::UnloadIJWModule(void *pBase)
{
CONTRACTL{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
} CONTRACTL_END
// Take the IJW hash lock
CrstHolder hashLockHolder(&s_ijwHashLock);
// Try to delete the hash entry
IJWFixupData *pData = (IJWFixupData *)s_ijwFixupDataHash->DeleteValue((UPTR)pBase, pBase);
// Now delete the data
if ((UPTR)pData != (UPTR)INVALIDENTRY)
delete pData;
}
#endif // #ifndef DACCESS_COMPILE
#ifdef DACCESS_COMPILE
void PEImage::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
CONTRACTL
{
INSTANCE_CHECK;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
// There are codepaths that will enumerate the PEImage without
// calling EnumMemoryRegions; ensure that we will still get
// these necessary fields enumerated no matter what.
m_path.EnumMemoryRegions(flags);
// We always want this field in mini/triage/heap dumps.
m_sModuleFileNameHintUsedByDac.EnumMemoryRegions(CLRDATA_ENUM_MEM_DEFAULT);
EX_TRY
{
if (HasLoadedLayout() && HasNTHeaders() && HasDirectoryEntry(IMAGE_DIRECTORY_ENTRY_DEBUG))
{
// Get a pointer to the contents and size of the debug directory and report it
COUNT_T cbDebugDir;
TADDR taDebugDir = GetLoadedLayout()->GetDirectoryEntryData(IMAGE_DIRECTORY_ENTRY_DEBUG, &cbDebugDir);
DacEnumMemoryRegion(taDebugDir, cbDebugDir);
// Report the memory that each debug directory entry points to
UINT cNumEntries = cbDebugDir / sizeof(IMAGE_DEBUG_DIRECTORY);
PTR_IMAGE_DEBUG_DIRECTORY pDebugEntry = dac_cast<PTR_IMAGE_DEBUG_DIRECTORY>(taDebugDir);
for (UINT iIndex = 0; iIndex < cNumEntries; iIndex++)
{
TADDR taEntryAddr = GetLoadedLayout()->GetRvaData(pDebugEntry[iIndex].AddressOfRawData);
DacEnumMemoryRegion(taEntryAddr, pDebugEntry[iIndex].SizeOfData);
// Triage dumps must not dump full paths as they may contain PII data.
// Thus, we replace the debug directory's PDB full path with the file name only.
if (flags == CLRDATA_ENUM_MEM_TRIAGE &&
pDebugEntry[iIndex].Type == IMAGE_DEBUG_TYPE_CODEVIEW)
{
DWORD CvSignature = *(dac_cast<PTR_DWORD>(taEntryAddr));
if(CvSignature == CV_SIGNATURE_RSDS)
{
CV_INFO_PDB70* pCvInfo = (CV_INFO_PDB70*)DacInstantiateTypeByAddressNoReport(taEntryAddr, sizeof(CV_INFO_PDB70), false);
if (pCvInfo == NULL)
{
continue;
}
// Because data may be corrupted make sure we null terminate the string.
pCvInfo->path[MAX_LONGPATH - 1] = '\0';
//Find the filename from pdb full path
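// e.g. "c:\symbols\app.pdb" is rewritten in place to "app.pdb" followed by NUL padding.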
char* fileName = strrchr(pCvInfo->path, '\\');
if (fileName != NULL)
fileName++;
else
fileName = pCvInfo->path;
size_t fileNameLength = strlen(fileName);
size_t fullPathLength = strlen(pCvInfo->path);
memmove(pCvInfo->path, fileName, fileNameLength);
// NULL out the rest of the path buffer.
for (size_t i = fileNameLength; i < MAX_PATH_FNAME - 1; i++)
{
pCvInfo->path[i] = '\0';
}
DacUpdateMemoryRegion( taEntryAddr + offsetof(CV_INFO_PDB70, path), sizeof(pCvInfo->path), (PBYTE)pCvInfo->path );
}
}
}
}
}
EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
DAC_ENUM_DTHIS();
EMEM_OUT(("MEM: %p PEImage\n", dac_cast<TADDR>(this)));
// This just gets the image headers into the dump.
// This is used, for example, for ngen images to ensure we have the debug directory so we
// can find the managed PDBs.
// No lock here as the process should be suspended.
if (m_pLayouts[IMAGE_FLAT].IsValid() && m_pLayouts[IMAGE_FLAT]!=NULL)
m_pLayouts[IMAGE_FLAT]->EnumMemoryRegions(flags);
if (m_pLayouts[IMAGE_LOADED].IsValid() && m_pLayouts[IMAGE_LOADED]!=NULL)
m_pLayouts[IMAGE_LOADED]->EnumMemoryRegions(flags);
}
#endif // #ifdef DACCESS_COMPILE
PEImage::PEImage():
m_path(),
m_refCount(1),
m_bInHashMap(FALSE),
m_bundleFileLocation(),
m_hFile(INVALID_HANDLE_VALUE),
m_dwPEKind(0),
m_dwMachine(0),
#ifdef METADATATRACKER_DATA
m_pMDTracker(NULL),
#endif // METADATATRACKER_DATA
m_pMDImport(NULL),
m_pNativeMDImport(NULL)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
for (DWORD i=0;i<ARRAY_SIZE(m_pLayouts);i++)
m_pLayouts[i]=NULL ;
m_pLayoutLock=new SimpleRWLock(PREEMPTIVE,LOCK_TYPE_DEFAULT);
}
// Misnomer under the DAC, but has a lot of callers. The DAC can't create layouts, so in that
// case this is a get.
PTR_PEImageLayout PEImage::GetOrCreateLayout(DWORD imageLayoutMask)
{
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
// First attempt to find an existing layout matching imageLayoutMask.
// If that fails, try again with auto-creating helper.
// Note: we use reader-writer lock, but only writes are synchronized.
PTR_PEImageLayout pRetVal = GetExistingLayoutInternal(imageLayoutMask);
if (pRetVal == NULL)
{
#ifndef DACCESS_COMPILE
GCX_PREEMP();
SimpleWriteLockHolder lock(m_pLayoutLock);
pRetVal = GetOrCreateLayoutInternal(imageLayoutMask);
#else
// In DAC builds, we can't create any layouts - we must require that they already exist.
// We also don't take any AddRefs or locks in DAC builds - it's inspection-only.
_ASSERTE_MSG(false, "DACization error - caller expects PEImage layout to exist and it doesn't");
DacError(E_UNEXPECTED);
#endif
}
return pRetVal;
}
#ifndef DACCESS_COMPILE
void PEImage::SetLayout(DWORD dwLayout, PEImageLayout* pLayout)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(dwLayout < IMAGE_COUNT);
_ASSERTE(m_pLayoutLock->IsWriterLock());
_ASSERTE(m_pLayouts[dwLayout] == NULL);
m_pLayouts[dwLayout] = pLayout;
}
PTR_PEImageLayout PEImage::GetOrCreateLayoutInternal(DWORD imageLayoutMask)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
PTR_PEImageLayout pRetVal=GetExistingLayoutInternal(imageLayoutMask);
if (pRetVal==NULL)
{
BOOL bIsLoadedLayoutSuitable = ((imageLayoutMask & PEImageLayout::LAYOUT_LOADED) != 0);
BOOL bIsFlatLayoutSuitable = ((imageLayoutMask & PEImageLayout::LAYOUT_FLAT) != 0);
BOOL bIsLoadedLayoutPreferred = !bIsFlatLayoutSuitable;
#ifdef TARGET_WINDOWS
// on Windows we prefer to just load the file using OS loader
if (!IsInBundle() && bIsLoadedLayoutSuitable)
{
bIsLoadedLayoutPreferred = TRUE;
}
#endif // TARGET_WINDOWS
_ASSERTE(bIsLoadedLayoutSuitable || bIsFlatLayoutSuitable);
if (bIsLoadedLayoutPreferred)
{
_ASSERTE(bIsLoadedLayoutSuitable);
pRetVal = PEImage::CreateLoadedLayout(!bIsFlatLayoutSuitable);
}
if (pRetVal == NULL)
{
_ASSERTE(bIsFlatLayoutSuitable);
pRetVal = PEImage::CreateFlatLayout();
_ASSERTE(pRetVal != NULL);
}
}
_ASSERTE(pRetVal != NULL);
_ASSERTE(this->IsOpened());
return pRetVal;
}
PTR_PEImageLayout PEImage::CreateLoadedLayout(bool throwOnFailure)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(m_pLayoutLock->IsWriterLock());
}
CONTRACTL_END;
PEImageLayout * pLoadLayout = NULL;
HRESULT loadFailure = S_OK;
pLoadLayout = PEImageLayout::Load(this, &loadFailure);
if (pLoadLayout != NULL)
{
SetLayout(IMAGE_LOADED,pLoadLayout);
// loaded layout is functionally a superset of flat,
// so fill the flat slot, if not filled already.
if (m_pLayouts[IMAGE_FLAT] == NULL)
{
pLoadLayout->AddRef();
SetLayout(IMAGE_FLAT, pLoadLayout);
}
}
if (pLoadLayout == NULL && throwOnFailure)
{
loadFailure = FAILED(loadFailure) ? loadFailure : COR_E_BADIMAGEFORMAT;
EEFileLoadException::Throw(GetPath(), loadFailure);
}
return pLoadLayout;
}
PTR_PEImageLayout PEImage::CreateFlatLayout()
{
CONTRACTL
{
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(m_pLayoutLock->IsWriterLock());
}
CONTRACTL_END;
PTR_PEImageLayout pFlatLayout = PEImageLayout::LoadFlat(this);
SetLayout(IMAGE_FLAT, pFlatLayout);
return pFlatLayout;
}
/* static */
PTR_PEImage PEImage::CreateFromByteArray(const BYTE* array, COUNT_T size)
{
CONTRACT(PTR_PEImage)
{
STANDARD_VM_CHECK;
}
CONTRACT_END;
PEImageHolder pImage(new PEImage());
PTR_PEImageLayout pLayout = PEImageLayout::CreateFromByteArray(pImage, array, size);
_ASSERTE(!pLayout->IsMapped());
SimpleWriteLockHolder lock(pImage->m_pLayoutLock);
pImage->SetLayout(IMAGE_FLAT,pLayout);
RETURN dac_cast<PTR_PEImage>(pImage.Extract());
}
#ifndef TARGET_UNIX
/* static */
PTR_PEImage PEImage::CreateFromHMODULE(HMODULE hMod)
{
CONTRACT(PTR_PEImage)
{
STANDARD_VM_CHECK;
PRECONDITION(hMod!=NULL);
POSTCONDITION(RETVAL->HasLoadedLayout());
}
CONTRACT_END;
StackSString path;
WszGetModuleFileName(hMod, path);
PEImageHolder pImage(PEImage::OpenImage(path, MDInternalImport_Default));
if (!pImage->HasLoadedLayout())
{
PTR_PEImageLayout pLayout = PEImageLayout::CreateFromHMODULE(hMod, pImage);
SimpleWriteLockHolder lock(pImage->m_pLayoutLock);
pImage->SetLayout(IMAGE_LOADED, pLayout);
if (pImage->m_pLayouts[IMAGE_FLAT] == NULL)
{
pLayout->AddRef();
pImage->SetLayout(IMAGE_FLAT, pLayout);
}
}
_ASSERTE(pImage->m_pLayouts[IMAGE_FLAT] != NULL);
RETURN dac_cast<PTR_PEImage>(pImage.Extract());
}
#endif // !TARGET_UNIX
#endif //DACCESS_COMPILE
HANDLE PEImage::GetFileHandle()
{
CONTRACTL
{
STANDARD_VM_CHECK;
PRECONDITION(m_pLayoutLock->IsWriterLock());
}
CONTRACTL_END;
if (m_hFile!=INVALID_HANDLE_VALUE)
return m_hFile;
HRESULT hr = TryOpenFile(/*takeLock*/ false);
if (m_hFile == INVALID_HANDLE_VALUE)
{
#if !defined(DACCESS_COMPILE)
EEFileLoadException::Throw(GetPathToLoad(), hr);
#else // defined(DACCESS_COMPILE)
ThrowHR(hr);
#endif // !defined(DACCESS_COMPILE)
}
return m_hFile;
}
HRESULT PEImage::TryOpenFile(bool takeLock)
{
STANDARD_VM_CONTRACT;
SimpleWriteLockHolder lock(m_pLayoutLock, takeLock);
if (m_hFile!=INVALID_HANDLE_VALUE)
return S_OK;
ErrorModeHolder mode(SEM_NOOPENFILEERRORBOX | SEM_FAILCRITICALERRORS);
m_hFile=WszCreateFile((LPCWSTR)GetPathToLoad(),
GENERIC_READ
#if TARGET_WINDOWS
// the file may have native code sections, make sure we are allowed to execute the file
| GENERIC_EXECUTE
#endif
,
FILE_SHARE_READ|FILE_SHARE_DELETE,
NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
NULL);
if (m_hFile != INVALID_HANDLE_VALUE)
return S_OK;
if (GetLastError())
return HRESULT_FROM_WIN32(GetLastError());
return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
}
BOOL PEImage::IsPtrInImage(PTR_CVOID data)
{
CONTRACTL
{
INSTANCE_CHECK;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
SUPPORTS_DAC;
}
CONTRACTL_END;
for (int i = 0; i < IMAGE_COUNT; i++)
{
if (m_pLayouts[i] != NULL)
{
if (m_pLayouts[i]->PointerInPE(data))
return TRUE;
}
}
return FALSE;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// --------------------------------------------------------------------------------
// PEImage.cpp
//
// --------------------------------------------------------------------------------
#include "common.h"
#include "peimage.h"
#include "eeconfig.h"
#include <objbase.h>
#include "eventtrace.h"
#include "peimagelayout.inl"
#ifndef DACCESS_COMPILE
CrstStatic PEImage::s_hashLock;
PtrHashMap *PEImage::s_Images = NULL;
CrstStatic PEImage::s_ijwHashLock;
PtrHashMap *PEImage::s_ijwFixupDataHash;
/* static */
void PEImage::Startup()
{
CONTRACT_VOID
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
POSTCONDITION(CheckStartup());
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACT_END;
if (CheckStartup())
RETURN;
s_hashLock.Init(CrstPEImage, (CrstFlags)(CRST_REENTRANCY|CRST_TAKEN_DURING_SHUTDOWN));
LockOwner lock = { &s_hashLock, IsOwnerOfCrst };
s_Images = ::new PtrHashMap;
s_Images->Init(CompareImage, FALSE, &lock);
s_ijwHashLock.Init(CrstIJWHash, CRST_REENTRANCY);
LockOwner ijwLock = { &s_ijwHashLock, IsOwnerOfCrst };
s_ijwFixupDataHash = ::new PtrHashMap;
s_ijwFixupDataHash->Init(CompareIJWDataBase, FALSE, &ijwLock);
RETURN;
}
/* static */
CHECK PEImage::CheckStartup()
{
WRAPPER_NO_CONTRACT;
CHECK(s_Images != NULL);
CHECK_OK;
}
CHECK PEImage::CheckILFormat()
{
WRAPPER_NO_CONTRACT;
CHECK(GetOrCreateLayout(PEImageLayout::LAYOUT_ANY)->CheckILFormat());
CHECK_OK;
};
// PEImage is always unique on CoreCLR so a simple pointer check is sufficient in PEImage::Equals
CHECK PEImage::CheckUniqueInstance()
{
CHECK(GetPath().IsEmpty() || m_bInHashMap);
CHECK_OK;
}
PEImage::~PEImage()
{
CONTRACTL
{
PRECONDITION(CheckStartup());
PRECONDITION(m_refCount == 0);
DESTRUCTOR_CHECK;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
GCX_PREEMP();
if (m_pLayoutLock)
delete m_pLayoutLock;
if(m_hFile!=INVALID_HANDLE_VALUE)
CloseHandle(m_hFile);
for (unsigned int i=0;i<ARRAY_SIZE(m_pLayouts);i++)
{
if (m_pLayouts[i]!=NULL)
m_pLayouts[i]->Release();
}
if (m_pMDImport)
m_pMDImport->Release();
if(m_pNativeMDImport)
m_pNativeMDImport->Release();
#ifdef METADATATRACKER_ENABLED
if (m_pMDTracker != NULL)
m_pMDTracker->Deactivate();
#endif // METADATATRACKER_ENABLED
}
/* static */
BOOL PEImage::CompareIJWDataBase(UPTR base, UPTR mapping)
{
CONTRACTL{
PRECONDITION(CheckStartup());
PRECONDITION(CheckPointer((BYTE *)(base << 1)));
PRECONDITION(CheckPointer((IJWFixupData *)mapping));
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
} CONTRACTL_END;
return ((BYTE *)(base << 1) == ((IJWFixupData*)mapping)->GetBase());
}
ULONG PEImage::Release()
{
CONTRACTL
{
DESTRUCTOR_CHECK;
NOTHROW;
MODE_ANY;
FORBID_FAULT;
}
CONTRACTL_END;
CONTRACT_VIOLATION(FaultViolation|ThrowsViolation);
COUNT_T result = 0;
{
// Use scoping to hold the hash lock
CrstHolder holder(&s_hashLock);
// Decrement and check the refcount - if we hit 0, remove it from the hash and delete it.
result=FastInterlockDecrement(&m_refCount);
if (result == 0 )
{
LOG((LF_LOADER, LL_INFO100, "PEImage: Closing Image %S\n", (LPCWSTR) m_path));
if(m_bInHashMap)
{
PEImageLocator locator(this);
PEImage* deleted = (PEImage *)s_Images->DeleteValue(GetPathHash(), &locator);
_ASSERTE(deleted == this);
}
}
}
// This needs to be done outside of the hash lock, since this can call FreeLibrary,
// which can cause _CorDllMain to be executed, which can cause the hash lock to be
// taken again because we need to release the IJW fixup data in another PEImage hash.
if (result == 0)
delete this;
return result;
}
/* static */
CHECK PEImage::CheckCanonicalFullPath(const SString &path)
{
CONTRACT_CHECK
{
PRECONDITION(CheckValue(path));
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACT_CHECK_END;
CCHECK_START
{
// This is not intended to be an exhaustive test, just to provide a sanity check
SString::CIterator i = path.Begin();
SString sNetworkPathPrefix(SString::Literal, W("\\\\"));
if (path.Skip(i, sNetworkPathPrefix))
{
// Network path
}
else if (iswalpha(*i))
{
// Drive path
i++;
SString sDrivePath(SString::Literal, ":\\");
CCHECK(path.Skip(i, sDrivePath));
}
else
{
CCHECK_FAIL("Not a full path");
}
while (i != path.End())
{
// Check for multiple slashes
if(*i != '\\')
{
// Check for . or ..
SString sParentDir(SString::Ascii, "..");
SString sCurrentDir(SString::Ascii, ".");
if ((path.Skip(i, sParentDir) || path.Skip(i, sCurrentDir))
&& (path.Match(i, '\\')))
{
CCHECK_FAIL("Illegal . or ..");
}
if (!path.Find(i, '\\'))
break;
}
i++;
}
}
CCHECK_END;
CHECK_OK;
}
/* static */
BOOL PEImage::CompareImage(UPTR u1, UPTR u2)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
// This is the input to the lookup
PEImageLocator *pLocator = (PEImageLocator *) (u1<<1);
// This is the value stored in the table
PEImage *pImage = (PEImage *) u2;
if (pLocator->m_bIsInBundle != pImage->IsInBundle())
{
return FALSE;
}
BOOL ret = FALSE;
HRESULT hr;
EX_TRY
{
SString path(SString::Literal, pLocator->m_pPath);
#ifdef FEATURE_CASE_SENSITIVE_FILESYSTEM
if (pImage->GetPath().Equals(path))
#else
if (pImage->GetPath().EqualsCaseInsensitive(path))
#endif
{
ret = TRUE;
}
}
EX_CATCH_HRESULT(hr); //<TODO>ignores failure!</TODO>
return ret;
}
BOOL PEImage::Equals(PEImage *pImage)
{
CONTRACTL
{
INSTANCE_CHECK;
PRECONDITION(CheckPointer(pImage));
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
// PEImage is always unique on CoreCLR so a simple pointer check is sufficient
_ASSERTE(CheckUniqueInstance());
_ASSERTE(pImage->CheckUniqueInstance());
return dac_cast<TADDR>(pImage) == dac_cast<TADDR>(this);
}
IMDInternalImport* PEImage::GetMDImport()
{
WRAPPER_NO_CONTRACT;
if (!m_pMDImport)
OpenMDImport();
return m_pMDImport;
}
IMDInternalImport* PEImage::GetNativeMDImport(BOOL loadAllowed)
{
CONTRACTL
{
INSTANCE_CHECK;
PRECONDITION(HasReadyToRunHeader());
if (loadAllowed) GC_TRIGGERS; else GC_NOTRIGGER;
if (loadAllowed) THROWS; else NOTHROW;
if (loadAllowed) INJECT_FAULT(COMPlusThrowOM()); else FORBID_FAULT;
MODE_ANY;
}
CONTRACTL_END;
if (m_pNativeMDImport == NULL)
{
if (loadAllowed)
OpenNativeMDImport();
else
return NULL;
}
_ASSERTE(m_pNativeMDImport);
return m_pNativeMDImport;
}
void PEImage::OpenNativeMDImport()
{
CONTRACTL
{
INSTANCE_CHECK;
PRECONDITION(HasReadyToRunHeader());
GC_TRIGGERS;
THROWS;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
if (m_pNativeMDImport==NULL)
{
IMDInternalImport* m_pNewImport;
COUNT_T cMeta=0;
const void* pMeta=GetNativeManifestMetadata(&cMeta);
if(pMeta==NULL)
return;
IfFailThrow(GetMetaDataInternalInterface((void *) pMeta,
cMeta,
ofRead,
IID_IMDInternalImport,
(void **) &m_pNewImport));
if(FastInterlockCompareExchangePointer(&m_pNativeMDImport, m_pNewImport, NULL))
m_pNewImport->Release();
}
_ASSERTE(m_pNativeMDImport);
}
void PEImage::OpenMDImport()
{
CONTRACTL
{
INSTANCE_CHECK;
PRECONDITION(HasCorHeader());
PRECONDITION(HasContents());
GC_TRIGGERS;
THROWS;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
if (m_pMDImport==NULL)
{
IMDInternalImport* m_pNewImport;
const void* pMeta=NULL;
COUNT_T cMeta=0;
if(HasNTHeaders() && HasCorHeader())
pMeta=GetMetadata(&cMeta);
if(pMeta==NULL)
return;
#if METADATATRACKER_ENABLED
m_pMDTracker = MetaDataTracker::GetOrCreateMetaDataTracker((BYTE *)pMeta,
cMeta,
GetPath().GetUnicode());
#endif // METADATATRACKER_ENABLED
IfFailThrow(GetMetaDataInternalInterface((void *) pMeta,
cMeta,
ofRead,
IID_IMDInternalImport,
(void **) &m_pNewImport));
if(FastInterlockCompareExchangePointer(&m_pMDImport, m_pNewImport, NULL))
{
m_pNewImport->Release();
}
else
{
// grab the module name. This information is only used for dac. But we need to get
// it when module is instantiated in the managed process. The module name is stored
// in Metadata's module table in UTF8. Convert it to unicode.
//
if (m_path.IsEmpty())
{
// No need to check error here since this info is only used by DAC when inspecting
// dump file.
//
LPCSTR strModuleName;
IfFailThrow(m_pMDImport->GetScopeProps(&strModuleName, NULL));
m_sModuleFileNameHintUsedByDac.SetUTF8(strModuleName);
m_sModuleFileNameHintUsedByDac.Normalize();
}
}
}
_ASSERTE(m_pMDImport);
}
void PEImage::GetMVID(GUID *pMvid)
{
CONTRACTL
{
INSTANCE_CHECK;
PRECONDITION(CheckPointer(pMvid));
PRECONDITION(HasCorHeader());
PRECONDITION(HasContents());
GC_TRIGGERS;
THROWS;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
IfFailThrow(GetMDImport()->GetScopeProps(NULL, pMvid));
#ifdef _DEBUG
COUNT_T cMeta;
const void *pMeta = GetMetadata(&cMeta);
GUID MvidDEBUG;
if (pMeta == NULL)
ThrowHR(COR_E_BADIMAGEFORMAT);
SafeComHolder<IMDInternalImport> pMDImport;
IfFailThrow(GetMetaDataInternalInterface((void *) pMeta,
cMeta,
ofRead,
IID_IMDInternalImport,
(void **) &pMDImport));
pMDImport->GetScopeProps(NULL, &MvidDEBUG);
_ASSERTE(memcmp(pMvid, &MvidDEBUG, sizeof(GUID)) == 0);
#endif // _DEBUG
}
void DECLSPEC_NORETURN PEImage::ThrowFormat(HRESULT hrError)
{
CONTRACTL
{
GC_TRIGGERS;
THROWS;
MODE_ANY;
}
CONTRACTL_END;
EEFileLoadException::Throw(m_path, hrError);
}
//may outlive PEImage
PEImage::IJWFixupData::IJWFixupData(void *pBase)
: m_lock(CrstIJWFixupData),
m_base(pBase), m_flags(0), m_DllThunkHeap(NULL), m_iNextFixup(0), m_iNextMethod(0)
{
WRAPPER_NO_CONTRACT;
}
PEImage::IJWFixupData::~IJWFixupData()
{
WRAPPER_NO_CONTRACT;
if (m_DllThunkHeap)
delete m_DllThunkHeap;
}
// Self-initializing accessor for m_DllThunkHeap
LoaderHeap *PEImage::IJWFixupData::GetThunkHeap()
{
CONTRACT(LoaderHeap *)
{
INSTANCE_CHECK;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM());
POSTCONDITION(CheckPointer(RETVAL));
}
CONTRACT_END
if (!m_DllThunkHeap)
{
LoaderHeap *pNewHeap = new LoaderHeap(VIRTUAL_ALLOC_RESERVE_GRANULARITY, // DWORD dwReserveBlockSize
0, // DWORD dwCommitBlockSize
ThunkHeapStubManager::g_pManager->GetRangeList(),
TRUE); // BOOL fMakeExecutable
if (FastInterlockCompareExchangePointer((PVOID*)&m_DllThunkHeap, (VOID*)pNewHeap, (VOID*)0) != 0)
{
delete pNewHeap;
}
}
RETURN m_DllThunkHeap;
}
void PEImage::IJWFixupData::MarkMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod)
{
LIMITED_METHOD_CONTRACT;
// supports only sequential fixup/method
_ASSERTE((iFixup == m_iNextFixup + 1 && iMethod == 0) || //first method of the next fixup or
(iFixup == m_iNextFixup && iMethod == m_iNextMethod)); //the method that was next to fixup
m_iNextFixup = iFixup;
m_iNextMethod = iMethod + 1;
}
BOOL PEImage::IJWFixupData::IsMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod)
{
LIMITED_METHOD_CONTRACT;
if (iFixup < m_iNextFixup)
return TRUE;
if (iFixup > m_iNextFixup)
return FALSE;
if (iMethod < m_iNextMethod)
return TRUE;
return FALSE;
}
/*static */
PTR_LoaderHeap PEImage::GetDllThunkHeap(void *pBase)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
return GetIJWData(pBase)->GetThunkHeap();
}
/* static */
PEImage::IJWFixupData *PEImage::GetIJWData(void *pBase)
{
CONTRACTL{
THROWS;
GC_TRIGGERS;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM(););
} CONTRACTL_END
// Take the IJW hash lock
CrstHolder hashLockHolder(&s_ijwHashLock);
// Try to find the data
IJWFixupData *pData = (IJWFixupData *)s_ijwFixupDataHash->LookupValue((UPTR)pBase, pBase);
// No data, must create
if ((UPTR)pData == (UPTR)INVALIDENTRY)
{
pData = new IJWFixupData(pBase);
s_ijwFixupDataHash->InsertValue((UPTR)pBase, pData);
}
// Return the new data
return (pData);
}
/* static */
void PEImage::UnloadIJWModule(void *pBase)
{
CONTRACTL{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
} CONTRACTL_END
// Take the IJW hash lock
CrstHolder hashLockHolder(&s_ijwHashLock);
// Try to delete the hash entry
IJWFixupData *pData = (IJWFixupData *)s_ijwFixupDataHash->DeleteValue((UPTR)pBase, pBase);
// Now delete the data
if ((UPTR)pData != (UPTR)INVALIDENTRY)
delete pData;
}
#endif // #ifndef DACCESS_COMPILE
#ifdef DACCESS_COMPILE
void PEImage::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
CONTRACTL
{
INSTANCE_CHECK;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
// There are codepaths that will enumerate the PEImage without
// calling EnumMemoryRegions; ensure that we will still get
// these necessary fields enumerated no matter what.
m_path.EnumMemoryRegions(flags);
// We always want this field in mini/triage/heap dumps.
m_sModuleFileNameHintUsedByDac.EnumMemoryRegions(CLRDATA_ENUM_MEM_DEFAULT);
EX_TRY
{
if (HasLoadedLayout() && HasNTHeaders() && HasDirectoryEntry(IMAGE_DIRECTORY_ENTRY_DEBUG))
{
// Get a pointer to the contents and size of the debug directory and report it
COUNT_T cbDebugDir;
TADDR taDebugDir = GetLoadedLayout()->GetDirectoryEntryData(IMAGE_DIRECTORY_ENTRY_DEBUG, &cbDebugDir);
DacEnumMemoryRegion(taDebugDir, cbDebugDir);
// Report the memory that each debug directory entry points to
UINT cNumEntries = cbDebugDir / sizeof(IMAGE_DEBUG_DIRECTORY);
PTR_IMAGE_DEBUG_DIRECTORY pDebugEntry = dac_cast<PTR_IMAGE_DEBUG_DIRECTORY>(taDebugDir);
for (UINT iIndex = 0; iIndex < cNumEntries; iIndex++)
{
TADDR taEntryAddr = GetLoadedLayout()->GetRvaData(pDebugEntry[iIndex].AddressOfRawData);
DacEnumMemoryRegion(taEntryAddr, pDebugEntry[iIndex].SizeOfData);
// Triage dumps must not dump full paths as they may contain PII data.
// Thus, we replace the debug directory's PDB full path with the filename only.
if (flags == CLRDATA_ENUM_MEM_TRIAGE &&
pDebugEntry[iIndex].Type == IMAGE_DEBUG_TYPE_CODEVIEW)
{
DWORD CvSignature = *(dac_cast<PTR_DWORD>(taEntryAddr));
if(CvSignature == CV_SIGNATURE_RSDS)
{
CV_INFO_PDB70* pCvInfo = (CV_INFO_PDB70*)DacInstantiateTypeByAddressNoReport(taEntryAddr, sizeof(CV_INFO_PDB70), false);
if (pCvInfo == NULL)
{
continue;
}
// Because data may be corrupted, make sure we null terminate the string.
pCvInfo->path[MAX_LONGPATH - 1] = '\0';
// Find the filename from the PDB full path
char* fileName = strrchr(pCvInfo->path, '\\');
if (fileName != NULL)
fileName++;
else
fileName = pCvInfo->path;
size_t fileNameLength = strlen(fileName);
size_t fullPathLength = strlen(pCvInfo->path);
memmove(pCvInfo->path, fileName, fileNameLength);
// NULL out the rest of the path buffer.
for (size_t i = fileNameLength; i < MAX_PATH_FNAME - 1; i++)
{
pCvInfo->path[i] = '\0';
}
DacUpdateMemoryRegion( taEntryAddr + offsetof(CV_INFO_PDB70, path), sizeof(pCvInfo->path), (PBYTE)pCvInfo->path );
}
}
}
}
}
EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
DAC_ENUM_DTHIS();
EMEM_OUT(("MEM: %p PEImage\n", dac_cast<TADDR>(this)));
// This just gets the image headers into the dump.
// This is used, for example, for ngen images to ensure we have the debug directory so we
// can find the managed PDBs.
// No lock here as the process should be suspended.
if (m_pLayouts[IMAGE_FLAT].IsValid() && m_pLayouts[IMAGE_FLAT]!=NULL)
m_pLayouts[IMAGE_FLAT]->EnumMemoryRegions(flags);
if (m_pLayouts[IMAGE_LOADED].IsValid() && m_pLayouts[IMAGE_LOADED]!=NULL)
m_pLayouts[IMAGE_LOADED]->EnumMemoryRegions(flags);
}
#endif // #ifdef DACCESS_COMPILE
PEImage::PEImage():
m_path(),
m_refCount(1),
m_bInHashMap(FALSE),
m_bundleFileLocation(),
m_hFile(INVALID_HANDLE_VALUE),
m_dwPEKind(0),
m_dwMachine(0),
#ifdef METADATATRACKER_DATA
m_pMDTracker(NULL),
#endif // METADATATRACKER_DATA
m_pMDImport(NULL),
m_pNativeMDImport(NULL)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
for (DWORD i=0;i<ARRAY_SIZE(m_pLayouts);i++)
m_pLayouts[i]=NULL ;
m_pLayoutLock=new SimpleRWLock(PREEMPTIVE,LOCK_TYPE_DEFAULT);
}
// Misnomer under the DAC, but has a lot of callers. The DAC can't create layouts, so in that
// case this is a get.
PTR_PEImageLayout PEImage::GetOrCreateLayout(DWORD imageLayoutMask)
{
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
// First attempt to find an existing layout matching imageLayoutMask.
// If that fails, try again with auto-creating helper.
// Note: we use reader-writer lock, but only writes are synchronized.
PTR_PEImageLayout pRetVal = GetExistingLayoutInternal(imageLayoutMask);
if (pRetVal == NULL)
{
#ifndef DACCESS_COMPILE
GCX_PREEMP();
SimpleWriteLockHolder lock(m_pLayoutLock);
pRetVal = GetOrCreateLayoutInternal(imageLayoutMask);
#else
// In DAC builds, we can't create any layouts - we must require that they already exist.
// We also don't take any AddRefs or locks in DAC builds - it's inspection-only.
_ASSERTE_MSG(false, "DACization error - caller expects PEImage layout to exist and it doesn't");
DacError(E_UNEXPECTED);
#endif
}
return pRetVal;
}
#ifndef DACCESS_COMPILE
void PEImage::SetLayout(DWORD dwLayout, PEImageLayout* pLayout)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(dwLayout < IMAGE_COUNT);
_ASSERTE(m_pLayoutLock->IsWriterLock());
_ASSERTE(m_pLayouts[dwLayout] == NULL);
m_pLayouts[dwLayout] = pLayout;
}
PTR_PEImageLayout PEImage::GetOrCreateLayoutInternal(DWORD imageLayoutMask)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
PTR_PEImageLayout pRetVal=GetExistingLayoutInternal(imageLayoutMask);
if (pRetVal==NULL)
{
BOOL bIsLoadedLayoutSuitable = ((imageLayoutMask & PEImageLayout::LAYOUT_LOADED) != 0);
BOOL bIsFlatLayoutSuitable = ((imageLayoutMask & PEImageLayout::LAYOUT_FLAT) != 0);
BOOL bIsLoadedLayoutPreferred = !bIsFlatLayoutSuitable;
#ifdef TARGET_WINDOWS
// on Windows we prefer to just load the file using OS loader
if (!IsInBundle() && bIsLoadedLayoutSuitable)
{
bIsLoadedLayoutPreferred = TRUE;
}
#endif // TARGET_WINDOWS
_ASSERTE(bIsLoadedLayoutSuitable || bIsFlatLayoutSuitable);
if (bIsLoadedLayoutPreferred)
{
_ASSERTE(bIsLoadedLayoutSuitable);
pRetVal = PEImage::CreateLoadedLayout(!bIsFlatLayoutSuitable);
}
if (pRetVal == NULL)
{
_ASSERTE(bIsFlatLayoutSuitable);
pRetVal = PEImage::CreateFlatLayout();
_ASSERTE(pRetVal != NULL);
}
}
_ASSERTE(pRetVal != NULL);
_ASSERTE(this->IsOpened());
return pRetVal;
}
PTR_PEImageLayout PEImage::CreateLoadedLayout(bool throwOnFailure)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(m_pLayoutLock->IsWriterLock());
}
CONTRACTL_END;
PEImageLayout * pLoadLayout = NULL;
HRESULT loadFailure = S_OK;
pLoadLayout = PEImageLayout::Load(this, &loadFailure);
if (pLoadLayout != NULL)
{
SetLayout(IMAGE_LOADED,pLoadLayout);
// loaded layout is functionally a superset of flat,
// so fill the flat slot, if not filled already.
if (m_pLayouts[IMAGE_FLAT] == NULL)
{
pLoadLayout->AddRef();
SetLayout(IMAGE_FLAT, pLoadLayout);
}
}
if (pLoadLayout == NULL && throwOnFailure)
{
loadFailure = FAILED(loadFailure) ? loadFailure : COR_E_BADIMAGEFORMAT;
EEFileLoadException::Throw(GetPath(), loadFailure);
}
return pLoadLayout;
}
PTR_PEImageLayout PEImage::CreateFlatLayout()
{
CONTRACTL
{
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(m_pLayoutLock->IsWriterLock());
}
CONTRACTL_END;
PTR_PEImageLayout pFlatLayout = PEImageLayout::LoadFlat(this);
SetLayout(IMAGE_FLAT, pFlatLayout);
return pFlatLayout;
}
/* static */
PTR_PEImage PEImage::CreateFromByteArray(const BYTE* array, COUNT_T size)
{
CONTRACT(PTR_PEImage)
{
STANDARD_VM_CHECK;
}
CONTRACT_END;
PEImageHolder pImage(new PEImage());
PTR_PEImageLayout pLayout = PEImageLayout::CreateFromByteArray(pImage, array, size);
_ASSERTE(!pLayout->IsMapped());
SimpleWriteLockHolder lock(pImage->m_pLayoutLock);
pImage->SetLayout(IMAGE_FLAT,pLayout);
RETURN dac_cast<PTR_PEImage>(pImage.Extract());
}
#ifndef TARGET_UNIX
/* static */
PTR_PEImage PEImage::CreateFromHMODULE(HMODULE hMod)
{
CONTRACT(PTR_PEImage)
{
STANDARD_VM_CHECK;
PRECONDITION(hMod!=NULL);
POSTCONDITION(RETVAL->HasLoadedLayout());
}
CONTRACT_END;
StackSString path;
WszGetModuleFileName(hMod, path);
PEImageHolder pImage(PEImage::OpenImage(path, MDInternalImport_Default));
if (!pImage->HasLoadedLayout())
{
PTR_PEImageLayout pLayout = PEImageLayout::CreateFromHMODULE(hMod, pImage);
SimpleWriteLockHolder lock(pImage->m_pLayoutLock);
pImage->SetLayout(IMAGE_LOADED, pLayout);
if (pImage->m_pLayouts[IMAGE_FLAT] == NULL)
{
pLayout->AddRef();
pImage->SetLayout(IMAGE_FLAT, pLayout);
}
}
_ASSERTE(pImage->m_pLayouts[IMAGE_FLAT] != NULL);
RETURN dac_cast<PTR_PEImage>(pImage.Extract());
}
#endif // !TARGET_UNIX
#endif //DACCESS_COMPILE
HANDLE PEImage::GetFileHandle()
{
CONTRACTL
{
STANDARD_VM_CHECK;
PRECONDITION(m_pLayoutLock->IsWriterLock());
}
CONTRACTL_END;
if (m_hFile!=INVALID_HANDLE_VALUE)
return m_hFile;
HRESULT hr = TryOpenFile(/*takeLock*/ false);
if (m_hFile == INVALID_HANDLE_VALUE)
{
#if !defined(DACCESS_COMPILE)
EEFileLoadException::Throw(GetPathToLoad(), hr);
#else // defined(DACCESS_COMPILE)
ThrowHR(hr);
#endif // !defined(DACCESS_COMPILE)
}
return m_hFile;
}
HRESULT PEImage::TryOpenFile(bool takeLock)
{
STANDARD_VM_CONTRACT;
SimpleWriteLockHolder lock(m_pLayoutLock, takeLock);
if (m_hFile!=INVALID_HANDLE_VALUE)
return S_OK;
ErrorModeHolder mode(SEM_NOOPENFILEERRORBOX | SEM_FAILCRITICALERRORS);
m_hFile=WszCreateFile((LPCWSTR)GetPathToLoad(),
GENERIC_READ
#if TARGET_WINDOWS
// the file may have native code sections, make sure we are allowed to execute the file
| GENERIC_EXECUTE
#endif
,
FILE_SHARE_READ|FILE_SHARE_DELETE,
NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
NULL);
if (m_hFile != INVALID_HANDLE_VALUE)
return S_OK;
if (GetLastError())
return HRESULT_FROM_WIN32(GetLastError());
return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
}
BOOL PEImage::IsPtrInImage(PTR_CVOID data)
{
CONTRACTL
{
INSTANCE_CHECK;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
SUPPORTS_DAC;
}
CONTRACTL_END;
for (int i = 0; i < IMAGE_COUNT; i++)
{
if (m_pLayouts[i] != NULL)
{
if (m_pLayouts[i]->PointerInPE(data))
return TRUE;
}
}
return FALSE;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
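Editor's note: the description above says the primary fix was to make `gtHasReg()` aware of multi-reg locals. The short, self-contained C++ mock below is an illustrative sketch only, not the RyuJIT source; every type and member name in it (MockMultiRegStore, kSlotCount, regs, REG_NA) is invented for this example. It shows the idea: a "does this node have a register?" query over a multi-reg definition has to look at every register slot, because slot 0 can be spilled while a later slot is enregistered.

#include <cstdio>

static const int REG_NA = -1;             // mock "no register assigned" sentinel

struct MockMultiRegStore                  // stands in for a multi-reg store to a local
{
    static const unsigned kSlotCount = 2;
    int regs[kSlotCount];                 // per-slot register, REG_NA when that slot is spilled

    // Pre-fix behavior described above: only slot 0 was consulted.
    bool HasRegSlot0Only() const { return regs[0] != REG_NA; }

    // Post-fix idea: any enregistered slot counts.
    bool HasRegAnySlot() const
    {
        for (unsigned i = 0; i < kSlotCount; i++)
        {
            if (regs[i] != REG_NA)
                return true;
        }
        return false;
    }
};

int main()
{
    MockMultiRegStore store = { { REG_NA, 3 } }; // slot 0 spilled, slot 1 in register #3
    std::printf("slot0-only: %d, any-slot: %d\n", store.HasRegSlot0Only(), store.HasRegAnySlot());
    return 0;
}

With the values above, the slot-0-only query answers "no register" even though register #3 is live, which is the shape of the discrepancy the description attributes to the pre-fix `gtHasReg()`.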
| ./src/tests/Interop/PInvoke/Generics/GenericsNative.SequentialClassF.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <stdint.h>
#include <xplatform.h>
#include <platformdefines.h>
struct SequentialClassF
{
float value;
};
static SequentialClassF SequentialClassFValue = { };
extern "C" DLL_EXPORT SequentialClassF* STDMETHODCALLTYPE GetSequentialClassF(float value)
{
throw "P/Invoke for SequentialClass<float> should be unsupported.";
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetSequentialClassFOut(float value, SequentialClassF** pValue)
{
throw "P/Invoke for SequentialClass<float> should be unsupported.";
}
extern "C" DLL_EXPORT const SequentialClassF** STDMETHODCALLTYPE GetSequentialClassFPtr(float value)
{
throw "P/Invoke for SequentialClass<float> should be unsupported.";
}
extern "C" DLL_EXPORT SequentialClassF* STDMETHODCALLTYPE AddSequentialClassF(SequentialClassF* lhs, SequentialClassF* rhs)
{
throw "P/Invoke for SequentialClass<float> should be unsupported.";
}
extern "C" DLL_EXPORT SequentialClassF* STDMETHODCALLTYPE AddSequentialClassFs(const SequentialClassF** pValues, uint32_t count)
{
throw "P/Invoke for SequentialClass<float> should be unsupported.";
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <stdint.h>
#include <xplatform.h>
#include <platformdefines.h>
struct SequentialClassF
{
float value;
};
static SequentialClassF SequentialClassFValue = { };
extern "C" DLL_EXPORT SequentialClassF* STDMETHODCALLTYPE GetSequentialClassF(float value)
{
throw "P/Invoke for SequentialClass<float> should be unsupported.";
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetSequentialClassFOut(float value, SequentialClassF** pValue)
{
throw "P/Invoke for SequentialClass<float> should be unsupported.";
}
extern "C" DLL_EXPORT const SequentialClassF** STDMETHODCALLTYPE GetSequentialClassFPtr(float value)
{
throw "P/Invoke for SequentialClass<float> should be unsupported.";
}
extern "C" DLL_EXPORT SequentialClassF* STDMETHODCALLTYPE AddSequentialClassF(SequentialClassF* lhs, SequentialClassF* rhs)
{
throw "P/Invoke for SequentialClass<float> should be unsupported.";
}
extern "C" DLL_EXPORT SequentialClassF* STDMETHODCALLTYPE AddSequentialClassFs(const SequentialClassF** pValues, uint32_t count)
{
throw "P/Invoke for SequentialClass<float> should be unsupported.";
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
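Editor's note: cleanup item 1 in the description above concerns calling `gcInfo.gcMarkRegSetNpt` with a register number where a register mask was expected. The self-contained C++ mock below is illustrative only; `regNumber`, `regMaskTP`, and `genRegMask` are re-declared locally as stand-ins rather than taken from the JIT headers. It shows why that mistake compiles silently but marks the wrong registers.

#include <cstdint>
#include <cstdio>

typedef int      regNumber;                 // mock: a register index, e.g. 3
typedef uint64_t regMaskTP;                 // mock: a bit set of registers

static regMaskTP genRegMask(regNumber reg)  // number -> single-bit mask
{
    return (regMaskTP)1 << reg;
}

int main()
{
    regNumber reg = 3;

    regMaskTP wrongMask = (regMaskTP)reg;   // 0b0011: silently means registers 0 and 1
    regMaskTP rightMask = genRegMask(reg);  // 0b1000: register 3, as intended

    std::printf("as number: 0x%llx  as mask: 0x%llx\n",
                (unsigned long long)wrongMask, (unsigned long long)rightMask);
    return 0;
}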
| ./src/coreclr/pal/tests/palsuite/miscellaneous/IsBadWritePtr/test1/test.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source: test.c
**
** Purpose: IsBadWritePtr() function
**
**
**=========================================================*/
#include <palsuite.h>
#define MEMORY_AMOUNT 16
PALTEST(miscellaneous_IsBadWritePtr_test1_paltest_isbadwriteptr_test1, "miscellaneous/IsBadWritePtr/test1/paltest_isbadwriteptr_test1")
{
void * TestingPointer = NULL;
BOOL ResultValue = 0;
/*
* Initialize the PAL and return FAILURE if this fails
*/
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
TestingPointer = malloc(MEMORY_AMOUNT);
if ( TestingPointer == NULL )
{
Fail("ERROR: Failed to allocate memory for TestingPointer pointer. "
"Can't properly exec test case without this.\n");
}
/* This should be writeable, return 0 */
ResultValue = IsBadWritePtr(TestingPointer,MEMORY_AMOUNT);
if(ResultValue != 0)
{
free(TestingPointer);
Fail("ERROR: Returned %d when 0 should have been returned, checking "
"to see if writable memory is unwriteable.\n",ResultValue);
}
free(TestingPointer);
/* This should be !writeable, return nonzero */
TestingPointer = (void*)0x08; /* non writeable address */
ResultValue = IsBadWritePtr(TestingPointer,sizeof(int));
if(ResultValue == 0)
{
Fail("ERROR: Returned %d when nonezero should have been returned, "
"checking to see if unwriteable memory is writeable.\n",
ResultValue);
}
/* This should be !writeable, return nonzero */
ResultValue = IsBadWritePtr(NULL,MEMORY_AMOUNT);
if(ResultValue == 0)
{
Fail("ERROR: Returned %d when nonezero should have been "
"returned,checking "
"to see if a NULL pointer is writeable.\n",
ResultValue);
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source: test.c
**
** Purpose: IsBadWritePtr() function
**
**
**=========================================================*/
#include <palsuite.h>
#define MEMORY_AMOUNT 16
PALTEST(miscellaneous_IsBadWritePtr_test1_paltest_isbadwriteptr_test1, "miscellaneous/IsBadWritePtr/test1/paltest_isbadwriteptr_test1")
{
void * TestingPointer = NULL;
BOOL ResultValue = 0;
/*
* Initialize the PAL and return FAILURE if this fails
*/
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
TestingPointer = malloc(MEMORY_AMOUNT);
if ( TestingPointer == NULL )
{
Fail("ERROR: Failed to allocate memory for TestingPointer pointer. "
"Can't properly exec test case without this.\n");
}
/* This should be writeable, return 0 */
ResultValue = IsBadWritePtr(TestingPointer,MEMORY_AMOUNT);
if(ResultValue != 0)
{
free(TestingPointer);
Fail("ERROR: Returned %d when 0 should have been returned, checking "
"to see if writable memory is unwriteable.\n",ResultValue);
}
free(TestingPointer);
/* This should be !writeable, return nonzero */
TestingPointer = (void*)0x08; /* non writeable address */
ResultValue = IsBadWritePtr(TestingPointer,sizeof(int));
if(ResultValue == 0)
{
Fail("ERROR: Returned %d when nonezero should have been returned, "
"checking to see if unwriteable memory is writeable.\n",
ResultValue);
}
/* This should be !writeable, return nonzero */
ResultValue = IsBadWritePtr(NULL,MEMORY_AMOUNT);
if(ResultValue == 0)
{
Fail("ERROR: Returned %d when nonezero should have been "
"returned,checking "
"to see if a NULL pointer is writeable.\n",
ResultValue);
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
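Editor's note: the first paragraph of the description above is about GC pointers in non-first slots never being added to the tracked register set. The self-contained C++ mock below is a sketch of that bookkeeping with invented names throughout (MockSlot, TrackGCRegs); it is not the JIT's GCInfo code. Every enregistered slot whose contents are a GC ref contributes its register bit to the tracked set, so consulting only slot 0 would leave a GC pointer in a later slot's register untracked.

#include <cstdint>
#include <cstdio>

static const int REG_NA = -1;

struct MockSlot
{
    int  reg;      // assigned register, REG_NA if this slot lives on the stack
    bool isGCRef;  // does this slot hold a GC pointer?
};

// Add every enregistered GC-ref slot of a multi-reg definition to the tracked set.
static uint64_t TrackGCRegs(const MockSlot* slots, unsigned count, uint64_t trackedRegs)
{
    for (unsigned i = 0; i < count; i++)
    {
        if ((slots[i].reg != REG_NA) && slots[i].isGCRef)
        {
            trackedRegs |= (uint64_t)1 << slots[i].reg;
        }
    }
    return trackedRegs;
}

int main()
{
    MockSlot slots[2] = { { REG_NA, false }, { 3, true } }; // GC ref lives in register #3
    std::printf("tracked reg mask: 0x%llx\n", (unsigned long long)TrackGCRegs(slots, 2, 0));
    return 0;
}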
| ./src/mono/mono/mini/simd-methods.h | METHOD2(".ctor", ctor)
METHOD(CopyTo)
METHOD(Equals)
METHOD(GreaterThan)
METHOD(GreaterThanOrEqual)
METHOD(LessThan)
METHOD(LessThanOrEqual)
METHOD(Min)
METHOD(Max)
METHOD(MinScalar)
METHOD(MaxScalar)
METHOD(PopCount)
METHOD(LeadingZeroCount)
METHOD(get_Count)
METHOD(get_IsHardwareAccelerated)
METHOD(get_IsSupported)
METHOD(get_AllBitsSet)
METHOD(get_Item)
METHOD(get_One)
METHOD(get_Zero)
METHOD(op_Addition)
METHOD(op_BitwiseAnd)
METHOD(op_BitwiseOr)
METHOD(op_Division)
METHOD(op_Equality)
METHOD(op_ExclusiveOr)
METHOD(op_Explicit)
METHOD(op_Inequality)
METHOD(op_Multiply)
METHOD(op_Subtraction)
// Vector
METHOD(ConvertToInt32)
METHOD(ConvertToInt32WithTruncation)
METHOD(ConvertToUInt32)
METHOD(ConvertToInt64)
METHOD(ConvertToInt64WithTruncation)
METHOD(ConvertToUInt64)
METHOD(ConvertToSingle)
METHOD(ConvertToDouble)
METHOD(Narrow)
METHOD(Widen)
// Vector64, Vector128, Vector256
METHOD(As)
METHOD(AsByte)
METHOD(AsDouble)
METHOD(AsInt16)
METHOD(AsInt32)
METHOD(AsInt64)
METHOD(AsSByte)
METHOD(AsSingle)
METHOD(AsUInt16)
METHOD(AsUInt32)
METHOD(AsUInt64)
METHOD(AsVector128)
METHOD(AsVector2)
METHOD(AsVector256)
METHOD(AsVector3)
METHOD(AsVector4)
METHOD(BitwiseAnd)
METHOD(BitwiseOr)
METHOD(Create)
METHOD(CreateScalar)
METHOD(CreateScalarUnsafe)
METHOD(ConditionalSelect)
METHOD(EqualsAll)
METHOD(EqualsAny)
METHOD(GetElement)
METHOD(GetLower)
METHOD(GetUpper)
METHOD(ToScalar)
METHOD(ToVector128)
METHOD(ToVector128Unsafe)
METHOD(ToVector256)
METHOD(ToVector256Unsafe)
METHOD(WithElement)
METHOD(WithLower)
METHOD(WithUpper)
// Bmi1
METHOD(AndNot)
METHOD(BitFieldExtract)
METHOD(ExtractLowestSetBit)
METHOD(GetMaskUpToLowestSetBit)
METHOD(ResetLowestSetBit)
METHOD(TrailingZeroCount)
// Bmi2
METHOD(ZeroHighBits)
METHOD(MultiplyNoFlags)
METHOD(ParallelBitDeposit)
METHOD(ParallelBitExtract)
// Sse
METHOD(Add)
METHOD(CompareGreaterThanOrEqual)
METHOD(CompareLessThanOrEqual)
METHOD(CompareNotEqual)
METHOD(CompareNotGreaterThan)
METHOD(CompareNotGreaterThanOrEqual)
METHOD(CompareNotLessThan)
METHOD(CompareNotLessThanOrEqual)
METHOD(CompareScalarGreaterThan)
METHOD(CompareScalarGreaterThanOrEqual)
METHOD(CompareScalarLessThan)
METHOD(CompareScalarLessThanOrEqual)
METHOD(CompareScalarNotEqual)
METHOD(CompareScalarNotGreaterThan)
METHOD(CompareScalarNotGreaterThanOrEqual)
METHOD(CompareScalarNotLessThan)
METHOD(CompareScalarNotLessThanOrEqual)
METHOD(CompareScalarOrderedEqual)
METHOD(CompareScalarOrderedGreaterThan)
METHOD(CompareScalarOrderedGreaterThanOrEqual)
METHOD(CompareScalarOrderedLessThan)
METHOD(CompareScalarOrderedLessThanOrEqual)
METHOD(CompareScalarOrderedNotEqual)
METHOD(CompareScalarUnorderedEqual)
METHOD(CompareScalarUnorderedGreaterThan)
METHOD(CompareScalarUnorderedGreaterThanOrEqual)
METHOD(CompareScalarUnorderedLessThan)
METHOD(CompareScalarUnorderedLessThanOrEqual)
METHOD(CompareScalarUnorderedNotEqual)
METHOD(CompareOrdered)
METHOD(CompareUnordered)
METHOD(CompareScalarOrdered)
METHOD(CompareScalarUnordered)
METHOD(ConvertScalarToVector128Single)
METHOD(Divide)
METHOD(DivideScalar)
METHOD(Store)
METHOD(StoreFence)
METHOD(StoreHigh)
METHOD(StoreLow)
METHOD(Subtract)
METHOD(SubtractScalar)
METHOD(CompareEqual)
METHOD(Extract)
METHOD(LoadHigh)
METHOD(LoadLow)
METHOD(LoadVector128)
METHOD(LoadScalarVector128)
METHOD(MoveHighToLow)
METHOD(MoveLowToHigh)
METHOD(MoveMask)
METHOD(MoveScalar)
METHOD(Multiply)
METHOD(MultiplyAddAdjacent)
METHOD(MultiplyScalar)
METHOD(Shuffle)
METHOD(UnpackHigh)
METHOD(UnpackLow)
METHOD(Prefetch0)
METHOD(Prefetch1)
METHOD(Prefetch2)
METHOD(PrefetchNonTemporal)
METHOD(Reciprocal)
METHOD(ReciprocalScalar)
METHOD(ReciprocalSqrt)
METHOD(ReciprocalSqrtScalar)
METHOD(Sqrt)
METHOD(SqrtScalar)
// Sse2
METHOD(AddSaturate)
METHOD(AddScalar)
METHOD(And)
METHOD(Average)
METHOD(Or)
METHOD(LoadAlignedVector128)
METHOD(Xor)
METHOD(CompareGreaterThan)
METHOD(CompareScalarEqual)
METHOD(ConvertScalarToVector128Double)
METHOD(ConvertScalarToVector128Int32)
METHOD(ConvertScalarToVector128Int64)
METHOD(ConvertScalarToVector128UInt32)
METHOD(ConvertScalarToVector128UInt64)
METHOD(ConvertToVector128Double)
METHOD(ConvertToVector128Int32)
METHOD(ConvertToVector128Int32WithTruncation)
METHOD(ConvertToVector128Single)
METHOD(MaskMove)
METHOD(MultiplyHigh)
METHOD(MultiplyLow)
METHOD(PackSignedSaturate)
METHOD(PackUnsignedSaturate)
METHOD(ShuffleHigh)
METHOD(ShuffleLow)
METHOD(SubtractSaturate)
METHOD(SumAbsoluteDifferences)
METHOD(StoreScalar)
METHOD(StoreAligned)
METHOD(StoreAlignedNonTemporal)
METHOD(StoreNonTemporal)
METHOD(ShiftLeftLogical)
METHOD(ShiftLeftLogical128BitLane)
METHOD(ShiftRightArithmetic)
METHOD(ShiftRightLogical)
METHOD(ShiftRightLogical128BitLane)
METHOD(CompareLessThan)
METHOD(LoadFence)
METHOD(MemoryFence)
// Sse3
METHOD(HorizontalAdd)
METHOD(HorizontalSubtract)
METHOD(AddSubtract)
METHOD(LoadAndDuplicateToVector128)
METHOD(LoadDquVector128)
METHOD(MoveAndDuplicate)
METHOD(MoveHighAndDuplicate)
METHOD(MoveLowAndDuplicate)
// Ssse3
METHOD(Abs) // Also used by ARM64
METHOD(AlignRight)
METHOD(HorizontalAddSaturate)
METHOD(HorizontalSubtractSaturate)
METHOD(MultiplyHighRoundScale)
METHOD(Sign)
// Sse41
METHOD(Blend)
METHOD(BlendVariable)
METHOD(Ceiling)
METHOD(CeilingScalar)
METHOD(ConvertToVector128Int16)
METHOD(ConvertToVector128Int64)
METHOD(Floor)
METHOD(FloorScalar)
METHOD(Insert)
METHOD(LoadAlignedVector128NonTemporal)
METHOD(RoundCurrentDirectionScalar)
METHOD(RoundToNearestInteger)
METHOD(RoundToNearestIntegerScalar)
METHOD(RoundToNegativeInfinity)
METHOD(RoundToNegativeInfinityScalar)
METHOD(RoundToPositiveInfinity)
METHOD(RoundToPositiveInfinityScalar)
METHOD(RoundToZero)
METHOD(RoundToZeroScalar)
METHOD(RoundCurrentDirection)
METHOD(MinHorizontal)
METHOD(TestC)
METHOD(TestNotZAndNotC)
METHOD(TestZ)
METHOD(DotProduct)
METHOD(MultipleSumAbsoluteDifferences)
// Sse42
METHOD(Crc32)
// Aes
METHOD(Decrypt)
METHOD(DecryptLast)
METHOD(Encrypt)
METHOD(EncryptLast)
METHOD(InverseMixColumns)
METHOD(KeygenAssist)
METHOD(PolynomialMultiplyWideningLower)
METHOD(PolynomialMultiplyWideningUpper)
// Pclmulqdq
METHOD(CarrylessMultiply)
// ArmBase
METHOD(LeadingSignCount)
METHOD(ReverseElementBits)
// Crc32
METHOD(ComputeCrc32)
METHOD(ComputeCrc32C)
// X86Base
METHOD(BitScanForward)
METHOD(BitScanReverse)
// Crypto
METHOD(FixedRotate)
METHOD(HashUpdateChoose)
METHOD(HashUpdateMajority)
METHOD(HashUpdateParity)
METHOD(HashUpdate1)
METHOD(HashUpdate2)
METHOD(ScheduleUpdate0)
METHOD(ScheduleUpdate1)
METHOD(MixColumns)
// AdvSimd
METHOD(AbsSaturate)
METHOD(AbsSaturateScalar)
METHOD(AbsScalar)
METHOD(AbsoluteCompareGreaterThan)
METHOD(AbsoluteCompareGreaterThanOrEqual)
METHOD(AbsoluteCompareGreaterThanOrEqualScalar)
METHOD(AbsoluteCompareGreaterThanScalar)
METHOD(AbsoluteCompareLessThan)
METHOD(AbsoluteCompareLessThanOrEqual)
METHOD(AbsoluteCompareLessThanOrEqualScalar)
METHOD(AbsoluteCompareLessThanScalar)
METHOD(AbsoluteDifference)
METHOD(AbsoluteDifferenceAdd)
METHOD(AbsoluteDifferenceScalar)
METHOD(AbsoluteDifferenceWideningLower)
METHOD(AbsoluteDifferenceWideningLowerAndAdd)
METHOD(AbsoluteDifferenceWideningUpper)
METHOD(AbsoluteDifferenceWideningUpperAndAdd)
METHOD(AddAcross)
METHOD(AddAcrossWidening)
METHOD(AddHighNarrowingLower)
METHOD(AddHighNarrowingUpper)
METHOD(AddPairwise)
METHOD(AddPairwiseScalar)
METHOD(AddPairwiseWidening)
METHOD(AddPairwiseWideningAndAdd)
METHOD(AddPairwiseWideningAndAddScalar)
METHOD(AddPairwiseWideningScalar)
METHOD(AddRoundedHighNarrowingLower)
METHOD(AddRoundedHighNarrowingUpper)
METHOD(AddSaturateScalar)
METHOD(AddWideningLower)
METHOD(AddWideningUpper)
METHOD(BitwiseClear)
METHOD(BitwiseSelect)
METHOD(CompareEqualScalar)
METHOD(CompareGreaterThanOrEqualScalar)
METHOD(CompareGreaterThanScalar)
METHOD(CompareLessThanOrEqualScalar)
METHOD(CompareLessThanScalar)
METHOD(CompareTest)
METHOD(CompareTestScalar)
METHOD(ConvertToDoubleScalar)
METHOD(ConvertToDoubleUpper)
METHOD(ConvertToInt32RoundAwayFromZero)
METHOD(ConvertToInt32RoundAwayFromZeroScalar)
METHOD(ConvertToInt32RoundToEven)
METHOD(ConvertToInt32RoundToEvenScalar)
METHOD(ConvertToInt32RoundToNegativeInfinity)
METHOD(ConvertToInt32RoundToNegativeInfinityScalar)
METHOD(ConvertToInt32RoundToPositiveInfinity)
METHOD(ConvertToInt32RoundToPositiveInfinityScalar)
METHOD(ConvertToInt32RoundToZero)
METHOD(ConvertToInt32RoundToZeroScalar)
METHOD(ConvertToInt64RoundAwayFromZero)
METHOD(ConvertToInt64RoundAwayFromZeroScalar)
METHOD(ConvertToInt64RoundToEven)
METHOD(ConvertToInt64RoundToEvenScalar)
METHOD(ConvertToInt64RoundToNegativeInfinity)
METHOD(ConvertToInt64RoundToNegativeInfinityScalar)
METHOD(ConvertToInt64RoundToPositiveInfinity)
METHOD(ConvertToInt64RoundToPositiveInfinityScalar)
METHOD(ConvertToInt64RoundToZero)
METHOD(ConvertToInt64RoundToZeroScalar)
METHOD(ConvertToSingleLower)
METHOD(ConvertToSingleRoundToOddLower)
METHOD(ConvertToSingleRoundToOddUpper)
METHOD(ConvertToSingleScalar)
METHOD(ConvertToSingleUpper)
METHOD(ConvertToUInt32RoundAwayFromZero)
METHOD(ConvertToUInt32RoundAwayFromZeroScalar)
METHOD(ConvertToUInt32RoundToEven)
METHOD(ConvertToUInt32RoundToEvenScalar)
METHOD(ConvertToUInt32RoundToNegativeInfinity)
METHOD(ConvertToUInt32RoundToNegativeInfinityScalar)
METHOD(ConvertToUInt32RoundToPositiveInfinity)
METHOD(ConvertToUInt32RoundToPositiveInfinityScalar)
METHOD(ConvertToUInt32RoundToZero)
METHOD(ConvertToUInt32RoundToZeroScalar)
METHOD(ConvertToUInt64RoundAwayFromZero)
METHOD(ConvertToUInt64RoundAwayFromZeroScalar)
METHOD(ConvertToUInt64RoundToEven)
METHOD(ConvertToUInt64RoundToEvenScalar)
METHOD(ConvertToUInt64RoundToNegativeInfinity)
METHOD(ConvertToUInt64RoundToNegativeInfinityScalar)
METHOD(ConvertToUInt64RoundToPositiveInfinity)
METHOD(ConvertToUInt64RoundToPositiveInfinityScalar)
METHOD(ConvertToUInt64RoundToZero)
METHOD(ConvertToUInt64RoundToZeroScalar)
METHOD(DuplicateSelectedScalarToVector128)
METHOD(DuplicateSelectedScalarToVector64)
METHOD(DuplicateToVector128)
METHOD(DuplicateToVector64)
METHOD(ExtractNarrowingLower)
METHOD(ExtractNarrowingSaturateLower)
METHOD(ExtractNarrowingSaturateScalar)
METHOD(ExtractNarrowingSaturateUnsignedLower)
METHOD(ExtractNarrowingSaturateUnsignedScalar)
METHOD(ExtractNarrowingSaturateUnsignedUpper)
METHOD(ExtractNarrowingSaturateUpper)
METHOD(ExtractNarrowingUpper)
METHOD(ExtractVector128)
METHOD(ExtractVector64)
METHOD(FusedAddHalving)
METHOD(FusedAddRoundedHalving)
METHOD(FusedMultiplyAdd)
METHOD(FusedMultiplyAddByScalar)
METHOD(FusedMultiplyAddBySelectedScalar)
METHOD(FusedMultiplyAddNegatedScalar)
METHOD(FusedMultiplyAddScalar)
METHOD(FusedMultiplyAddScalarBySelectedScalar)
METHOD(FusedMultiplySubtract)
METHOD(FusedMultiplySubtractByScalar)
METHOD(FusedMultiplySubtractBySelectedScalar)
METHOD(FusedMultiplySubtractNegatedScalar)
METHOD(FusedMultiplySubtractScalar)
METHOD(FusedMultiplySubtractScalarBySelectedScalar)
METHOD(FusedSubtractHalving)
METHOD(InsertScalar)
METHOD(InsertSelectedScalar)
METHOD(LoadAndInsertScalar)
METHOD(LoadAndReplicateToVector128)
METHOD(LoadAndReplicateToVector64)
METHOD(LoadPairScalarVector64)
METHOD(LoadPairScalarVector64NonTemporal)
METHOD(LoadPairVector128)
METHOD(LoadPairVector128NonTemporal)
METHOD(LoadPairVector64)
METHOD(LoadPairVector64NonTemporal)
METHOD(LoadVector64)
METHOD(MaxAcross)
METHOD(MaxNumber)
METHOD(MaxNumberAcross)
METHOD(MaxNumberPairwise)
METHOD(MaxNumberPairwiseScalar)
METHOD(MaxNumberScalar)
METHOD(MaxPairwise)
METHOD(MaxPairwiseScalar)
METHOD(MinAcross)
METHOD(MinNumber)
METHOD(MinNumberAcross)
METHOD(MinNumberPairwise)
METHOD(MinNumberPairwiseScalar)
METHOD(MinNumberScalar)
METHOD(MinPairwise)
METHOD(MinPairwiseScalar)
METHOD(MultiplyAdd)
METHOD(MultiplyAddByScalar)
METHOD(MultiplyAddBySelectedScalar)
METHOD(MultiplyByScalar)
METHOD(MultiplyBySelectedScalar)
METHOD(MultiplyBySelectedScalarWideningLower)
METHOD(MultiplyBySelectedScalarWideningLowerAndAdd)
METHOD(MultiplyBySelectedScalarWideningLowerAndSubtract)
METHOD(MultiplyBySelectedScalarWideningUpper)
METHOD(MultiplyBySelectedScalarWideningUpperAndAdd)
METHOD(MultiplyBySelectedScalarWideningUpperAndSubtract)
METHOD(MultiplyDoublingByScalarSaturateHigh)
METHOD(MultiplyDoublingBySelectedScalarSaturateHigh)
METHOD(MultiplyDoublingSaturateHigh)
METHOD(MultiplyDoublingSaturateHighScalar)
METHOD(MultiplyDoublingScalarBySelectedScalarSaturateHigh)
METHOD(MultiplyDoublingWideningAndAddSaturateScalar)
METHOD(MultiplyDoublingWideningAndSubtractSaturateScalar)
METHOD(MultiplyDoublingWideningLowerAndAddSaturate)
METHOD(MultiplyDoublingWideningLowerAndSubtractSaturate)
METHOD(MultiplyDoublingWideningLowerByScalarAndAddSaturate)
METHOD(MultiplyDoublingWideningLowerByScalarAndSubtractSaturate)
METHOD(MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate)
METHOD(MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate)
METHOD(MultiplyDoublingWideningSaturateLower)
METHOD(MultiplyDoublingWideningSaturateLowerByScalar)
METHOD(MultiplyDoublingWideningSaturateLowerBySelectedScalar)
METHOD(MultiplyDoublingWideningSaturateScalar)
METHOD(MultiplyDoublingWideningSaturateScalarBySelectedScalar)
METHOD(MultiplyDoublingWideningSaturateUpper)
METHOD(MultiplyDoublingWideningSaturateUpperByScalar)
METHOD(MultiplyDoublingWideningSaturateUpperBySelectedScalar)
METHOD(MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate)
METHOD(MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate)
METHOD(MultiplyDoublingWideningUpperAndAddSaturate)
METHOD(MultiplyDoublingWideningUpperAndSubtractSaturate)
METHOD(MultiplyDoublingWideningUpperByScalarAndAddSaturate)
METHOD(MultiplyDoublingWideningUpperByScalarAndSubtractSaturate)
METHOD(MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate)
METHOD(MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate)
METHOD(MultiplyExtended)
METHOD(MultiplyExtendedByScalar)
METHOD(MultiplyExtendedBySelectedScalar)
METHOD(MultiplyExtendedScalar)
METHOD(MultiplyExtendedScalarBySelectedScalar)
METHOD(MultiplyRoundedDoublingByScalarSaturateHigh)
METHOD(MultiplyRoundedDoublingBySelectedScalarSaturateHigh)
METHOD(MultiplyRoundedDoublingSaturateHigh)
METHOD(MultiplyRoundedDoublingSaturateHighScalar)
METHOD(MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh)
METHOD(MultiplyScalarBySelectedScalar)
METHOD(MultiplySubtract)
METHOD(MultiplySubtractByScalar)
METHOD(MultiplySubtractBySelectedScalar)
METHOD(MultiplyWideningLower)
METHOD(MultiplyWideningLowerAndAdd)
METHOD(MultiplyWideningLowerAndSubtract)
METHOD(MultiplyWideningUpper)
METHOD(MultiplyWideningUpperAndAdd)
METHOD(MultiplyWideningUpperAndSubtract)
METHOD(Negate)
METHOD(NegateSaturate)
METHOD(NegateSaturateScalar)
METHOD(NegateScalar)
METHOD(Not)
METHOD(OrNot)
METHOD(OnesComplement)
METHOD(PolynomialMultiply)
METHOD(ReciprocalEstimate)
METHOD(ReciprocalEstimateScalar)
METHOD(ReciprocalExponentScalar)
METHOD(ReciprocalSquareRootEstimate)
METHOD(ReciprocalSquareRootEstimateScalar)
METHOD(ReciprocalSquareRootStep)
METHOD(ReciprocalSquareRootStepScalar)
METHOD(ReciprocalStep)
METHOD(ReciprocalStepScalar)
METHOD(ReverseElement16)
METHOD(ReverseElement32)
METHOD(ReverseElement8)
METHOD(RoundAwayFromZero)
METHOD(RoundAwayFromZeroScalar)
METHOD(RoundToNearest)
METHOD(RoundToNearestScalar)
METHOD(ShiftArithmetic)
METHOD(ShiftArithmeticRounded)
METHOD(ShiftArithmeticRoundedSaturate)
METHOD(ShiftArithmeticRoundedSaturateScalar)
METHOD(ShiftArithmeticRoundedScalar)
METHOD(ShiftArithmeticSaturate)
METHOD(ShiftArithmeticSaturateScalar)
METHOD(ShiftArithmeticScalar)
METHOD(ShiftLeftAndInsert)
METHOD(ShiftLeftAndInsertScalar)
METHOD(ShiftLeftLogicalSaturate)
METHOD(ShiftLeftLogicalSaturateScalar)
METHOD(ShiftLeftLogicalSaturateUnsigned)
METHOD(ShiftLeftLogicalSaturateUnsignedScalar)
METHOD(ShiftLeftLogicalScalar)
METHOD(ShiftLeftLogicalWideningLower)
METHOD(ShiftLeftLogicalWideningUpper)
METHOD(ShiftLogical)
METHOD(ShiftLogicalRounded)
METHOD(ShiftLogicalRoundedSaturate)
METHOD(ShiftLogicalRoundedSaturateScalar)
METHOD(ShiftLogicalRoundedScalar)
METHOD(ShiftLogicalSaturate)
METHOD(ShiftLogicalSaturateScalar)
METHOD(ShiftLogicalScalar)
METHOD(ShiftRightAndInsert)
METHOD(ShiftRightAndInsertScalar)
METHOD(ShiftRightArithmeticAdd)
METHOD(ShiftRightArithmeticAddScalar)
METHOD(ShiftRightArithmeticNarrowingSaturateLower)
METHOD(ShiftRightArithmeticNarrowingSaturateScalar)
METHOD(ShiftRightArithmeticNarrowingSaturateUnsignedLower)
METHOD(ShiftRightArithmeticNarrowingSaturateUnsignedScalar)
METHOD(ShiftRightArithmeticNarrowingSaturateUnsignedUpper)
METHOD(ShiftRightArithmeticNarrowingSaturateUpper)
METHOD(ShiftRightArithmeticRounded)
METHOD(ShiftRightArithmeticRoundedAdd)
METHOD(ShiftRightArithmeticRoundedAddScalar)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateLower)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateScalar)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateUpper)
METHOD(ShiftRightArithmeticRoundedScalar)
METHOD(ShiftRightArithmeticScalar)
METHOD(ShiftRightLogicalAdd)
METHOD(ShiftRightLogicalAddScalar)
METHOD(ShiftRightLogicalNarrowingLower)
METHOD(ShiftRightLogicalNarrowingSaturateLower)
METHOD(ShiftRightLogicalNarrowingSaturateScalar)
METHOD(ShiftRightLogicalNarrowingSaturateUpper)
METHOD(ShiftRightLogicalNarrowingUpper)
METHOD(ShiftRightLogicalRounded)
METHOD(ShiftRightLogicalRoundedAdd)
METHOD(ShiftRightLogicalRoundedAddScalar)
METHOD(ShiftRightLogicalRoundedNarrowingLower)
METHOD(ShiftRightLogicalRoundedNarrowingSaturateLower)
METHOD(ShiftRightLogicalRoundedNarrowingSaturateScalar)
METHOD(ShiftRightLogicalRoundedNarrowingSaturateUpper)
METHOD(ShiftRightLogicalRoundedNarrowingUpper)
METHOD(ShiftRightLogicalRoundedScalar)
METHOD(ShiftRightLogicalScalar)
METHOD(SignExtendWideningLower)
METHOD(SignExtendWideningUpper)
METHOD(StorePair)
METHOD(StorePairNonTemporal)
METHOD(StorePairScalar)
METHOD(StorePairScalarNonTemporal)
METHOD(StoreSelectedScalar)
METHOD(SubtractHighNarrowingLower)
METHOD(SubtractHighNarrowingUpper)
METHOD(SubtractRoundedHighNarrowingLower)
METHOD(SubtractRoundedHighNarrowingUpper)
METHOD(SubtractSaturateScalar)
METHOD(SubtractWideningLower)
METHOD(SubtractWideningUpper)
METHOD(TransposeEven)
METHOD(TransposeOdd)
METHOD(UnzipEven)
METHOD(UnzipOdd)
METHOD(VectorTableLookup)
METHOD(VectorTableLookupExtension)
METHOD(ZeroExtendWideningLower)
METHOD(ZeroExtendWideningUpper)
METHOD(ZipHigh)
METHOD(ZipLow)
// Arm.Rdm
METHOD(MultiplyRoundedDoublingAndAddSaturateHigh)
METHOD(MultiplyRoundedDoublingAndSubtractSaturateHigh)
METHOD(MultiplyRoundedDoublingBySelectedScalarAndAddSaturateHigh)
METHOD(MultiplyRoundedDoublingBySelectedScalarAndSubtractSaturateHigh)
// Arm.Rdm.Arm64
METHOD(MultiplyRoundedDoublingAndAddSaturateHighScalar)
METHOD(MultiplyRoundedDoublingAndSubtractSaturateHighScalar)
METHOD(MultiplyRoundedDoublingScalarBySelectedScalarAndAddSaturateHigh)
METHOD(MultiplyRoundedDoublingScalarBySelectedScalarAndSubtractSaturateHigh)
// Arm.Dp
METHOD(DotProductBySelectedQuadruplet)
| METHOD2(".ctor", ctor)
METHOD(CopyTo)
METHOD(Equals)
METHOD(GreaterThan)
METHOD(GreaterThanOrEqual)
METHOD(LessThan)
METHOD(LessThanOrEqual)
METHOD(Min)
METHOD(Max)
METHOD(MinScalar)
METHOD(MaxScalar)
METHOD(PopCount)
METHOD(LeadingZeroCount)
METHOD(get_Count)
METHOD(get_IsHardwareAccelerated)
METHOD(get_IsSupported)
METHOD(get_AllBitsSet)
METHOD(get_Item)
METHOD(get_One)
METHOD(get_Zero)
METHOD(op_Addition)
METHOD(op_BitwiseAnd)
METHOD(op_BitwiseOr)
METHOD(op_Division)
METHOD(op_Equality)
METHOD(op_ExclusiveOr)
METHOD(op_Explicit)
METHOD(op_Inequality)
METHOD(op_Multiply)
METHOD(op_Subtraction)
// Vector
METHOD(ConvertToInt32)
METHOD(ConvertToInt32WithTruncation)
METHOD(ConvertToUInt32)
METHOD(ConvertToInt64)
METHOD(ConvertToInt64WithTruncation)
METHOD(ConvertToUInt64)
METHOD(ConvertToSingle)
METHOD(ConvertToDouble)
METHOD(Narrow)
METHOD(Widen)
// Vector64, Vector128, Vector256
METHOD(As)
METHOD(AsByte)
METHOD(AsDouble)
METHOD(AsInt16)
METHOD(AsInt32)
METHOD(AsInt64)
METHOD(AsSByte)
METHOD(AsSingle)
METHOD(AsUInt16)
METHOD(AsUInt32)
METHOD(AsUInt64)
METHOD(AsVector128)
METHOD(AsVector2)
METHOD(AsVector256)
METHOD(AsVector3)
METHOD(AsVector4)
METHOD(BitwiseAnd)
METHOD(BitwiseOr)
METHOD(Create)
METHOD(CreateScalar)
METHOD(CreateScalarUnsafe)
METHOD(ConditionalSelect)
METHOD(EqualsAll)
METHOD(EqualsAny)
METHOD(GetElement)
METHOD(GetLower)
METHOD(GetUpper)
METHOD(ToScalar)
METHOD(ToVector128)
METHOD(ToVector128Unsafe)
METHOD(ToVector256)
METHOD(ToVector256Unsafe)
METHOD(WithElement)
METHOD(WithLower)
METHOD(WithUpper)
// Bmi1
METHOD(AndNot)
METHOD(BitFieldExtract)
METHOD(ExtractLowestSetBit)
METHOD(GetMaskUpToLowestSetBit)
METHOD(ResetLowestSetBit)
METHOD(TrailingZeroCount)
// Bmi2
METHOD(ZeroHighBits)
METHOD(MultiplyNoFlags)
METHOD(ParallelBitDeposit)
METHOD(ParallelBitExtract)
// Sse
METHOD(Add)
METHOD(CompareGreaterThanOrEqual)
METHOD(CompareLessThanOrEqual)
METHOD(CompareNotEqual)
METHOD(CompareNotGreaterThan)
METHOD(CompareNotGreaterThanOrEqual)
METHOD(CompareNotLessThan)
METHOD(CompareNotLessThanOrEqual)
METHOD(CompareScalarGreaterThan)
METHOD(CompareScalarGreaterThanOrEqual)
METHOD(CompareScalarLessThan)
METHOD(CompareScalarLessThanOrEqual)
METHOD(CompareScalarNotEqual)
METHOD(CompareScalarNotGreaterThan)
METHOD(CompareScalarNotGreaterThanOrEqual)
METHOD(CompareScalarNotLessThan)
METHOD(CompareScalarNotLessThanOrEqual)
METHOD(CompareScalarOrderedEqual)
METHOD(CompareScalarOrderedGreaterThan)
METHOD(CompareScalarOrderedGreaterThanOrEqual)
METHOD(CompareScalarOrderedLessThan)
METHOD(CompareScalarOrderedLessThanOrEqual)
METHOD(CompareScalarOrderedNotEqual)
METHOD(CompareScalarUnorderedEqual)
METHOD(CompareScalarUnorderedGreaterThan)
METHOD(CompareScalarUnorderedGreaterThanOrEqual)
METHOD(CompareScalarUnorderedLessThan)
METHOD(CompareScalarUnorderedLessThanOrEqual)
METHOD(CompareScalarUnorderedNotEqual)
METHOD(CompareOrdered)
METHOD(CompareUnordered)
METHOD(CompareScalarOrdered)
METHOD(CompareScalarUnordered)
METHOD(ConvertScalarToVector128Single)
METHOD(Divide)
METHOD(DivideScalar)
METHOD(Store)
METHOD(StoreFence)
METHOD(StoreHigh)
METHOD(StoreLow)
METHOD(Subtract)
METHOD(SubtractScalar)
METHOD(CompareEqual)
METHOD(Extract)
METHOD(LoadHigh)
METHOD(LoadLow)
METHOD(LoadVector128)
METHOD(LoadScalarVector128)
METHOD(MoveHighToLow)
METHOD(MoveLowToHigh)
METHOD(MoveMask)
METHOD(MoveScalar)
METHOD(Multiply)
METHOD(MultiplyAddAdjacent)
METHOD(MultiplyScalar)
METHOD(Shuffle)
METHOD(UnpackHigh)
METHOD(UnpackLow)
METHOD(Prefetch0)
METHOD(Prefetch1)
METHOD(Prefetch2)
METHOD(PrefetchNonTemporal)
METHOD(Reciprocal)
METHOD(ReciprocalScalar)
METHOD(ReciprocalSqrt)
METHOD(ReciprocalSqrtScalar)
METHOD(Sqrt)
METHOD(SqrtScalar)
// Sse2
METHOD(AddSaturate)
METHOD(AddScalar)
METHOD(And)
METHOD(Average)
METHOD(Or)
METHOD(LoadAlignedVector128)
METHOD(Xor)
METHOD(CompareGreaterThan)
METHOD(CompareScalarEqual)
METHOD(ConvertScalarToVector128Double)
METHOD(ConvertScalarToVector128Int32)
METHOD(ConvertScalarToVector128Int64)
METHOD(ConvertScalarToVector128UInt32)
METHOD(ConvertScalarToVector128UInt64)
METHOD(ConvertToVector128Double)
METHOD(ConvertToVector128Int32)
METHOD(ConvertToVector128Int32WithTruncation)
METHOD(ConvertToVector128Single)
METHOD(MaskMove)
METHOD(MultiplyHigh)
METHOD(MultiplyLow)
METHOD(PackSignedSaturate)
METHOD(PackUnsignedSaturate)
METHOD(ShuffleHigh)
METHOD(ShuffleLow)
METHOD(SubtractSaturate)
METHOD(SumAbsoluteDifferences)
METHOD(StoreScalar)
METHOD(StoreAligned)
METHOD(StoreAlignedNonTemporal)
METHOD(StoreNonTemporal)
METHOD(ShiftLeftLogical)
METHOD(ShiftLeftLogical128BitLane)
METHOD(ShiftRightArithmetic)
METHOD(ShiftRightLogical)
METHOD(ShiftRightLogical128BitLane)
METHOD(CompareLessThan)
METHOD(LoadFence)
METHOD(MemoryFence)
// Sse3
METHOD(HorizontalAdd)
METHOD(HorizontalSubtract)
METHOD(AddSubtract)
METHOD(LoadAndDuplicateToVector128)
METHOD(LoadDquVector128)
METHOD(MoveAndDuplicate)
METHOD(MoveHighAndDuplicate)
METHOD(MoveLowAndDuplicate)
// Ssse3
METHOD(Abs) // Also used by ARM64
METHOD(AlignRight)
METHOD(HorizontalAddSaturate)
METHOD(HorizontalSubtractSaturate)
METHOD(MultiplyHighRoundScale)
METHOD(Sign)
// Sse41
METHOD(Blend)
METHOD(BlendVariable)
METHOD(Ceiling)
METHOD(CeilingScalar)
METHOD(ConvertToVector128Int16)
METHOD(ConvertToVector128Int64)
METHOD(Floor)
METHOD(FloorScalar)
METHOD(Insert)
METHOD(LoadAlignedVector128NonTemporal)
METHOD(RoundCurrentDirectionScalar)
METHOD(RoundToNearestInteger)
METHOD(RoundToNearestIntegerScalar)
METHOD(RoundToNegativeInfinity)
METHOD(RoundToNegativeInfinityScalar)
METHOD(RoundToPositiveInfinity)
METHOD(RoundToPositiveInfinityScalar)
METHOD(RoundToZero)
METHOD(RoundToZeroScalar)
METHOD(RoundCurrentDirection)
METHOD(MinHorizontal)
METHOD(TestC)
METHOD(TestNotZAndNotC)
METHOD(TestZ)
METHOD(DotProduct)
METHOD(MultipleSumAbsoluteDifferences)
// Sse42
METHOD(Crc32)
// Aes
METHOD(Decrypt)
METHOD(DecryptLast)
METHOD(Encrypt)
METHOD(EncryptLast)
METHOD(InverseMixColumns)
METHOD(KeygenAssist)
METHOD(PolynomialMultiplyWideningLower)
METHOD(PolynomialMultiplyWideningUpper)
// Pclmulqdq
METHOD(CarrylessMultiply)
// ArmBase
METHOD(LeadingSignCount)
METHOD(ReverseElementBits)
// Crc32
METHOD(ComputeCrc32)
METHOD(ComputeCrc32C)
// X86Base
METHOD(BitScanForward)
METHOD(BitScanReverse)
// Crypto
METHOD(FixedRotate)
METHOD(HashUpdateChoose)
METHOD(HashUpdateMajority)
METHOD(HashUpdateParity)
METHOD(HashUpdate1)
METHOD(HashUpdate2)
METHOD(ScheduleUpdate0)
METHOD(ScheduleUpdate1)
METHOD(MixColumns)
// AdvSimd
METHOD(AbsSaturate)
METHOD(AbsSaturateScalar)
METHOD(AbsScalar)
METHOD(AbsoluteCompareGreaterThan)
METHOD(AbsoluteCompareGreaterThanOrEqual)
METHOD(AbsoluteCompareGreaterThanOrEqualScalar)
METHOD(AbsoluteCompareGreaterThanScalar)
METHOD(AbsoluteCompareLessThan)
METHOD(AbsoluteCompareLessThanOrEqual)
METHOD(AbsoluteCompareLessThanOrEqualScalar)
METHOD(AbsoluteCompareLessThanScalar)
METHOD(AbsoluteDifference)
METHOD(AbsoluteDifferenceAdd)
METHOD(AbsoluteDifferenceScalar)
METHOD(AbsoluteDifferenceWideningLower)
METHOD(AbsoluteDifferenceWideningLowerAndAdd)
METHOD(AbsoluteDifferenceWideningUpper)
METHOD(AbsoluteDifferenceWideningUpperAndAdd)
METHOD(AddAcross)
METHOD(AddAcrossWidening)
METHOD(AddHighNarrowingLower)
METHOD(AddHighNarrowingUpper)
METHOD(AddPairwise)
METHOD(AddPairwiseScalar)
METHOD(AddPairwiseWidening)
METHOD(AddPairwiseWideningAndAdd)
METHOD(AddPairwiseWideningAndAddScalar)
METHOD(AddPairwiseWideningScalar)
METHOD(AddRoundedHighNarrowingLower)
METHOD(AddRoundedHighNarrowingUpper)
METHOD(AddSaturateScalar)
METHOD(AddWideningLower)
METHOD(AddWideningUpper)
METHOD(BitwiseClear)
METHOD(BitwiseSelect)
METHOD(CompareEqualScalar)
METHOD(CompareGreaterThanOrEqualScalar)
METHOD(CompareGreaterThanScalar)
METHOD(CompareLessThanOrEqualScalar)
METHOD(CompareLessThanScalar)
METHOD(CompareTest)
METHOD(CompareTestScalar)
METHOD(ConvertToDoubleScalar)
METHOD(ConvertToDoubleUpper)
METHOD(ConvertToInt32RoundAwayFromZero)
METHOD(ConvertToInt32RoundAwayFromZeroScalar)
METHOD(ConvertToInt32RoundToEven)
METHOD(ConvertToInt32RoundToEvenScalar)
METHOD(ConvertToInt32RoundToNegativeInfinity)
METHOD(ConvertToInt32RoundToNegativeInfinityScalar)
METHOD(ConvertToInt32RoundToPositiveInfinity)
METHOD(ConvertToInt32RoundToPositiveInfinityScalar)
METHOD(ConvertToInt32RoundToZero)
METHOD(ConvertToInt32RoundToZeroScalar)
METHOD(ConvertToInt64RoundAwayFromZero)
METHOD(ConvertToInt64RoundAwayFromZeroScalar)
METHOD(ConvertToInt64RoundToEven)
METHOD(ConvertToInt64RoundToEvenScalar)
METHOD(ConvertToInt64RoundToNegativeInfinity)
METHOD(ConvertToInt64RoundToNegativeInfinityScalar)
METHOD(ConvertToInt64RoundToPositiveInfinity)
METHOD(ConvertToInt64RoundToPositiveInfinityScalar)
METHOD(ConvertToInt64RoundToZero)
METHOD(ConvertToInt64RoundToZeroScalar)
METHOD(ConvertToSingleLower)
METHOD(ConvertToSingleRoundToOddLower)
METHOD(ConvertToSingleRoundToOddUpper)
METHOD(ConvertToSingleScalar)
METHOD(ConvertToSingleUpper)
METHOD(ConvertToUInt32RoundAwayFromZero)
METHOD(ConvertToUInt32RoundAwayFromZeroScalar)
METHOD(ConvertToUInt32RoundToEven)
METHOD(ConvertToUInt32RoundToEvenScalar)
METHOD(ConvertToUInt32RoundToNegativeInfinity)
METHOD(ConvertToUInt32RoundToNegativeInfinityScalar)
METHOD(ConvertToUInt32RoundToPositiveInfinity)
METHOD(ConvertToUInt32RoundToPositiveInfinityScalar)
METHOD(ConvertToUInt32RoundToZero)
METHOD(ConvertToUInt32RoundToZeroScalar)
METHOD(ConvertToUInt64RoundAwayFromZero)
METHOD(ConvertToUInt64RoundAwayFromZeroScalar)
METHOD(ConvertToUInt64RoundToEven)
METHOD(ConvertToUInt64RoundToEvenScalar)
METHOD(ConvertToUInt64RoundToNegativeInfinity)
METHOD(ConvertToUInt64RoundToNegativeInfinityScalar)
METHOD(ConvertToUInt64RoundToPositiveInfinity)
METHOD(ConvertToUInt64RoundToPositiveInfinityScalar)
METHOD(ConvertToUInt64RoundToZero)
METHOD(ConvertToUInt64RoundToZeroScalar)
METHOD(DuplicateSelectedScalarToVector128)
METHOD(DuplicateSelectedScalarToVector64)
METHOD(DuplicateToVector128)
METHOD(DuplicateToVector64)
METHOD(ExtractNarrowingLower)
METHOD(ExtractNarrowingSaturateLower)
METHOD(ExtractNarrowingSaturateScalar)
METHOD(ExtractNarrowingSaturateUnsignedLower)
METHOD(ExtractNarrowingSaturateUnsignedScalar)
METHOD(ExtractNarrowingSaturateUnsignedUpper)
METHOD(ExtractNarrowingSaturateUpper)
METHOD(ExtractNarrowingUpper)
METHOD(ExtractVector128)
METHOD(ExtractVector64)
METHOD(FusedAddHalving)
METHOD(FusedAddRoundedHalving)
METHOD(FusedMultiplyAdd)
METHOD(FusedMultiplyAddByScalar)
METHOD(FusedMultiplyAddBySelectedScalar)
METHOD(FusedMultiplyAddNegatedScalar)
METHOD(FusedMultiplyAddScalar)
METHOD(FusedMultiplyAddScalarBySelectedScalar)
METHOD(FusedMultiplySubtract)
METHOD(FusedMultiplySubtractByScalar)
METHOD(FusedMultiplySubtractBySelectedScalar)
METHOD(FusedMultiplySubtractNegatedScalar)
METHOD(FusedMultiplySubtractScalar)
METHOD(FusedMultiplySubtractScalarBySelectedScalar)
METHOD(FusedSubtractHalving)
METHOD(InsertScalar)
METHOD(InsertSelectedScalar)
METHOD(LoadAndInsertScalar)
METHOD(LoadAndReplicateToVector128)
METHOD(LoadAndReplicateToVector64)
METHOD(LoadPairScalarVector64)
METHOD(LoadPairScalarVector64NonTemporal)
METHOD(LoadPairVector128)
METHOD(LoadPairVector128NonTemporal)
METHOD(LoadPairVector64)
METHOD(LoadPairVector64NonTemporal)
METHOD(LoadVector64)
METHOD(MaxAcross)
METHOD(MaxNumber)
METHOD(MaxNumberAcross)
METHOD(MaxNumberPairwise)
METHOD(MaxNumberPairwiseScalar)
METHOD(MaxNumberScalar)
METHOD(MaxPairwise)
METHOD(MaxPairwiseScalar)
METHOD(MinAcross)
METHOD(MinNumber)
METHOD(MinNumberAcross)
METHOD(MinNumberPairwise)
METHOD(MinNumberPairwiseScalar)
METHOD(MinNumberScalar)
METHOD(MinPairwise)
METHOD(MinPairwiseScalar)
METHOD(MultiplyAdd)
METHOD(MultiplyAddByScalar)
METHOD(MultiplyAddBySelectedScalar)
METHOD(MultiplyByScalar)
METHOD(MultiplyBySelectedScalar)
METHOD(MultiplyBySelectedScalarWideningLower)
METHOD(MultiplyBySelectedScalarWideningLowerAndAdd)
METHOD(MultiplyBySelectedScalarWideningLowerAndSubtract)
METHOD(MultiplyBySelectedScalarWideningUpper)
METHOD(MultiplyBySelectedScalarWideningUpperAndAdd)
METHOD(MultiplyBySelectedScalarWideningUpperAndSubtract)
METHOD(MultiplyDoublingByScalarSaturateHigh)
METHOD(MultiplyDoublingBySelectedScalarSaturateHigh)
METHOD(MultiplyDoublingSaturateHigh)
METHOD(MultiplyDoublingSaturateHighScalar)
METHOD(MultiplyDoublingScalarBySelectedScalarSaturateHigh)
METHOD(MultiplyDoublingWideningAndAddSaturateScalar)
METHOD(MultiplyDoublingWideningAndSubtractSaturateScalar)
METHOD(MultiplyDoublingWideningLowerAndAddSaturate)
METHOD(MultiplyDoublingWideningLowerAndSubtractSaturate)
METHOD(MultiplyDoublingWideningLowerByScalarAndAddSaturate)
METHOD(MultiplyDoublingWideningLowerByScalarAndSubtractSaturate)
METHOD(MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate)
METHOD(MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate)
METHOD(MultiplyDoublingWideningSaturateLower)
METHOD(MultiplyDoublingWideningSaturateLowerByScalar)
METHOD(MultiplyDoublingWideningSaturateLowerBySelectedScalar)
METHOD(MultiplyDoublingWideningSaturateScalar)
METHOD(MultiplyDoublingWideningSaturateScalarBySelectedScalar)
METHOD(MultiplyDoublingWideningSaturateUpper)
METHOD(MultiplyDoublingWideningSaturateUpperByScalar)
METHOD(MultiplyDoublingWideningSaturateUpperBySelectedScalar)
METHOD(MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate)
METHOD(MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate)
METHOD(MultiplyDoublingWideningUpperAndAddSaturate)
METHOD(MultiplyDoublingWideningUpperAndSubtractSaturate)
METHOD(MultiplyDoublingWideningUpperByScalarAndAddSaturate)
METHOD(MultiplyDoublingWideningUpperByScalarAndSubtractSaturate)
METHOD(MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate)
METHOD(MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate)
METHOD(MultiplyExtended)
METHOD(MultiplyExtendedByScalar)
METHOD(MultiplyExtendedBySelectedScalar)
METHOD(MultiplyExtendedScalar)
METHOD(MultiplyExtendedScalarBySelectedScalar)
METHOD(MultiplyRoundedDoublingByScalarSaturateHigh)
METHOD(MultiplyRoundedDoublingBySelectedScalarSaturateHigh)
METHOD(MultiplyRoundedDoublingSaturateHigh)
METHOD(MultiplyRoundedDoublingSaturateHighScalar)
METHOD(MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh)
METHOD(MultiplyScalarBySelectedScalar)
METHOD(MultiplySubtract)
METHOD(MultiplySubtractByScalar)
METHOD(MultiplySubtractBySelectedScalar)
METHOD(MultiplyWideningLower)
METHOD(MultiplyWideningLowerAndAdd)
METHOD(MultiplyWideningLowerAndSubtract)
METHOD(MultiplyWideningUpper)
METHOD(MultiplyWideningUpperAndAdd)
METHOD(MultiplyWideningUpperAndSubtract)
METHOD(Negate)
METHOD(NegateSaturate)
METHOD(NegateSaturateScalar)
METHOD(NegateScalar)
METHOD(Not)
METHOD(OrNot)
METHOD(OnesComplement)
METHOD(PolynomialMultiply)
METHOD(ReciprocalEstimate)
METHOD(ReciprocalEstimateScalar)
METHOD(ReciprocalExponentScalar)
METHOD(ReciprocalSquareRootEstimate)
METHOD(ReciprocalSquareRootEstimateScalar)
METHOD(ReciprocalSquareRootStep)
METHOD(ReciprocalSquareRootStepScalar)
METHOD(ReciprocalStep)
METHOD(ReciprocalStepScalar)
METHOD(ReverseElement16)
METHOD(ReverseElement32)
METHOD(ReverseElement8)
METHOD(RoundAwayFromZero)
METHOD(RoundAwayFromZeroScalar)
METHOD(RoundToNearest)
METHOD(RoundToNearestScalar)
METHOD(ShiftArithmetic)
METHOD(ShiftArithmeticRounded)
METHOD(ShiftArithmeticRoundedSaturate)
METHOD(ShiftArithmeticRoundedSaturateScalar)
METHOD(ShiftArithmeticRoundedScalar)
METHOD(ShiftArithmeticSaturate)
METHOD(ShiftArithmeticSaturateScalar)
METHOD(ShiftArithmeticScalar)
METHOD(ShiftLeftAndInsert)
METHOD(ShiftLeftAndInsertScalar)
METHOD(ShiftLeftLogicalSaturate)
METHOD(ShiftLeftLogicalSaturateScalar)
METHOD(ShiftLeftLogicalSaturateUnsigned)
METHOD(ShiftLeftLogicalSaturateUnsignedScalar)
METHOD(ShiftLeftLogicalScalar)
METHOD(ShiftLeftLogicalWideningLower)
METHOD(ShiftLeftLogicalWideningUpper)
METHOD(ShiftLogical)
METHOD(ShiftLogicalRounded)
METHOD(ShiftLogicalRoundedSaturate)
METHOD(ShiftLogicalRoundedSaturateScalar)
METHOD(ShiftLogicalRoundedScalar)
METHOD(ShiftLogicalSaturate)
METHOD(ShiftLogicalSaturateScalar)
METHOD(ShiftLogicalScalar)
METHOD(ShiftRightAndInsert)
METHOD(ShiftRightAndInsertScalar)
METHOD(ShiftRightArithmeticAdd)
METHOD(ShiftRightArithmeticAddScalar)
METHOD(ShiftRightArithmeticNarrowingSaturateLower)
METHOD(ShiftRightArithmeticNarrowingSaturateScalar)
METHOD(ShiftRightArithmeticNarrowingSaturateUnsignedLower)
METHOD(ShiftRightArithmeticNarrowingSaturateUnsignedScalar)
METHOD(ShiftRightArithmeticNarrowingSaturateUnsignedUpper)
METHOD(ShiftRightArithmeticNarrowingSaturateUpper)
METHOD(ShiftRightArithmeticRounded)
METHOD(ShiftRightArithmeticRoundedAdd)
METHOD(ShiftRightArithmeticRoundedAddScalar)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateLower)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateScalar)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper)
METHOD(ShiftRightArithmeticRoundedNarrowingSaturateUpper)
METHOD(ShiftRightArithmeticRoundedScalar)
METHOD(ShiftRightArithmeticScalar)
METHOD(ShiftRightLogicalAdd)
METHOD(ShiftRightLogicalAddScalar)
METHOD(ShiftRightLogicalNarrowingLower)
METHOD(ShiftRightLogicalNarrowingSaturateLower)
METHOD(ShiftRightLogicalNarrowingSaturateScalar)
METHOD(ShiftRightLogicalNarrowingSaturateUpper)
METHOD(ShiftRightLogicalNarrowingUpper)
METHOD(ShiftRightLogicalRounded)
METHOD(ShiftRightLogicalRoundedAdd)
METHOD(ShiftRightLogicalRoundedAddScalar)
METHOD(ShiftRightLogicalRoundedNarrowingLower)
METHOD(ShiftRightLogicalRoundedNarrowingSaturateLower)
METHOD(ShiftRightLogicalRoundedNarrowingSaturateScalar)
METHOD(ShiftRightLogicalRoundedNarrowingSaturateUpper)
METHOD(ShiftRightLogicalRoundedNarrowingUpper)
METHOD(ShiftRightLogicalRoundedScalar)
METHOD(ShiftRightLogicalScalar)
METHOD(SignExtendWideningLower)
METHOD(SignExtendWideningUpper)
METHOD(StorePair)
METHOD(StorePairNonTemporal)
METHOD(StorePairScalar)
METHOD(StorePairScalarNonTemporal)
METHOD(StoreSelectedScalar)
METHOD(SubtractHighNarrowingLower)
METHOD(SubtractHighNarrowingUpper)
METHOD(SubtractRoundedHighNarrowingLower)
METHOD(SubtractRoundedHighNarrowingUpper)
METHOD(SubtractSaturateScalar)
METHOD(SubtractWideningLower)
METHOD(SubtractWideningUpper)
METHOD(TransposeEven)
METHOD(TransposeOdd)
METHOD(UnzipEven)
METHOD(UnzipOdd)
METHOD(VectorTableLookup)
METHOD(VectorTableLookupExtension)
METHOD(ZeroExtendWideningLower)
METHOD(ZeroExtendWideningUpper)
METHOD(ZipHigh)
METHOD(ZipLow)
// Arm.Rdm
METHOD(MultiplyRoundedDoublingAndAddSaturateHigh)
METHOD(MultiplyRoundedDoublingAndSubtractSaturateHigh)
METHOD(MultiplyRoundedDoublingBySelectedScalarAndAddSaturateHigh)
METHOD(MultiplyRoundedDoublingBySelectedScalarAndSubtractSaturateHigh)
// Arm.Rdm.Arm64
METHOD(MultiplyRoundedDoublingAndAddSaturateHighScalar)
METHOD(MultiplyRoundedDoublingAndSubtractSaturateHighScalar)
METHOD(MultiplyRoundedDoublingScalarBySelectedScalarAndAddSaturateHigh)
METHOD(MultiplyRoundedDoublingScalarBySelectedScalarAndSubtractSaturateHigh)
// Arm.Dp
METHOD(DotProductBySelectedQuadruplet)
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
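The "regNumber, not regMaskTP" cleanup item above is easier to follow with a concrete illustration. The snippet below is a hedged, self-contained sketch rather than the PR's actual change: `regNumber`, `regMaskTP`, `genRegMask` and `gcMarkRegSetNpt` here are simplified stand-ins that only mirror the JIT's naming. It shows why passing a register number where a register mask is expected compiles without complaint but updates the wrong tracking bits.

```cpp
// Minimal self-contained model (NOT the PR's diff): regNumber, regMaskTP,
// genRegMask and gcMarkRegSetNpt are simplified stand-ins that mirror the
// JIT's naming, so the example compiles and runs on its own.
#include <cstdint>
#include <cstdio>

typedef unsigned regNumber;   // a register index, e.g. REG_R10 == 10
typedef uint64_t regMaskTP;   // a bit set with one bit per register

inline regMaskTP genRegMask(regNumber reg) { return regMaskTP(1) << reg; }

struct GCInfoModel
{
    regMaskTP gcRegGCrefSet = 0; // registers currently holding GC references

    // Expects a register MASK: removes those registers from GC tracking.
    void gcMarkRegSetNpt(regMaskTP regMask) { gcRegGCrefSet &= ~regMask; }
};

int main()
{
    const regNumber REG_R10 = 10;

    GCInfoModel gc;
    gc.gcRegGCrefSet = genRegMask(REG_R10); // R10 holds a GC pointer

    // Bug pattern: the register number implicitly converts to regMaskTP,
    // so this clears bits 1 and 3 (mask 0b1010), not bit 10 -- R10 is never
    // untracked, and unrelated registers get untracked instead.
    gc.gcMarkRegSetNpt(REG_R10);
    printf("after buggy call:   R10 still tracked? %d\n",
           (int)((gc.gcRegGCrefSet >> REG_R10) & 1)); // prints 1

    // Fixed pattern: convert the register number to its mask bit first.
    gc.gcMarkRegSetNpt(genRegMask(REG_R10));
    printf("after correct call: R10 still tracked? %d\n",
           (int)((gc.gcRegGCrefSet >> REG_R10) & 1)); // prints 0
    return 0;
}
```

The design point is simply that an implicit integer conversion hides the type mismatch, so the wrong-bits behaviour only shows up at run time as missing or stale GC register tracking.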
| ./src/native/external/zlib/crc32.h | /* crc32.h -- tables for rapid CRC calculation
* Generated automatically by crc32.c
*/
local const z_crc_t FAR crc_table[TBLS][256] =
{
{
0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
0x2d02ef8dUL
#ifdef BYFOUR
},
{
0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL,
0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL,
0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL,
0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL,
0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL,
0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL,
0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL,
0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL,
0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL,
0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL,
0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL,
0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL,
0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL,
0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL,
0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL,
0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL,
0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL,
0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL,
0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL,
0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL,
0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL,
0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL,
0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL,
0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL,
0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL,
0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL,
0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL,
0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL,
0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL,
0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL,
0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL,
0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL,
0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL,
0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL,
0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL,
0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL,
0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL,
0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL,
0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL,
0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL,
0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL,
0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL,
0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL,
0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL,
0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL,
0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL,
0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL,
0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL,
0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL,
0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL,
0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL,
0x9324fd72UL
},
{
0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL,
0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL,
0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL,
0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL,
0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL,
0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL,
0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL,
0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL,
0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL,
0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL,
0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL,
0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL,
0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL,
0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL,
0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL,
0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL,
0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL,
0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL,
0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL,
0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL,
0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL,
0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL,
0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL,
0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL,
0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL,
0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL,
0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL,
0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL,
0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL,
0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL,
0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL,
0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL,
0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL,
0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL,
0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL,
0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL,
0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL,
0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL,
0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL,
0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL,
0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL,
0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL,
0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL,
0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL,
0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL,
0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL,
0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL,
0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL,
0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL,
0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL,
0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL,
0xbe9834edUL
},
{
0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL,
0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL,
0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL,
0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL,
0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL,
0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL,
0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL,
0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL,
0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL,
0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL,
0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL,
0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL,
0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL,
0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL,
0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL,
0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL,
0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL,
0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL,
0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL,
0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL,
0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL,
0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL,
0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL,
0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL,
0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL,
0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL,
0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL,
0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL,
0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL,
0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL,
0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL,
0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL,
0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL,
0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL,
0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL,
0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL,
0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL,
0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL,
0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL,
0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL,
0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL,
0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL,
0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL,
0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL,
0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL,
0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL,
0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL,
0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL,
0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL,
0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL,
0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL,
0xde0506f1UL
},
{
0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL,
0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 0xa4b8dc79UL,
0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL,
0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL,
0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL,
0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL,
0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL,
0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL,
0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL,
0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL,
0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 0x9995bacfUL,
0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL,
0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL,
0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL,
0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL,
0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL,
0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL,
0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL,
0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL,
0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL,
0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL,
0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL,
0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL,
0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL,
0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL,
0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL,
0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL,
0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL,
0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL,
0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL,
0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL,
0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL,
0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL,
0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL,
0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL,
0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL,
0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL,
0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL,
0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL,
0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL,
0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL,
0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL,
0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL,
0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL,
0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL,
0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL,
0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL,
0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL,
0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL,
0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL,
0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL,
0x8def022dUL
},
{
0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL,
0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL,
0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL,
0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL,
0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL,
0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL,
0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL,
0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL,
0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL,
0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL,
0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL,
0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL,
0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 0x054f79f0UL,
0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL,
0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL,
0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL,
0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL,
0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL,
0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL,
0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL,
0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL,
0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL,
0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL,
0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL,
0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL,
0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL,
0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL,
0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL,
0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL,
0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL,
0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL,
0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL,
0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL,
0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL,
0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL,
0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL,
0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL,
0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL,
0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL,
0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL,
0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL,
0x03a9547eUL, 0xc0fa7955UL, 0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL,
0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL,
0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL,
0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL,
0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL,
0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL,
0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL,
0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL,
0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL,
0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL,
0x72fd2493UL
},
{
0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL,
0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL,
0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL,
0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL,
0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL,
0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL,
0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL,
0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL,
0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL,
0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL,
0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL, 0x2299e620UL,
0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL,
0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL,
0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL,
0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL,
0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL,
0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL,
0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL,
0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL,
0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL,
0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL,
0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL,
0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL,
0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL,
0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL,
0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL,
0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL,
0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL,
0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL,
0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL,
0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL,
0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL,
0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL,
0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL,
0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL,
0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL,
0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL,
0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL,
0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL,
0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL,
0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL,
0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL,
0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL,
0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL,
0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL,
0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL,
0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL,
0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL,
0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL,
0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL,
0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL,
0xed3498beUL
},
{
0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL,
0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL,
0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL,
0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL,
0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL,
0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL,
0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL,
0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL,
0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL,
0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL,
0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL,
0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL,
0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL,
0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL,
0xe1077fbeUL, 0x8460c306UL, 0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL,
0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL,
0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL,
0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL,
0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL,
0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL,
0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL,
0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL,
0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL,
0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL,
0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL,
0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL,
0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL,
0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL,
0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL,
0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL,
0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL,
0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL,
0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL,
0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL,
0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL,
0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL,
0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL,
0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL,
0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL,
0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL,
0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL,
0xdb362784UL, 0x35999296UL, 0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL,
0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL,
0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL,
0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL,
0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL,
0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL,
0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL,
0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL,
0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL,
0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL,
0xf10605deUL
#endif
}
};
| /* crc32.h -- tables for rapid CRC calculation
* Generated automatically by crc32.c
*/
local const z_crc_t FAR crc_table[TBLS][256] =
{
{
0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
0x2d02ef8dUL
#ifdef BYFOUR
},
{
0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL,
0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL,
0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL,
0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL,
0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL,
0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL,
0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL,
0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL,
0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL,
0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL,
0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL,
0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL,
0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL,
0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL,
0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL,
0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL,
0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL,
0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL,
0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL,
0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL,
0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL,
0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL,
0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL,
0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL,
0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL,
0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL,
0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL,
0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL,
0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL,
0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL,
0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL,
0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL,
0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL,
0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL,
0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL,
0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL,
0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL,
0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL,
0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL,
0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL,
0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL,
0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL,
0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL,
0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL,
0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL,
0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL,
0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL,
0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL,
0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL,
0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL,
0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL,
0x9324fd72UL
},
{
0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL,
0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL,
0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL,
0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL,
0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL,
0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL,
0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL,
0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL,
0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL,
0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL,
0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL,
0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL,
0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL,
0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL,
0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL,
0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL,
0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL,
0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL,
0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL,
0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL,
0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL,
0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL,
0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL,
0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL,
0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL,
0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL,
0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL,
0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL,
0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL,
0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL,
0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL,
0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL,
0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL,
0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL,
0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL,
0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL,
0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL,
0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL,
0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL,
0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL,
0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL,
0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL,
0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL,
0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL,
0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL,
0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL,
0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL,
0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL,
0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL,
0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL,
0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL,
0xbe9834edUL
},
{
0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL,
0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL,
0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL,
0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL,
0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL,
0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL,
0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL,
0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL,
0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL,
0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL,
0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL,
0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL,
0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL,
0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL,
0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL,
0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL,
0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL,
0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL,
0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL,
0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL,
0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL,
0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL,
0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL,
0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL,
0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL,
0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL,
0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL,
0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL,
0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL,
0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL,
0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL,
0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL,
0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL,
0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL,
0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL,
0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL,
0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL,
0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL,
0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL,
0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL,
0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL,
0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL,
0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL,
0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL,
0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL,
0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL,
0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL,
0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL,
0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL,
0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL,
0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL,
0xde0506f1UL
},
{
0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL,
0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 0xa4b8dc79UL,
0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL,
0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL,
0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL,
0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL,
0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL,
0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL,
0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL,
0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL,
0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 0x9995bacfUL,
0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL,
0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL,
0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL,
0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL,
0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL,
0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL,
0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL,
0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL,
0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL,
0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL,
0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL,
0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL,
0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL,
0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL,
0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL,
0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL,
0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL,
0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL,
0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL,
0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL,
0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL,
0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL,
0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL,
0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL,
0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL,
0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL,
0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL,
0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL,
0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL,
0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL,
0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL,
0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL,
0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL,
0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL,
0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL,
0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL,
0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL,
0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL,
0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL,
0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL,
0x8def022dUL
},
{
0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL,
0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL,
0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL,
0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL,
0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL,
0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL,
0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL,
0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL,
0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL,
0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL,
0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL,
0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL,
0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 0x054f79f0UL,
0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL,
0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL,
0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL,
0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL,
0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL,
0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL,
0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL,
0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL,
0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL,
0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL,
0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL,
0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL,
0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL,
0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL,
0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL,
0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL,
0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL,
0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL,
0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL,
0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL,
0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL,
0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL,
0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL,
0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL,
0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL,
0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL,
0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL,
0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL,
0x03a9547eUL, 0xc0fa7955UL, 0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL,
0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL,
0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL,
0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL,
0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL,
0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL,
0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL,
0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL,
0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL,
0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL,
0x72fd2493UL
},
{
0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL,
0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL,
0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL,
0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL,
0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL,
0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL,
0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL,
0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL,
0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL,
0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL,
0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL, 0x2299e620UL,
0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL,
0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL,
0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL,
0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL,
0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL,
0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL,
0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL,
0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL,
0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL,
0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL,
0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL,
0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL,
0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL,
0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL,
0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL,
0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL,
0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL,
0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL,
0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL,
0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL,
0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL,
0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL,
0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL,
0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL,
0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL,
0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL,
0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL,
0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL,
0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL,
0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL,
0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL,
0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL,
0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL,
0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL,
0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL,
0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL,
0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL,
0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL,
0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL,
0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL,
0xed3498beUL
},
{
0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL,
0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL,
0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL,
0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL,
0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL,
0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL,
0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL,
0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL,
0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL,
0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL,
0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL,
0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL,
0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL,
0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL,
0xe1077fbeUL, 0x8460c306UL, 0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL,
0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL,
0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL,
0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL,
0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL,
0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL,
0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL,
0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL,
0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL,
0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL,
0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL,
0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL,
0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL,
0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL,
0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL,
0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL,
0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL,
0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL,
0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL,
0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL,
0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL,
0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL,
0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL,
0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL,
0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL,
0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL,
0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL,
0xdb362784UL, 0x35999296UL, 0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL,
0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL,
0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL,
0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL,
0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL,
0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL,
0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL,
0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL,
0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL,
0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL,
0xf10605deUL
#endif
}
};
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
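To make the failure mode concrete, here is a minimal, self-contained sketch of the idea (hypothetical types and names for illustration only, not the JIT's actual `gtHasReg()`/`gcInfo` code): when a local is split across several register slots, both "does it have a register?" and "which registers hold GC pointers?" have to be answered per slot, not just for slot 0.
```cpp
// Illustration only: a toy stand-in for a multi-reg local. None of these
// names exist in the JIT; they just model "one value, several register slots".
struct MultiRegLocal
{
    static const int kMaxRegs = 4;
    int  regCount;                   // number of register slots the local occupies
    int  regs[kMaxRegs];             // register number per slot, -1 if that slot lives on the stack
    bool slotIsGcPointer[kMaxRegs];  // does this slot hold a GC pointer?
};

// Analogue of asking "does this node have a register?" for the whole multi-reg
// case: any enregistered slot counts, not only the first one.
bool HasAnyReg(const MultiRegLocal& lcl)
{
    for (int i = 0; i < lcl.regCount; i++)
    {
        if (lcl.regs[i] != -1)
        {
            return true;
        }
    }
    return false;
}

// Report every enregistered GC-pointer slot as live. Skipping slots beyond the
// first when slot 0 is not enregistered is exactly the kind of hole described above.
void MarkGcSlotsLive(const MultiRegLocal& lcl, bool regIsLiveGc[], int numRegs)
{
    for (int i = 0; i < lcl.regCount; i++)
    {
        int reg = lcl.regs[i];
        if ((reg != -1) && (reg < numRegs) && lcl.slotIsGcPointer[i])
        {
            regIsLiveGc[reg] = true;
        }
    }
}
```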
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/vm/debuginfostore.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// DebugInfoStore
#include "common.h"
#include "debuginfostore.h"
#include "nibblestream.h"
#include "patchpointinfo.h"
#ifdef _DEBUG
// For debug builds only.
static bool Dbg_ShouldUseCookies()
{
SUPPORTS_DAC;
// Normally we want this as false b/c it would bloat the image.
// But give us a hook to enable it in case we need it.
return false;
}
#endif
//-----------------------------------------------------------------------------
// We have "Transfer" objects that sit on top of the streams.
// The objects look identical, but one serializes and the other deserializes.
// This lets the compression + restoration routines share all their compression
// logic and just swap out Transfer objects.
//
// It's not ideal that we have a lot of redundancy maintaining both Transfer
// objects, but at least the compiler can enforce that the Reader & Writer are
// in sync. It can't enforce that 2 separate routines for Compression &
// restoration are in sync.
//
// We could have the TransferReader + Writer be polymorphic off a base class,
// but the virtual function calls will be extra overhead. May as well use
// templates and let the compiler resolve it all statically at compile time.
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Serialize to a NibbleWriter stream.
//-----------------------------------------------------------------------------
class TransferWriter
{
public:
TransferWriter(NibbleWriter & w) : m_w(w)
{
}
// Write a raw U32 in nibble-encoded form.
void DoEncodedU32(uint32_t dw) { m_w.WriteEncodedU32(dw); }
// Use to encode a monotonically increasing delta.
void DoEncodedDeltaU32(uint32_t & dw, uint32_t dwLast)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(dw >= dwLast);
uint32_t dwDelta = dw - dwLast;
m_w.WriteEncodedU32(dwDelta);
}
// Some U32 may have a few sentinel negative values.
// We adjust it to be a real U32 and then encode that.
// dwAdjust should be the lower bound on the enum.
void DoEncodedAdjustedU32(uint32_t dw, uint32_t dwAdjust)
{
//_ASSERTE(dwAdjust < 0); // some negative lower bound.
m_w.WriteEncodedU32(dw - dwAdjust);
}
// Typesafe versions of EncodeU32.
void DoEncodedSourceType(ICorDebugInfo::SourceTypes & dw) { m_w.WriteEncodedU32(dw); }
void DoEncodedVarLocType(ICorDebugInfo::VarLocType & dw) { m_w.WriteEncodedU32(dw); }
void DoEncodedUnsigned(unsigned & dw) { m_w.WriteEncodedU32(dw); }
// Stack offsets are aligned on a DWORD boundary, so that lets us shave off 2 bits.
void DoEncodedStackOffset(signed & dwOffset)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
#ifdef TARGET_X86
_ASSERTE(dwOffset % sizeof(DWORD) == 0); // should be dword aligned. That'll save us 2 bits.
m_w.WriteEncodedI32(dwOffset / sizeof(DWORD));
#else
// Non x86 platforms don't need it to be dword aligned.
m_w.WriteEncodedI32(dwOffset);
#endif
}
void DoEncodedRegIdx(ICorDebugInfo::RegNum & reg) { m_w.WriteEncodedU32(reg); }
// For debugging purposes, inject cookies into the Compression.
void DoCookie(BYTE b) {
#ifdef _DEBUG
if (Dbg_ShouldUseCookies())
{
m_w.WriteNibble(b);
}
#endif
}
protected:
NibbleWriter & m_w;
};
//-----------------------------------------------------------------------------
// Deserializer that sits on top of a NibbleReader
// This class interface matches TransferWriter exactly. See that for details.
//-----------------------------------------------------------------------------
class TransferReader
{
public:
TransferReader(NibbleReader & r) : m_r(r)
{
SUPPORTS_DAC;
}
void DoEncodedU32(uint32_t & dw)
{
SUPPORTS_DAC;
dw = m_r.ReadEncodedU32();
}
// Use to decode a monotonically increasing delta.
// dwLast was the last value; we update it to the current value on output.
void DoEncodedDeltaU32(uint32_t & dw, uint32_t dwLast)
{
SUPPORTS_DAC;
uint32_t dwDelta = m_r.ReadEncodedU32();
dw = dwLast + dwDelta;
}
void DoEncodedAdjustedU32(uint32_t & dw, uint32_t dwAdjust)
{
SUPPORTS_DAC;
//_ASSERTE(dwAdjust < 0);
dw = m_r.ReadEncodedU32() + dwAdjust;
}
void DoEncodedSourceType(ICorDebugInfo::SourceTypes & dw)
{
SUPPORTS_DAC;
dw = (ICorDebugInfo::SourceTypes) m_r.ReadEncodedU32();
}
void DoEncodedVarLocType(ICorDebugInfo::VarLocType & dw)
{
SUPPORTS_DAC;
dw = (ICorDebugInfo::VarLocType) m_r.ReadEncodedU32();
}
void DoEncodedUnsigned(unsigned & dw)
{
SUPPORTS_DAC;
dw = (unsigned) m_r.ReadEncodedU32();
}
// Stack offsets are aligned on a DWORD boundary, so that lets us shave off 2 bits.
void DoEncodedStackOffset(signed & dwOffset)
{
SUPPORTS_DAC;
#ifdef TARGET_X86
dwOffset = m_r.ReadEncodedI32() * sizeof(DWORD);
#else
// Non x86 platforms don't need it to be dword aligned.
dwOffset = m_r.ReadEncodedI32();
#endif
}
void DoEncodedRegIdx(ICorDebugInfo::RegNum & reg)
{
SUPPORTS_DAC;
reg = (ICorDebugInfo::RegNum) m_r.ReadEncodedU32();
}
// For debugging purposes, inject cookies into the Compression.
void DoCookie(BYTE b)
{
SUPPORTS_DAC;
#ifdef _DEBUG
if (Dbg_ShouldUseCookies())
{
BYTE b2 = m_r.ReadNibble();
_ASSERTE(b == b2);
}
#endif
}
protected:
NibbleReader & m_r;
};
#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
// Perf tracking
static int g_CDI_TotalMethods = 0;
static int g_CDI_bMethodTotalUncompress = 0;
static int g_CDI_bMethodTotalCompress = 0;
static int g_CDI_bVarsTotalUncompress = 0;
static int g_CDI_bVarsTotalCompress = 0;
#endif
//-----------------------------------------------------------------------------
// Serialize Bounds info.
//-----------------------------------------------------------------------------
template <class T>
void DoBounds(
T trans, // transfer object.
ULONG32 cMap,
ICorDebugInfo::OffsetMapping *pMap
)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
// Bounds info contains (Native Offset, IL Offset, flags)
// - Sorted by native offset (so use a delta encoding for that).
// - IL offsets aren't sorted, but they should be close to each other (so a signed delta encoding)
// They may also include a sentinel value from MappingTypes.
// - flags is 3 independent bits.
// Loop through and transfer each Entry in the Mapping.
uint32_t dwLastNativeOffset = 0;
for(uint32_t i = 0; i < cMap; i++)
{
ICorDebugInfo::OffsetMapping * pBound = &pMap[i];
trans.DoEncodedDeltaU32(pBound->nativeOffset, dwLastNativeOffset);
dwLastNativeOffset = pBound->nativeOffset;
trans.DoEncodedAdjustedU32(pBound->ilOffset, (DWORD) ICorDebugInfo::MAX_MAPPING_VALUE);
trans.DoEncodedSourceType(pBound->source);
trans.DoCookie(0xA);
}
}
// Helper to write a compressed Native Var Info
template<class T>
void DoNativeVarInfo(
T trans,
ICorDebugInfo::NativeVarInfo * pVar
)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
// Each Varinfo has a:
// - native start + end offset. We can use a delta for the end offset.
// - IL variable number. These are usually small.
// - VarLoc information. This is a tagged variant.
// The entries aren't sorted in any particular order.
trans.DoCookie(0xB);
trans.DoEncodedU32(pVar->startOffset);
trans.DoEncodedDeltaU32(pVar->endOffset, pVar->startOffset);
// record var number.
trans.DoEncodedAdjustedU32(pVar->varNumber, (DWORD) ICorDebugInfo::MAX_ILNUM);
// Now write the VarLoc... This is a variant-like structure and so we'll get different
// compression depending on what we've got.
trans.DoEncodedVarLocType(pVar->loc.vlType);
switch(pVar->loc.vlType)
{
case ICorDebugInfo::VLT_REG:
case ICorDebugInfo::VLT_REG_FP: // fall through
case ICorDebugInfo::VLT_REG_BYREF: // fall through
trans.DoEncodedRegIdx(pVar->loc.vlReg.vlrReg);
break;
case ICorDebugInfo::VLT_STK:
case ICorDebugInfo::VLT_STK_BYREF: // fall through
trans.DoEncodedRegIdx(pVar->loc.vlStk.vlsBaseReg);
trans.DoEncodedStackOffset(pVar->loc.vlStk.vlsOffset);
break;
case ICorDebugInfo::VLT_REG_REG:
trans.DoEncodedRegIdx(pVar->loc.vlRegReg.vlrrReg1);
trans.DoEncodedRegIdx(pVar->loc.vlRegReg.vlrrReg2);
break;
case ICorDebugInfo::VLT_REG_STK:
trans.DoEncodedRegIdx(pVar->loc.vlRegStk.vlrsReg);
trans.DoEncodedRegIdx(pVar->loc.vlRegStk.vlrsStk.vlrssBaseReg);
trans.DoEncodedStackOffset(pVar->loc.vlRegStk.vlrsStk.vlrssOffset);
break;
case ICorDebugInfo::VLT_STK_REG:
trans.DoEncodedStackOffset(pVar->loc.vlStkReg.vlsrStk.vlsrsOffset);
trans.DoEncodedRegIdx(pVar->loc.vlStkReg.vlsrStk.vlsrsBaseReg);
trans.DoEncodedRegIdx(pVar->loc.vlStkReg.vlsrReg);
break;
case ICorDebugInfo::VLT_STK2:
trans.DoEncodedRegIdx(pVar->loc.vlStk2.vls2BaseReg);
trans.DoEncodedStackOffset(pVar->loc.vlStk2.vls2Offset);
break;
case ICorDebugInfo::VLT_FPSTK:
trans.DoEncodedUnsigned(pVar->loc.vlFPstk.vlfReg);
break;
case ICorDebugInfo::VLT_FIXED_VA:
trans.DoEncodedUnsigned(pVar->loc.vlFixedVarArg.vlfvOffset);
break;
default:
_ASSERTE(!"Unknown varloc type!");
break;
}
trans.DoCookie(0xC);
}
#ifndef DACCESS_COMPILE
void CompressDebugInfo::CompressBoundaries(
IN ULONG32 cMap,
IN ICorDebugInfo::OffsetMapping *pMap,
IN OUT NibbleWriter *pWriter
)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(pWriter != NULL);
_ASSERTE((pMap == NULL) == (cMap == 0));
if (cMap != 0)
{
pWriter->WriteEncodedU32(cMap);
TransferWriter t(*pWriter);
DoBounds(t, cMap, pMap);
pWriter->Flush();
}
#ifdef _DEBUG
DWORD cbBlob;
PVOID pBlob = pWriter->GetBlob(&cbBlob);
// Track perf #s for compression...
g_CDI_TotalMethods++;
g_CDI_bMethodTotalUncompress += sizeof(ICorDebugInfo::OffsetMapping) * cMap;
g_CDI_bMethodTotalCompress += (int) cbBlob;
#endif // _DEBUG
}
void CompressDebugInfo::CompressVars(
IN ULONG32 cVars,
IN ICorDebugInfo::NativeVarInfo *vars,
IN OUT NibbleWriter *pWriter
)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(pWriter != NULL);
_ASSERTE((cVars == 0) == (vars == NULL));
if (cVars != 0)
{
pWriter->WriteEncodedU32(cVars);
TransferWriter t(*pWriter);
for(ULONG32 i = 0; i < cVars; i ++)
{
DoNativeVarInfo(t, &vars[i]);
}
pWriter->Flush();
}
#ifdef _DEBUG
DWORD cbBlob;
PVOID pBlob = pWriter->GetBlob(&cbBlob);
g_CDI_bVarsTotalUncompress += cVars * sizeof(ICorDebugInfo::NativeVarInfo);
g_CDI_bVarsTotalCompress += (int) cbBlob;
#endif
}
PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars(
IN ICorDebugInfo::OffsetMapping * pOffsetMapping,
IN ULONG iOffsetMapping,
IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo,
IN ULONG iNativeVarInfo,
IN PatchpointInfo * patchpointInfo,
IN OUT SBuffer * pDebugInfoBuffer,
IN LoaderHeap * pLoaderHeap
)
{
CONTRACTL {
THROWS; // compression routines throw
PRECONDITION((iOffsetMapping == 0) == (pOffsetMapping == NULL));
PRECONDITION((iNativeVarInfo == 0) == (pNativeVarInfo == NULL));
PRECONDITION((pDebugInfoBuffer != NULL) ^ (pLoaderHeap != NULL));
} CONTRACTL_END;
// Patchpoint info is currently uncompressed.
DWORD cbPatchpointInfo = 0;
#ifdef FEATURE_ON_STACK_REPLACEMENT
if (patchpointInfo != NULL)
{
cbPatchpointInfo = patchpointInfo->PatchpointInfoSize();
}
#else
_ASSERTE(patchpointInfo == NULL);
#endif
// Actually do the compression. These will throw on oom.
NibbleWriter boundsBuffer;
DWORD cbBounds = 0;
PVOID pBounds = NULL;
if (iOffsetMapping > 0)
{
CompressDebugInfo::CompressBoundaries(iOffsetMapping, pOffsetMapping, &boundsBuffer);
pBounds = boundsBuffer.GetBlob(&cbBounds);
}
NibbleWriter varsBuffer;
DWORD cbVars = 0;
PVOID pVars = NULL;
if (iNativeVarInfo > 0)
{
CompressDebugInfo::CompressVars(iNativeVarInfo, pNativeVarInfo, &varsBuffer);
pVars = varsBuffer.GetBlob(&cbVars);
}
// Now write it all out to the buffer in a compact fashion.
NibbleWriter w;
w.WriteEncodedU32(cbBounds);
w.WriteEncodedU32(cbVars);
w.Flush();
DWORD cbHeader;
PVOID pHeader = w.GetBlob(&cbHeader);
#ifdef FEATURE_ON_STACK_REPLACEMENT
S_UINT32 cbFinalSize = S_UINT32(1) + S_UINT32(cbPatchpointInfo) + S_UINT32(cbHeader) + S_UINT32(cbBounds) + S_UINT32(cbVars);
#else
S_UINT32 cbFinalSize = S_UINT32(cbHeader) + S_UINT32(cbBounds) + S_UINT32(cbVars);
#endif
if (cbFinalSize.IsOverflow())
ThrowHR(COR_E_OVERFLOW);
BYTE *ptrStart = NULL;
if (pLoaderHeap != NULL)
{
ptrStart = (BYTE *)(void *)pLoaderHeap->AllocMem(S_SIZE_T(cbFinalSize.Value()));
}
else
{
// Create a conservatively large buffer to hold all the data.
ptrStart = pDebugInfoBuffer->OpenRawBuffer(cbFinalSize.Value());
}
_ASSERTE(ptrStart != NULL); // throws on oom.
BYTE *ptr = ptrStart;
#ifdef FEATURE_ON_STACK_REPLACEMENT
// First byte is a flag byte:
// 0 - no patchpoint info
// 1 - patchpoint info
*ptr++ = (cbPatchpointInfo > 0) ? 1 : 0;
if (cbPatchpointInfo > 0)
{
memcpy(ptr, (BYTE*) patchpointInfo, cbPatchpointInfo);
ptr += cbPatchpointInfo;
}
#endif
memcpy(ptr, pHeader, cbHeader);
ptr += cbHeader;
memcpy(ptr, pBounds, cbBounds);
ptr += cbBounds;
memcpy(ptr, pVars, cbVars);
ptr += cbVars;
if (pLoaderHeap != NULL)
{
return ptrStart;
}
else
{
pDebugInfoBuffer->CloseRawBuffer(cbFinalSize.Value());
return NULL;
}
}
#endif // DACCESS_COMPILE
//-----------------------------------------------------------------------------
// Uncompression (restore) routines
//-----------------------------------------------------------------------------
// Uncompress data supplied by Compress functions.
void CompressDebugInfo::RestoreBoundariesAndVars(
IN FP_IDS_NEW fpNew, IN void * pNewData,
IN PTR_BYTE pDebugInfo,
OUT ULONG32 * pcMap, // number of entries in ppMap
OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array
OUT ULONG32 *pcVars,
OUT ICorDebugInfo::NativeVarInfo **ppVars,
BOOL hasFlagByte
)
{
CONTRACTL
{
THROWS; // reading from nibble stream may throw on invalid data.
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
if (pcMap != NULL) *pcMap = 0;
if (ppMap != NULL) *ppMap = NULL;
if (pcVars != NULL) *pcVars = 0;
if (ppVars != NULL) *ppVars = NULL;
#ifdef FEATURE_ON_STACK_REPLACEMENT
if (hasFlagByte)
{
// Check flag byte and skip over any patchpoint info
BYTE flagByte = *pDebugInfo;
pDebugInfo++;
if (flagByte == 1)
{
PTR_PatchpointInfo patchpointInfo = dac_cast<PTR_PatchpointInfo>(pDebugInfo);
pDebugInfo += patchpointInfo->PatchpointInfoSize();
}
else
{
_ASSERTE(flagByte == 0);
}
}
#else
_ASSERTE(!hasFlagByte);
#endif
NibbleReader r(pDebugInfo, 12 /* maximum size of compressed 2 UINT32s */);
ULONG cbBounds = r.ReadEncodedU32();
ULONG cbVars = r.ReadEncodedU32();
PTR_BYTE addrBounds = pDebugInfo + r.GetNextByteIndex();
PTR_BYTE addrVars = addrBounds + cbBounds;
if ((pcMap != NULL || ppMap != NULL) && (cbBounds != 0))
{
NibbleReader r(addrBounds, cbBounds);
TransferReader t(r);
UINT32 cNumEntries = r.ReadEncodedU32();
_ASSERTE(cNumEntries > 0);
if (pcMap != NULL)
*pcMap = cNumEntries;
if (ppMap != NULL)
{
ICorDebugInfo::OffsetMapping * pMap = reinterpret_cast<ICorDebugInfo::OffsetMapping *>
(fpNew(pNewData, cNumEntries * sizeof(ICorDebugInfo::OffsetMapping)));
if (pMap == NULL)
{
ThrowOutOfMemory();
}
*ppMap = pMap;
// Main decompression routine.
DoBounds(t, cNumEntries, pMap);
}
}
if ((pcVars != NULL || ppVars != NULL) && (cbVars != 0))
{
NibbleReader r(addrVars, cbVars);
TransferReader t(r);
UINT32 cNumEntries = r.ReadEncodedU32();
_ASSERTE(cNumEntries > 0);
if (pcVars != NULL)
*pcVars = cNumEntries;
if (ppVars != NULL)
{
ICorDebugInfo::NativeVarInfo * pVars = reinterpret_cast<ICorDebugInfo::NativeVarInfo *>
(fpNew(pNewData, cNumEntries * sizeof(ICorDebugInfo::NativeVarInfo)));
if (pVars == NULL)
{
ThrowOutOfMemory();
}
*ppVars = pVars;
for(UINT32 i = 0; i < cNumEntries; i++)
{
DoNativeVarInfo(t, &pVars[i]);
}
}
}
}
#ifdef FEATURE_ON_STACK_REPLACEMENT
PatchpointInfo * CompressDebugInfo::RestorePatchpointInfo(IN PTR_BYTE pDebugInfo)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
PTR_PatchpointInfo patchpointInfo = NULL;
// Check flag byte.
BYTE flagByte = *pDebugInfo;
pDebugInfo++;
if (flagByte == 1)
{
patchpointInfo = dac_cast<PTR_PatchpointInfo>(pDebugInfo);
}
else
{
_ASSERTE(flagByte == 0);
}
return patchpointInfo;
}
#endif
#ifdef DACCESS_COMPILE
void CompressDebugInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo, BOOL hasFlagByte)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
}
CONTRACTL_END;
#ifdef FEATURE_ON_STACK_REPLACEMENT
if (hasFlagByte)
{
// Check flag byte and skip over any patchpoint info
BYTE flagByte = *pDebugInfo;
pDebugInfo++;
if (flagByte == 1)
{
PTR_PatchpointInfo patchpointInfo = dac_cast<PTR_PatchpointInfo>(pDebugInfo);
pDebugInfo += patchpointInfo->PatchpointInfoSize();
}
else
{
_ASSERTE(flagByte == 0);
}
}
#else
_ASSERTE(!hasFlagByte);
#endif
NibbleReader r(pDebugInfo, 12 /* maximum size of compressed 2 UINT32s */);
ULONG cbBounds = r.ReadEncodedU32();
ULONG cbVars = r.ReadEncodedU32();
DacEnumMemoryRegion(dac_cast<TADDR>(pDebugInfo), r.GetNextByteIndex() + cbBounds + cbVars);
}
#endif // DACCESS_COMPILE
// Init given a starting address from the start of code.
void DebugInfoRequest::InitFromStartingAddr(MethodDesc * pMD, PCODE addrCode)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
_ASSERTE(pMD != NULL);
_ASSERTE(addrCode != NULL);
this->m_pMD = pMD;
this->m_addrStart = addrCode;
}
//-----------------------------------------------------------------------------
// Impl for DebugInfoManager's IDebugInfoStore
//-----------------------------------------------------------------------------
BOOL DebugInfoManager::GetBoundariesAndVars(
const DebugInfoRequest & request,
IN FP_IDS_NEW fpNew, IN void * pNewData,
OUT ULONG32 * pcMap,
OUT ICorDebugInfo::OffsetMapping ** ppMap,
OUT ULONG32 * pcVars,
OUT ICorDebugInfo::NativeVarInfo ** ppVars)
{
CONTRACTL
{
THROWS;
WRAPPER(GC_TRIGGERS); // depends on fpNew
SUPPORTS_DAC;
}
CONTRACTL_END;
IJitManager* pJitMan = ExecutionManager::FindJitMan(request.GetStartAddress());
if (pJitMan == NULL)
{
return FALSE; // no info available.
}
return pJitMan->GetBoundariesAndVars(request, fpNew, pNewData, pcMap, ppMap, pcVars, ppVars);
}
#ifdef DACCESS_COMPILE
void DebugInfoManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
}
CONTRACTL_END;
PCODE addrCode = pMD->GetNativeCode();
if (addrCode == NULL)
{
return;
}
IJitManager* pJitMan = ExecutionManager::FindJitMan(addrCode);
if (pJitMan == NULL)
{
return; // no info available.
}
pJitMan->EnumMemoryRegionsForMethodDebugInfo(flags, pMD);
}
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// DebugInfoStore
#include "common.h"
#include "debuginfostore.h"
#include "nibblestream.h"
#include "patchpointinfo.h"
#ifdef _DEBUG
// For debug builds only.
static bool Dbg_ShouldUseCookies()
{
SUPPORTS_DAC;
// Normally we want this as false b/c it would bloat the image.
// But give us a hook to enable it in case we need it.
return false;
}
#endif
//-----------------------------------------------------------------------------
// We have "Transfer" objects that sit on top of the streams.
// The objects look identical, but one serializes and the other deserializes.
// This lets the compression + restoration routines share all their compression
// logic and just swap out Transfer objects.
//
// It's not ideal that we have a lot of redundancy maintaining both Transfer
// objects, but at least the compiler can enforce that the Reader & Writer are
// in sync. It can't enforce that 2 separate routines for Compression &
// restoration are in sync.
//
// We could have the TransferReader + Writer be polymorphic off a base class,
// but the virtual function calls will be extra overhead. May as well use
// templates and let the compiler resolve it all statically at compile time.
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Serialize to a NibbleWriter stream.
//-----------------------------------------------------------------------------
class TransferWriter
{
public:
TransferWriter(NibbleWriter & w) : m_w(w)
{
}
// Write a raw U32 in nibble-encoded form.
void DoEncodedU32(uint32_t dw) { m_w.WriteEncodedU32(dw); }
// Use to encode a monotonically increasing delta.
void DoEncodedDeltaU32(uint32_t & dw, uint32_t dwLast)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(dw >= dwLast);
uint32_t dwDelta = dw - dwLast;
m_w.WriteEncodedU32(dwDelta);
}
// Some U32 may have a few sentinel negative values.
// We adjust it to be a real U32 and then encode that.
// dwAdjust should be the lower bound on the enum.
void DoEncodedAdjustedU32(uint32_t dw, uint32_t dwAdjust)
{
//_ASSERTE(dwAdjust < 0); // some negative lower bound.
m_w.WriteEncodedU32(dw - dwAdjust);
}
// Typesafe versions of EncodeU32.
void DoEncodedSourceType(ICorDebugInfo::SourceTypes & dw) { m_w.WriteEncodedU32(dw); }
void DoEncodedVarLocType(ICorDebugInfo::VarLocType & dw) { m_w.WriteEncodedU32(dw); }
void DoEncodedUnsigned(unsigned & dw) { m_w.WriteEncodedU32(dw); }
// Stack offsets are aligned on a DWORD boundary, so that lets us shave off 2 bits.
void DoEncodedStackOffset(signed & dwOffset)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
#ifdef TARGET_X86
_ASSERTE(dwOffset % sizeof(DWORD) == 0); // should be dword aligned. That'll save us 2 bits.
m_w.WriteEncodedI32(dwOffset / sizeof(DWORD));
#else
// Non x86 platforms don't need it to be dword aligned.
m_w.WriteEncodedI32(dwOffset);
#endif
}
void DoEncodedRegIdx(ICorDebugInfo::RegNum & reg) { m_w.WriteEncodedU32(reg); }
// For debugging purposes, inject cookies into the Compression.
void DoCookie(BYTE b) {
#ifdef _DEBUG
if (Dbg_ShouldUseCookies())
{
m_w.WriteNibble(b);
}
#endif
}
protected:
NibbleWriter & m_w;
};
//-----------------------------------------------------------------------------
// Deserializer that sits on top of a NibbleReader
// This class interface matches TransferWriter exactly. See that for details.
//-----------------------------------------------------------------------------
class TransferReader
{
public:
TransferReader(NibbleReader & r) : m_r(r)
{
SUPPORTS_DAC;
}
void DoEncodedU32(uint32_t & dw)
{
SUPPORTS_DAC;
dw = m_r.ReadEncodedU32();
}
// Use to decode a monotonically increasing delta.
// dwLast was the last value; we update it to the current value on output.
void DoEncodedDeltaU32(uint32_t & dw, uint32_t dwLast)
{
SUPPORTS_DAC;
uint32_t dwDelta = m_r.ReadEncodedU32();
dw = dwLast + dwDelta;
}
void DoEncodedAdjustedU32(uint32_t & dw, uint32_t dwAdjust)
{
SUPPORTS_DAC;
//_ASSERTE(dwAdjust < 0);
dw = m_r.ReadEncodedU32() + dwAdjust;
}
void DoEncodedSourceType(ICorDebugInfo::SourceTypes & dw)
{
SUPPORTS_DAC;
dw = (ICorDebugInfo::SourceTypes) m_r.ReadEncodedU32();
}
void DoEncodedVarLocType(ICorDebugInfo::VarLocType & dw)
{
SUPPORTS_DAC;
dw = (ICorDebugInfo::VarLocType) m_r.ReadEncodedU32();
}
void DoEncodedUnsigned(unsigned & dw)
{
SUPPORTS_DAC;
dw = (unsigned) m_r.ReadEncodedU32();
}
// Stack offsets are aligned on a DWORD boundary, so that lets us shave off 2 bits.
void DoEncodedStackOffset(signed & dwOffset)
{
SUPPORTS_DAC;
#ifdef TARGET_X86
dwOffset = m_r.ReadEncodedI32() * sizeof(DWORD);
#else
// Non x86 platforms don't need it to be dword aligned.
dwOffset = m_r.ReadEncodedI32();
#endif
}
void DoEncodedRegIdx(ICorDebugInfo::RegNum & reg)
{
SUPPORTS_DAC;
reg = (ICorDebugInfo::RegNum) m_r.ReadEncodedU32();
}
// For debugging purposes, inject cookies into the Compression.
void DoCookie(BYTE b)
{
SUPPORTS_DAC;
#ifdef _DEBUG
if (Dbg_ShouldUseCookies())
{
BYTE b2 = m_r.ReadNibble();
_ASSERTE(b == b2);
}
#endif
}
protected:
NibbleReader & m_r;
};
#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
// Perf tracking
static int g_CDI_TotalMethods = 0;
static int g_CDI_bMethodTotalUncompress = 0;
static int g_CDI_bMethodTotalCompress = 0;
static int g_CDI_bVarsTotalUncompress = 0;
static int g_CDI_bVarsTotalCompress = 0;
#endif
//-----------------------------------------------------------------------------
// Serialize Bounds info.
//-----------------------------------------------------------------------------
template <class T>
void DoBounds(
T trans, // transfer object.
ULONG32 cMap,
ICorDebugInfo::OffsetMapping *pMap
)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
// Bounds info contains (Native Offset, IL Offset, flags)
// - Sorted by native offset (so use a delta encoding for that).
// - IL offsets aren't sorted, but they should be close to each other (so a signed delta encoding)
// They may also include a sentinel value from MappingTypes.
// - flags is 3 independent bits.
// Loop through and transfer each Entry in the Mapping.
uint32_t dwLastNativeOffset = 0;
for(uint32_t i = 0; i < cMap; i++)
{
ICorDebugInfo::OffsetMapping * pBound = &pMap[i];
trans.DoEncodedDeltaU32(pBound->nativeOffset, dwLastNativeOffset);
dwLastNativeOffset = pBound->nativeOffset;
trans.DoEncodedAdjustedU32(pBound->ilOffset, (DWORD) ICorDebugInfo::MAX_MAPPING_VALUE);
trans.DoEncodedSourceType(pBound->source);
trans.DoCookie(0xA);
}
}
// Helper to write a compressed Native Var Info
template<class T>
void DoNativeVarInfo(
T trans,
ICorDebugInfo::NativeVarInfo * pVar
)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
// Each Varinfo has a:
// - native start + end offset. We can use a delta for the end offset.
// - IL variable number. These are usually small.
// - VarLoc information. This is a tagged variant.
// The entries aren't sorted in any particular order.
trans.DoCookie(0xB);
trans.DoEncodedU32(pVar->startOffset);
trans.DoEncodedDeltaU32(pVar->endOffset, pVar->startOffset);
// record var number.
trans.DoEncodedAdjustedU32(pVar->varNumber, (DWORD) ICorDebugInfo::MAX_ILNUM);
// Now write the VarLoc... This is a variant-like structure and so we'll get different
// compression depending on what we've got.
trans.DoEncodedVarLocType(pVar->loc.vlType);
switch(pVar->loc.vlType)
{
case ICorDebugInfo::VLT_REG:
case ICorDebugInfo::VLT_REG_FP: // fall through
case ICorDebugInfo::VLT_REG_BYREF: // fall through
trans.DoEncodedRegIdx(pVar->loc.vlReg.vlrReg);
break;
case ICorDebugInfo::VLT_STK:
case ICorDebugInfo::VLT_STK_BYREF: // fall through
trans.DoEncodedRegIdx(pVar->loc.vlStk.vlsBaseReg);
trans.DoEncodedStackOffset(pVar->loc.vlStk.vlsOffset);
break;
case ICorDebugInfo::VLT_REG_REG:
trans.DoEncodedRegIdx(pVar->loc.vlRegReg.vlrrReg1);
trans.DoEncodedRegIdx(pVar->loc.vlRegReg.vlrrReg2);
break;
case ICorDebugInfo::VLT_REG_STK:
trans.DoEncodedRegIdx(pVar->loc.vlRegStk.vlrsReg);
trans.DoEncodedRegIdx(pVar->loc.vlRegStk.vlrsStk.vlrssBaseReg);
trans.DoEncodedStackOffset(pVar->loc.vlRegStk.vlrsStk.vlrssOffset);
break;
case ICorDebugInfo::VLT_STK_REG:
trans.DoEncodedStackOffset(pVar->loc.vlStkReg.vlsrStk.vlsrsOffset);
trans.DoEncodedRegIdx(pVar->loc.vlStkReg.vlsrStk.vlsrsBaseReg);
trans.DoEncodedRegIdx(pVar->loc.vlStkReg.vlsrReg);
break;
case ICorDebugInfo::VLT_STK2:
trans.DoEncodedRegIdx(pVar->loc.vlStk2.vls2BaseReg);
trans.DoEncodedStackOffset(pVar->loc.vlStk2.vls2Offset);
break;
case ICorDebugInfo::VLT_FPSTK:
trans.DoEncodedUnsigned(pVar->loc.vlFPstk.vlfReg);
break;
case ICorDebugInfo::VLT_FIXED_VA:
trans.DoEncodedUnsigned(pVar->loc.vlFixedVarArg.vlfvOffset);
break;
default:
_ASSERTE(!"Unknown varloc type!");
break;
}
trans.DoCookie(0xC);
}
#ifndef DACCESS_COMPILE
void CompressDebugInfo::CompressBoundaries(
IN ULONG32 cMap,
IN ICorDebugInfo::OffsetMapping *pMap,
IN OUT NibbleWriter *pWriter
)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(pWriter != NULL);
_ASSERTE((pMap == NULL) == (cMap == 0));
if (cMap != 0)
{
pWriter->WriteEncodedU32(cMap);
TransferWriter t(*pWriter);
DoBounds(t, cMap, pMap);
pWriter->Flush();
}
#ifdef _DEBUG
DWORD cbBlob;
PVOID pBlob = pWriter->GetBlob(&cbBlob);
// Track perf #s for compression...
g_CDI_TotalMethods++;
g_CDI_bMethodTotalUncompress += sizeof(ICorDebugInfo::OffsetMapping) * cMap;
g_CDI_bMethodTotalCompress += (int) cbBlob;
#endif // _DEBUG
}
void CompressDebugInfo::CompressVars(
IN ULONG32 cVars,
IN ICorDebugInfo::NativeVarInfo *vars,
IN OUT NibbleWriter *pWriter
)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(pWriter != NULL);
_ASSERTE((cVars == 0) == (vars == NULL));
if (cVars != 0)
{
pWriter->WriteEncodedU32(cVars);
TransferWriter t(*pWriter);
for(ULONG32 i = 0; i < cVars; i ++)
{
DoNativeVarInfo(t, &vars[i]);
}
pWriter->Flush();
}
#ifdef _DEBUG
DWORD cbBlob;
PVOID pBlob = pWriter->GetBlob(&cbBlob);
g_CDI_bVarsTotalUncompress += cVars * sizeof(ICorDebugInfo::NativeVarInfo);
g_CDI_bVarsTotalCompress += (int) cbBlob;
#endif
}
PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars(
IN ICorDebugInfo::OffsetMapping * pOffsetMapping,
IN ULONG iOffsetMapping,
IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo,
IN ULONG iNativeVarInfo,
IN PatchpointInfo * patchpointInfo,
IN OUT SBuffer * pDebugInfoBuffer,
IN LoaderHeap * pLoaderHeap
)
{
CONTRACTL {
THROWS; // compression routines throw
PRECONDITION((iOffsetMapping == 0) == (pOffsetMapping == NULL));
PRECONDITION((iNativeVarInfo == 0) == (pNativeVarInfo == NULL));
PRECONDITION((pDebugInfoBuffer != NULL) ^ (pLoaderHeap != NULL));
} CONTRACTL_END;
// Patchpoint info is currently uncompressed.
DWORD cbPatchpointInfo = 0;
#ifdef FEATURE_ON_STACK_REPLACEMENT
if (patchpointInfo != NULL)
{
cbPatchpointInfo = patchpointInfo->PatchpointInfoSize();
}
#else
_ASSERTE(patchpointInfo == NULL);
#endif
// Actually do the compression. These will throw on oom.
NibbleWriter boundsBuffer;
DWORD cbBounds = 0;
PVOID pBounds = NULL;
if (iOffsetMapping > 0)
{
CompressDebugInfo::CompressBoundaries(iOffsetMapping, pOffsetMapping, &boundsBuffer);
pBounds = boundsBuffer.GetBlob(&cbBounds);
}
NibbleWriter varsBuffer;
DWORD cbVars = 0;
PVOID pVars = NULL;
if (iNativeVarInfo > 0)
{
CompressDebugInfo::CompressVars(iNativeVarInfo, pNativeVarInfo, &varsBuffer);
pVars = varsBuffer.GetBlob(&cbVars);
}
// Now write it all out to the buffer in a compact fashion.
NibbleWriter w;
w.WriteEncodedU32(cbBounds);
w.WriteEncodedU32(cbVars);
w.Flush();
DWORD cbHeader;
PVOID pHeader = w.GetBlob(&cbHeader);
#ifdef FEATURE_ON_STACK_REPLACEMENT
S_UINT32 cbFinalSize = S_UINT32(1) + S_UINT32(cbPatchpointInfo) + S_UINT32(cbHeader) + S_UINT32(cbBounds) + S_UINT32(cbVars);
#else
S_UINT32 cbFinalSize = S_UINT32(cbHeader) + S_UINT32(cbBounds) + S_UINT32(cbVars);
#endif
if (cbFinalSize.IsOverflow())
ThrowHR(COR_E_OVERFLOW);
BYTE *ptrStart = NULL;
if (pLoaderHeap != NULL)
{
ptrStart = (BYTE *)(void *)pLoaderHeap->AllocMem(S_SIZE_T(cbFinalSize.Value()));
}
else
{
// Create a conservatively large buffer to hold all the data.
ptrStart = pDebugInfoBuffer->OpenRawBuffer(cbFinalSize.Value());
}
_ASSERTE(ptrStart != NULL); // throws on oom.
BYTE *ptr = ptrStart;
#ifdef FEATURE_ON_STACK_REPLACEMENT
// First byte is a flag byte:
// 0 - no patchpoint info
// 1 - patchpoint info
*ptr++ = (cbPatchpointInfo > 0) ? 1 : 0;
if (cbPatchpointInfo > 0)
{
memcpy(ptr, (BYTE*) patchpointInfo, cbPatchpointInfo);
ptr += cbPatchpointInfo;
}
#endif
memcpy(ptr, pHeader, cbHeader);
ptr += cbHeader;
memcpy(ptr, pBounds, cbBounds);
ptr += cbBounds;
memcpy(ptr, pVars, cbVars);
ptr += cbVars;
if (pLoaderHeap != NULL)
{
return ptrStart;
}
else
{
pDebugInfoBuffer->CloseRawBuffer(cbFinalSize.Value());
return NULL;
}
}
#endif // DACCESS_COMPILE
//-----------------------------------------------------------------------------
// Uncompression (restore) routines
//-----------------------------------------------------------------------------
// Uncompress data supplied by Compress functions.
void CompressDebugInfo::RestoreBoundariesAndVars(
IN FP_IDS_NEW fpNew, IN void * pNewData,
IN PTR_BYTE pDebugInfo,
OUT ULONG32 * pcMap, // number of entries in ppMap
OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array
OUT ULONG32 *pcVars,
OUT ICorDebugInfo::NativeVarInfo **ppVars,
BOOL hasFlagByte
)
{
CONTRACTL
{
THROWS; // reading from nibble stream may throw on invalid data.
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
if (pcMap != NULL) *pcMap = 0;
if (ppMap != NULL) *ppMap = NULL;
if (pcVars != NULL) *pcVars = 0;
if (ppVars != NULL) *ppVars = NULL;
#ifdef FEATURE_ON_STACK_REPLACEMENT
if (hasFlagByte)
{
// Check flag byte and skip over any patchpoint info
BYTE flagByte = *pDebugInfo;
pDebugInfo++;
if (flagByte == 1)
{
PTR_PatchpointInfo patchpointInfo = dac_cast<PTR_PatchpointInfo>(pDebugInfo);
pDebugInfo += patchpointInfo->PatchpointInfoSize();
}
else
{
_ASSERTE(flagByte == 0);
}
}
#else
_ASSERTE(!hasFlagByte);
#endif
NibbleReader r(pDebugInfo, 12 /* maximum size of 2 compressed UINT32s */);
ULONG cbBounds = r.ReadEncodedU32();
ULONG cbVars = r.ReadEncodedU32();
PTR_BYTE addrBounds = pDebugInfo + r.GetNextByteIndex();
PTR_BYTE addrVars = addrBounds + cbBounds;
if ((pcMap != NULL || ppMap != NULL) && (cbBounds != 0))
{
NibbleReader r(addrBounds, cbBounds);
TransferReader t(r);
UINT32 cNumEntries = r.ReadEncodedU32();
_ASSERTE(cNumEntries > 0);
if (pcMap != NULL)
*pcMap = cNumEntries;
if (ppMap != NULL)
{
ICorDebugInfo::OffsetMapping * pMap = reinterpret_cast<ICorDebugInfo::OffsetMapping *>
(fpNew(pNewData, cNumEntries * sizeof(ICorDebugInfo::OffsetMapping)));
if (pMap == NULL)
{
ThrowOutOfMemory();
}
*ppMap = pMap;
// Main decompression routine.
DoBounds(t, cNumEntries, pMap);
}
}
if ((pcVars != NULL || ppVars != NULL) && (cbVars != 0))
{
NibbleReader r(addrVars, cbVars);
TransferReader t(r);
UINT32 cNumEntries = r.ReadEncodedU32();
_ASSERTE(cNumEntries > 0);
if (pcVars != NULL)
*pcVars = cNumEntries;
if (ppVars != NULL)
{
ICorDebugInfo::NativeVarInfo * pVars = reinterpret_cast<ICorDebugInfo::NativeVarInfo *>
(fpNew(pNewData, cNumEntries * sizeof(ICorDebugInfo::NativeVarInfo)));
if (pVars == NULL)
{
ThrowOutOfMemory();
}
*ppVars = pVars;
for(UINT32 i = 0; i < cNumEntries; i++)
{
DoNativeVarInfo(t, &pVars[i]);
}
}
}
}
#ifdef FEATURE_ON_STACK_REPLACEMENT
PatchpointInfo * CompressDebugInfo::RestorePatchpointInfo(IN PTR_BYTE pDebugInfo)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
PTR_PatchpointInfo patchpointInfo = NULL;
// Check flag byte.
BYTE flagByte = *pDebugInfo;
pDebugInfo++;
if (flagByte == 1)
{
patchpointInfo = dac_cast<PTR_PatchpointInfo>(pDebugInfo);
}
else
{
_ASSERTE(flagByte == 0);
}
return patchpointInfo;
}
#endif
#ifdef DACCESS_COMPILE
void CompressDebugInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo, BOOL hasFlagByte)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
}
CONTRACTL_END;
#ifdef FEATURE_ON_STACK_REPLACEMENT
if (hasFlagByte)
{
// Check flag byte and skip over any patchpoint info
BYTE flagByte = *pDebugInfo;
pDebugInfo++;
if (flagByte == 1)
{
PTR_PatchpointInfo patchpointInfo = dac_cast<PTR_PatchpointInfo>(pDebugInfo);
pDebugInfo += patchpointInfo->PatchpointInfoSize();
}
else
{
_ASSERTE(flagByte == 0);
}
}
#else
_ASSERTE(!hasFlagByte);
#endif
NibbleReader r(pDebugInfo, 12 /* maximum size of 2 compressed UINT32s */);
ULONG cbBounds = r.ReadEncodedU32();
ULONG cbVars = r.ReadEncodedU32();
DacEnumMemoryRegion(dac_cast<TADDR>(pDebugInfo), r.GetNextByteIndex() + cbBounds + cbVars);
}
#endif // DACCESS_COMPILE
// Init given a starting address from the start of code.
void DebugInfoRequest::InitFromStartingAddr(MethodDesc * pMD, PCODE addrCode)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
_ASSERTE(pMD != NULL);
_ASSERTE(addrCode != NULL);
this->m_pMD = pMD;
this->m_addrStart = addrCode;
}
//-----------------------------------------------------------------------------
// Impl for DebugInfoManager's IDebugInfoStore
//-----------------------------------------------------------------------------
BOOL DebugInfoManager::GetBoundariesAndVars(
const DebugInfoRequest & request,
IN FP_IDS_NEW fpNew, IN void * pNewData,
OUT ULONG32 * pcMap,
OUT ICorDebugInfo::OffsetMapping ** ppMap,
OUT ULONG32 * pcVars,
OUT ICorDebugInfo::NativeVarInfo ** ppVars)
{
CONTRACTL
{
THROWS;
WRAPPER(GC_TRIGGERS); // depends on fpNew
SUPPORTS_DAC;
}
CONTRACTL_END;
IJitManager* pJitMan = ExecutionManager::FindJitMan(request.GetStartAddress());
if (pJitMan == NULL)
{
return FALSE; // no info available.
}
return pJitMan->GetBoundariesAndVars(request, fpNew, pNewData, pcMap, ppMap, pcVars, ppVars);
}
#ifdef DACCESS_COMPILE
void DebugInfoManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
}
CONTRACTL_END;
PCODE addrCode = pMD->GetNativeCode();
if (addrCode == NULL)
{
return;
}
IJitManager* pJitMan = ExecutionManager::FindJitMan(addrCode);
if (pJitMan == NULL)
{
return; // no info available.
}
pJitMan->EnumMemoryRegionsForMethodDebugInfo(flags, pMD);
}
#endif
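For readers following the compression code above, here is a minimal sketch of how a blob produced by CompressBoundariesAndVars is laid out and later walked by RestoreBoundariesAndVars: an optional flag byte, an optional (uncompressed) patchpoint block, a small header carrying the two stream sizes, then the bounds and vars streams. ReadVarU32, DebugInfoView, and ParseDebugInfoBlob below are stand-ins invented for illustration; the runtime uses its NibbleWriter/NibbleReader encoding rather than the LEB128-style codec used here.

// Sketch only: stand-in varint codec, not the runtime's nibble stream.
#include <cstdint>
#include <cstdio>
#include <vector>

static uint32_t ReadVarU32(const uint8_t*& p)   // hypothetical decoder
{
    uint32_t value = 0;
    int shift = 0;
    uint8_t b;
    do
    {
        b = *p++;
        value |= uint32_t(b & 0x7Fu) << shift;
        shift += 7;
    } while (b & 0x80u);
    return value;
}

struct DebugInfoView
{
    const uint8_t* patchpoint;  // non-null only when the flag byte is 1
    const uint8_t* bounds;  uint32_t cbBounds;
    const uint8_t* vars;    uint32_t cbVars;
};

// Mirrors RestoreBoundariesAndVars: skip optional patchpoint data, read the
// header (cbBounds, cbVars), then slice out the two compressed streams.
static DebugInfoView ParseDebugInfoBlob(const uint8_t* p, bool hasFlagByte, uint32_t cbPatchpoint)
{
    DebugInfoView v = {};
    if (hasFlagByte && *p++ == 1)   // flag byte is consumed whenever it is present
    {
        v.patchpoint = p;           // the real code gets the size from the block itself
        p += cbPatchpoint;
    }
    v.cbBounds = ReadVarU32(p);
    v.cbVars   = ReadVarU32(p);
    v.bounds   = p;
    v.vars     = p + v.cbBounds;
    return v;
}

int main()
{
    // Flag byte 0 (no patchpoint info), cbBounds = 3, cbVars = 2, then the two streams.
    std::vector<uint8_t> blob = { 0, 3, 2, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE };
    DebugInfoView v = ParseDebugInfoBlob(blob.data(), /*hasFlagByte*/ true, /*cbPatchpoint*/ 0);
    std::printf("bounds bytes = %u, vars bytes = %u\n", (unsigned)v.cbBounds, (unsigned)v.cbVars);
    return 0;
}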
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/vm/inlinetracking.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// =============================================================================================
// Code for tracking method inlinings in NGen and R2R images.
// The only information stored is "who" got inlined "where", no offsets or inlining depth tracking.
// (No good for debugger yet.)
// This information is later exposed to profilers and can be useful for ReJIT.
// Runtime inlining is not being tracked because profilers can deduce it via callbacks anyway.
// =============================================================================================
#include "common.h"
#include "inlinetracking.h"
#include "ceeload.h"
#include "versionresilienthashcode.h"
using namespace NativeFormat;
#ifndef DACCESS_COMPILE
bool MethodInModule::operator <(const MethodInModule& other) const
{
STANDARD_VM_CONTRACT;
if (m_module == other.m_module)
{
return m_methodDef < other.m_methodDef;
}
else
{
// Since NGen images are supposed to be deterministic,
// we need a stable sort order that doesn't change between runs.
// That's why we use names and GUIDs instead of just doing m_module < other.m_module
// First we try to compare simple names (should be fast enough)
LPCUTF8 simpleName = m_module ? m_module->GetSimpleName() : "";
LPCUTF8 otherSimpleName = other.m_module ? other.m_module->GetSimpleName() : "";
int nameCmpResult = strcmp(simpleName, otherSimpleName);
if (nameCmpResult == 0)
{
// Names are equal but the module addresses aren't; that's suspicious,
// so fall back to module GUIDs
GUID thisGuid, otherGuid;
if (m_module == NULL)
{
memset(&thisGuid, 0, sizeof(GUID));
}
else
{
m_module->GetPEAssembly()->GetMVID(&thisGuid);
}
if (other.m_module == NULL)
{
memset(&otherGuid, 0, sizeof(GUID));
}
else
{
other.m_module->GetPEAssembly()->GetMVID(&otherGuid);
}
return memcmp(&thisGuid, &otherGuid, sizeof(GUID)) < 0;
}
else
{
return nameCmpResult < 0;
}
}
}
bool MethodInModule::operator ==(const MethodInModule& other) const
{
LIMITED_METHOD_DAC_CONTRACT;
return m_methodDef == other.m_methodDef &&
m_module == other.m_module;
}
bool MethodInModule::operator !=(const MethodInModule& other) const
{
LIMITED_METHOD_DAC_CONTRACT;
return m_methodDef != other.m_methodDef ||
m_module != other.m_module;
}
void InlineTrackingEntry::SortAndDeduplicate()
{
STANDARD_VM_CONTRACT;
//Sort
MethodInModule *begin = &m_inliners[0];
MethodInModule *end = begin + m_inliners.GetCount();
util::sort(begin, end);
//Deduplicate
MethodInModule *left = begin;
MethodInModule *right = left + 1;
while (right < end)
{
auto rvalue = *right;
if (*left != rvalue)
{
left++;
if (left != right)
{
*left = rvalue;
}
}
right++;
}
//Shrink
int newCount = (int)(left - begin + 1);
m_inliners.SetCount(newCount);
}
InlineTrackingEntry::InlineTrackingEntry(const InlineTrackingEntry& other)
:m_inlinee(other.m_inlinee)
{
STANDARD_VM_CONTRACT;
m_inliners.Set(other.m_inliners);
}
InlineTrackingEntry & InlineTrackingEntry::operator = (const InlineTrackingEntry &other)
{
STANDARD_VM_CONTRACT;
m_inlinee = other.m_inlinee;
m_inliners.Set(other.m_inliners);
return *this;
}
void InlineTrackingEntry::Add(PTR_MethodDesc inliner)
{
STANDARD_VM_CONTRACT;
MethodInModule method(inliner->GetModule(), inliner->GetMemberDef());
// Go through the last 10 inliners to check whether a given inliner has recently been registered.
// This filters out most duplicates without having to scan through hundreds of inliners
// for methods like Object.ctor or Monitor.Enter.
// We are OK to keep occasional duplicates in m_inliners, we'll get rid of them
// in SortAndDeduplicate() anyway.
int count = static_cast<int>(m_inliners.GetCount());
int start = max(0, count - 10);
for (int i = count - 1; i >= start; i--)
{
if (m_inliners[i] == method)
return;
}
//looks like we're seeing this inliner for the first time, add it to the collection
m_inliners.Append(method);
}
InlineTrackingMap::InlineTrackingMap()
: m_mapCrst(CrstInlineTrackingMap)
{
STANDARD_VM_CONTRACT;
}
void InlineTrackingMap::AddInlining(MethodDesc *inliner, MethodDesc *inlinee)
{
STANDARD_VM_CONTRACT;
_ASSERTE(inliner != NULL);
_ASSERTE(inlinee != NULL);
MethodInModule inlineeMnM(inlinee->GetModule(), inlinee->GetMemberDef());
if (RidFromToken(inlineeMnM.m_methodDef) == 0 || RidFromToken(inliner->GetMemberDef()) == 0)
{
// Sometimes we do see methods that don't have valid tokens (stubs etc)
// we just ignore them.
return;
}
CrstHolder lock(&m_mapCrst);
InlineTrackingEntry *existingEntry = const_cast<InlineTrackingEntry *>(LookupPtr(inlineeMnM));
if (existingEntry)
{
// We saw this inlinee before, just add one more inliner
existingEntry->Add(inliner);
}
else
{
// We haven't seen this inlinee before, create a new record in the hashtable
// and add a first inliner to it.
InlineTrackingEntry newEntry;
newEntry.m_inlinee = inlineeMnM;
newEntry.Add(inliner);
Add(newEntry);
}
}
#endif //!DACCESS_COMPILE
#ifdef FEATURE_READYTORUN
struct InliningHeader
{
int SizeOfInlineeIndex;
};
#ifndef DACCESS_COMPILE
BOOL PersistentInlineTrackingMapR2R::TryLoad(Module* pModule, const BYTE* pBuffer, DWORD cbBuffer,
AllocMemTracker *pamTracker, PersistentInlineTrackingMapR2R** ppLoadedMap)
{
InliningHeader* pHeader = (InliningHeader*)pBuffer;
if (pHeader->SizeOfInlineeIndex > (int)(cbBuffer - sizeof(InliningHeader)))
{
//invalid serialized data, the index can't be larger than the entire block
_ASSERTE(!"R2R image is invalid or there is a bug in the R2R parser");
return FALSE;
}
//NOTE: Error checking on the format is very limited at this point.
//We trust the image format is valid and this initial check is a cheap
//verification that may help catch simple bugs. It does not protect against
//a deliberately malformed, malicious binary.
LoaderHeap *pHeap = pModule->GetLoaderAllocator()->GetHighFrequencyHeap();
void * pMemory = pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(PersistentInlineTrackingMapR2R)));
PersistentInlineTrackingMapR2R* pMap = new (pMemory) PersistentInlineTrackingMapR2R();
pMap->m_module = pModule;
pMap->m_inlineeIndex = (PTR_ZapInlineeRecord)(pHeader + 1);
pMap->m_inlineeIndexSize = pHeader->SizeOfInlineeIndex / sizeof(ZapInlineeRecord);
pMap->m_inlinersBuffer = ((PTR_BYTE)(pHeader+1)) + pHeader->SizeOfInlineeIndex;
pMap->m_inlinersBufferSize = cbBuffer - sizeof(InliningHeader) - pMap->m_inlineeIndexSize;
*ppLoadedMap = pMap;
return TRUE;
}
#endif //!DACCESS_COMPILE
COUNT_T PersistentInlineTrackingMapR2R::GetInliners(PTR_Module inlineeOwnerMod, mdMethodDef inlineeTkn, COUNT_T inlinersSize, MethodInModule inliners[], BOOL *incompleteData)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(inlineeOwnerMod);
_ASSERTE(inliners != NULL || inlinersSize == 0);
if (incompleteData)
{
*incompleteData = FALSE;
}
if (m_inlineeIndex == NULL || m_inlinersBuffer == NULL)
{
//No inlines saved in this image.
return 0;
}
if(inlineeOwnerMod != m_module)
{
// no cross module inlining (yet?)
return 0;
}
// Binary search to find all records matching (inlineeTkn)
ZapInlineeRecord probeRecord;
probeRecord.InitForR2R(RidFromToken(inlineeTkn));
ZapInlineeRecord *begin = m_inlineeIndex;
ZapInlineeRecord *end = m_inlineeIndex + m_inlineeIndexSize;
ZapInlineeRecord *foundRecord = util::lower_bound(begin, end, probeRecord);
DWORD result = 0;
DWORD outputIndex = 0;
// Go through all matching records
for (; foundRecord < end && *foundRecord == probeRecord; foundRecord++)
{
DWORD offset = foundRecord->m_offset;
NibbleReader stream(m_inlinersBuffer + offset, m_inlinersBufferSize - offset);
Module *inlinerModule = m_module;
DWORD inlinersCount = stream.ReadEncodedU32();
_ASSERTE(inlinersCount > 0);
RID inlinerRid = 0;
// Read inliner RIDs one by one; each RID is represented as an adjustment (diff) to the previous one.
// Add the inliner's module and copy the result to the output buffer.
for (DWORD i = 0; i < inlinersCount && outputIndex < inlinersSize; i++)
{
inlinerRid += stream.ReadEncodedU32();
mdMethodDef inlinerTkn = TokenFromRid(inlinerRid, mdtMethodDef);
inliners[outputIndex++] = MethodInModule(inlinerModule, inlinerTkn);
}
result += inlinersCount;
}
return result;
}
#ifndef DACCESS_COMPILE
BOOL PersistentInlineTrackingMapR2R2::TryLoad(Module* pModule, const BYTE* pBuffer, DWORD cbBuffer,
AllocMemTracker* pamTracker, PersistentInlineTrackingMapR2R2** ppLoadedMap)
{
LoaderHeap* pHeap = pModule->GetLoaderAllocator()->GetHighFrequencyHeap();
void* pMemory = pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(PersistentInlineTrackingMapR2R2)));
PersistentInlineTrackingMapR2R2* pMap = new (pMemory) PersistentInlineTrackingMapR2R2();
pMap->m_module = pModule;
pMap->m_reader = NativeReader(pBuffer, cbBuffer);
NativeParser parser = NativeParser(&pMap->m_reader, 0);
pMap->m_hashtable = NativeHashtable(parser);
*ppLoadedMap = pMap;
return TRUE;
}
COUNT_T PersistentInlineTrackingMapR2R2::GetInliners(PTR_Module inlineeOwnerMod, mdMethodDef inlineeTkn, COUNT_T inlinersSize, MethodInModule inliners[], BOOL* incompleteData)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(inlineeOwnerMod);
_ASSERTE(inliners != NULL || inlinersSize == 0);
if (incompleteData)
{
*incompleteData = FALSE;
}
DWORD result = 0;
int hashCode = GetVersionResilientModuleHashCode(inlineeOwnerMod);
hashCode ^= inlineeTkn;
NativeHashtable::Enumerator lookup = m_hashtable.Lookup(hashCode);
NativeParser entryParser;
while (lookup.GetNext(entryParser))
{
DWORD streamSize = entryParser.GetUnsigned();
_ASSERTE(streamSize > 1);
// First make sure this is the right inlinee and not just a hash collision
DWORD inlineeRidAndFlag = entryParser.GetUnsigned();
streamSize--;
mdMethodDef inlineeToken = TokenFromRid(inlineeRidAndFlag >> 1, mdtMethodDef);
if (inlineeToken != inlineeTkn)
{
continue;
}
Module* inlineeModule;
if ((inlineeRidAndFlag & 1) != 0)
{
inlineeModule = GetModuleByIndex(entryParser.GetUnsigned());
streamSize--;
_ASSERTE(streamSize > 0);
}
else
{
inlineeModule = m_module;
}
if (inlineeModule != inlineeOwnerMod)
{
continue;
}
// We have the right inlinee, let's look at the inliners
DWORD currentInlinerRid = 0;
do
{
DWORD inlinerRidDeltaAndFlag = entryParser.GetUnsigned();
streamSize--;
currentInlinerRid += inlinerRidDeltaAndFlag >> 1;
Module* inlinerModule;
if ((inlinerRidDeltaAndFlag & 1) != 0)
{
_ASSERTE(streamSize > 0);
inlinerModule = GetModuleByIndex(entryParser.GetUnsigned());
streamSize--;
if (inlinerModule == nullptr && incompleteData)
{
// We can't find a module for this inlineeModuleZapIndex, which means it hasn't been loaded yet
// (and maybe never will be), so we just report it to the profiler.
// The profiler might want to try again later when more modules are loaded.
*incompleteData = TRUE;
continue;
}
}
else
{
inlinerModule = m_module;
}
if (result < inlinersSize)
{
inliners[result].m_methodDef = TokenFromRid(currentInlinerRid, mdtMethodDef);
inliners[result].m_module = inlinerModule;
}
result++;
} while (streamSize > 0);
}
return result;
}
Module* PersistentInlineTrackingMapR2R2::GetModuleByIndex(DWORD index)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
// This "black magic spell" has in fact nothing to do with GenericInstantiationCompare per se, but just sets a thread flag
// that later activates more thorough search inside Module::GetAssemblyIfLoaded, which is indirectly called from GetModuleFromIndexIfLoaded.
// This is useful when ngen image was compiler against a different assembly version than the one loaded now.
ClrFlsThreadTypeSwitch genericInstantionCompareHolder(ThreadType_GenericInstantiationCompare);
return m_module->GetModuleFromIndexIfLoaded(index);
}
#endif //!DACCESS_COMPILE
#endif //FEATURE_READYTORUN
#if !defined(DACCESS_COMPILE)
JITInlineTrackingMap::JITInlineTrackingMap(LoaderAllocator *pAssociatedLoaderAllocator) :
m_mapCrst(CrstJitInlineTrackingMap),
m_map()
{
LIMITED_METHOD_CONTRACT;
m_map.Init(pAssociatedLoaderAllocator);
}
BOOL JITInlineTrackingMap::InliningExistsDontTakeLock(MethodDesc *inliner, MethodDesc *inlinee)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(m_mapCrst.OwnedByCurrentThread());
_ASSERTE(inliner != NULL);
_ASSERTE(inlinee != NULL);
BOOL found = FALSE;
auto lambda = [&](OBJECTREF obj, MethodDesc *lambdaInlinee, MethodDesc *lambdaInliner)
{
_ASSERTE(inlinee == lambdaInlinee);
if (lambdaInliner == inliner)
{
found = TRUE;
return false;
}
return true;
};
m_map.VisitValuesOfKey(inlinee, lambda);
return found;
}
void JITInlineTrackingMap::AddInlining(MethodDesc *inliner, MethodDesc *inlinee)
{
LIMITED_METHOD_CONTRACT;
inlinee = inlinee->LoadTypicalMethodDefinition();
CrstHolder holder(&m_mapCrst);
AddInliningDontTakeLock(inliner, inlinee);
}
void JITInlineTrackingMap::AddInliningDontTakeLock(MethodDesc *inliner, MethodDesc *inlinee)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(m_mapCrst.OwnedByCurrentThread());
_ASSERTE(inliner != NULL);
_ASSERTE(inlinee != NULL);
GCX_COOP();
if (!InliningExistsDontTakeLock(inliner, inlinee))
{
LoaderAllocator *loaderAllocatorOfInliner = inliner->GetLoaderAllocator();
m_map.Add(inlinee, inliner, loaderAllocatorOfInliner);
}
}
#endif // !defined(DACCESS_COMPILE)
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// =============================================================================================
// Code for tracking method inlinings in NGen and R2R images.
// The only information stored is "who" got inlined "where", no offsets or inlining depth tracking.
// (No good for debugger yet.)
// This information is later exposed to profilers and can be useful for ReJIT.
// Runtime inlining is not being tracked because profilers can deduce it via callbacks anyway.
// =============================================================================================
#include "common.h"
#include "inlinetracking.h"
#include "ceeload.h"
#include "versionresilienthashcode.h"
using namespace NativeFormat;
#ifndef DACCESS_COMPILE
bool MethodInModule::operator <(const MethodInModule& other) const
{
STANDARD_VM_CONTRACT;
if (m_module == other.m_module)
{
return m_methodDef < other.m_methodDef;
}
else
{
// Since NGen images are supposed to be deterministic,
// we need a stable sort order that doesn't change between runs.
// That's why we use names and GUIDs instead of just doing m_module < other.m_module
// First we try to compare simple names (should be fast enough)
LPCUTF8 simpleName = m_module ? m_module->GetSimpleName() : "";
LPCUTF8 otherSimpleName = other.m_module ? other.m_module->GetSimpleName() : "";
int nameCmpResult = strcmp(simpleName, otherSimpleName);
if (nameCmpResult == 0)
{
// Names are equal but the module addresses aren't; that's suspicious,
// so fall back to module GUIDs
GUID thisGuid, otherGuid;
if (m_module == NULL)
{
memset(&thisGuid, 0, sizeof(GUID));
}
else
{
m_module->GetPEAssembly()->GetMVID(&thisGuid);
}
if (other.m_module == NULL)
{
memset(&otherGuid, 0, sizeof(GUID));
}
else
{
other.m_module->GetPEAssembly()->GetMVID(&otherGuid);
}
return memcmp(&thisGuid, &otherGuid, sizeof(GUID)) < 0;
}
else
{
return nameCmpResult < 0;
}
}
}
bool MethodInModule::operator ==(const MethodInModule& other) const
{
LIMITED_METHOD_DAC_CONTRACT;
return m_methodDef == other.m_methodDef &&
m_module == other.m_module;
}
bool MethodInModule::operator !=(const MethodInModule& other) const
{
LIMITED_METHOD_DAC_CONTRACT;
return m_methodDef != other.m_methodDef ||
m_module != other.m_module;
}
void InlineTrackingEntry::SortAndDeduplicate()
{
STANDARD_VM_CONTRACT;
//Sort
MethodInModule *begin = &m_inliners[0];
MethodInModule *end = begin + m_inliners.GetCount();
util::sort(begin, end);
//Deduplicate
MethodInModule *left = begin;
MethodInModule *right = left + 1;
while (right < end)
{
auto rvalue = *right;
if (*left != rvalue)
{
left++;
if (left != right)
{
*left = rvalue;
}
}
right++;
}
//Shrink
int newCount = (int)(left - begin + 1);
m_inliners.SetCount(newCount);
}
InlineTrackingEntry::InlineTrackingEntry(const InlineTrackingEntry& other)
:m_inlinee(other.m_inlinee)
{
STANDARD_VM_CONTRACT;
m_inliners.Set(other.m_inliners);
}
InlineTrackingEntry & InlineTrackingEntry::operator = (const InlineTrackingEntry &other)
{
STANDARD_VM_CONTRACT;
m_inlinee = other.m_inlinee;
m_inliners.Set(other.m_inliners);
return *this;
}
void InlineTrackingEntry::Add(PTR_MethodDesc inliner)
{
STANDARD_VM_CONTRACT;
MethodInModule method(inliner->GetModule(), inliner->GetMemberDef());
// Go through the last 10 inliners to check whether a given inliner has recently been registered.
// This filters out most duplicates without having to scan through hundreds of inliners
// for methods like Object.ctor or Monitor.Enter.
// We are OK to keep occasional duplicates in m_inliners, we'll get rid of them
// in SortAndDeduplicate() anyway.
int count = static_cast<int>(m_inliners.GetCount());
int start = max(0, count - 10);
for (int i = count - 1; i >= start; i--)
{
if (m_inliners[i] == method)
return;
}
//looks like we're seeing this inliner for the first time, add it to the collection
m_inliners.Append(method);
}
InlineTrackingMap::InlineTrackingMap()
: m_mapCrst(CrstInlineTrackingMap)
{
STANDARD_VM_CONTRACT;
}
void InlineTrackingMap::AddInlining(MethodDesc *inliner, MethodDesc *inlinee)
{
STANDARD_VM_CONTRACT;
_ASSERTE(inliner != NULL);
_ASSERTE(inlinee != NULL);
MethodInModule inlineeMnM(inlinee->GetModule(), inlinee->GetMemberDef());
if (RidFromToken(inlineeMnM.m_methodDef) == 0 || RidFromToken(inliner->GetMemberDef()) == 0)
{
// Sometimes we do see methods that don't have valid tokens (stubs etc)
// we just ignore them.
return;
}
CrstHolder lock(&m_mapCrst);
InlineTrackingEntry *existingEntry = const_cast<InlineTrackingEntry *>(LookupPtr(inlineeMnM));
if (existingEntry)
{
// We saw this inlinee before, just add one more inliner
existingEntry->Add(inliner);
}
else
{
// We haven't seen this inlinee before, create a new record in the hashtable
// and add a first inliner to it.
InlineTrackingEntry newEntry;
newEntry.m_inlinee = inlineeMnM;
newEntry.Add(inliner);
Add(newEntry);
}
}
#endif //!DACCESS_COMPILE
#ifdef FEATURE_READYTORUN
struct InliningHeader
{
int SizeOfInlineeIndex;
};
#ifndef DACCESS_COMPILE
BOOL PersistentInlineTrackingMapR2R::TryLoad(Module* pModule, const BYTE* pBuffer, DWORD cbBuffer,
AllocMemTracker *pamTracker, PersistentInlineTrackingMapR2R** ppLoadedMap)
{
InliningHeader* pHeader = (InliningHeader*)pBuffer;
if (pHeader->SizeOfInlineeIndex > (int)(cbBuffer - sizeof(InliningHeader)))
{
//invalid serialized data, the index can't be larger than the entire block
_ASSERTE(!"R2R image is invalid or there is a bug in the R2R parser");
return FALSE;
}
//NOTE: Error checking on the format is very limited at this point.
//We trust the image format is valid and this initial check is a cheap
//verification that may help catch simple bugs. It does not protect against
//a deliberately malformed, malicious binary.
LoaderHeap *pHeap = pModule->GetLoaderAllocator()->GetHighFrequencyHeap();
void * pMemory = pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(PersistentInlineTrackingMapR2R)));
PersistentInlineTrackingMapR2R* pMap = new (pMemory) PersistentInlineTrackingMapR2R();
pMap->m_module = pModule;
pMap->m_inlineeIndex = (PTR_ZapInlineeRecord)(pHeader + 1);
pMap->m_inlineeIndexSize = pHeader->SizeOfInlineeIndex / sizeof(ZapInlineeRecord);
pMap->m_inlinersBuffer = ((PTR_BYTE)(pHeader+1)) + pHeader->SizeOfInlineeIndex;
pMap->m_inlinersBufferSize = cbBuffer - sizeof(InliningHeader) - pMap->m_inlineeIndexSize;
*ppLoadedMap = pMap;
return TRUE;
}
#endif //!DACCESS_COMPILE
COUNT_T PersistentInlineTrackingMapR2R::GetInliners(PTR_Module inlineeOwnerMod, mdMethodDef inlineeTkn, COUNT_T inlinersSize, MethodInModule inliners[], BOOL *incompleteData)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(inlineeOwnerMod);
_ASSERTE(inliners != NULL || inlinersSize == 0);
if (incompleteData)
{
*incompleteData = FALSE;
}
if (m_inlineeIndex == NULL || m_inlinersBuffer == NULL)
{
//No inlines saved in this image.
return 0;
}
if(inlineeOwnerMod != m_module)
{
// no cross module inlining (yet?)
return 0;
}
// Binary search to find all records matching (inlineeTkn)
ZapInlineeRecord probeRecord;
probeRecord.InitForR2R(RidFromToken(inlineeTkn));
ZapInlineeRecord *begin = m_inlineeIndex;
ZapInlineeRecord *end = m_inlineeIndex + m_inlineeIndexSize;
ZapInlineeRecord *foundRecord = util::lower_bound(begin, end, probeRecord);
DWORD result = 0;
DWORD outputIndex = 0;
// Go through all matching records
for (; foundRecord < end && *foundRecord == probeRecord; foundRecord++)
{
DWORD offset = foundRecord->m_offset;
NibbleReader stream(m_inlinersBuffer + offset, m_inlinersBufferSize - offset);
Module *inlinerModule = m_module;
DWORD inlinersCount = stream.ReadEncodedU32();
_ASSERTE(inlinersCount > 0);
RID inlinerRid = 0;
// Read inliner RIDs one by one; each RID is represented as an adjustment (diff) to the previous one.
// Add the inliner's module and copy the result to the output buffer.
for (DWORD i = 0; i < inlinersCount && outputIndex < inlinersSize; i++)
{
inlinerRid += stream.ReadEncodedU32();
mdMethodDef inlinerTkn = TokenFromRid(inlinerRid, mdtMethodDef);
inliners[outputIndex++] = MethodInModule(inlinerModule, inlinerTkn);
}
result += inlinersCount;
}
return result;
}
#ifndef DACCESS_COMPILE
BOOL PersistentInlineTrackingMapR2R2::TryLoad(Module* pModule, const BYTE* pBuffer, DWORD cbBuffer,
AllocMemTracker* pamTracker, PersistentInlineTrackingMapR2R2** ppLoadedMap)
{
LoaderHeap* pHeap = pModule->GetLoaderAllocator()->GetHighFrequencyHeap();
void* pMemory = pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(PersistentInlineTrackingMapR2R2)));
PersistentInlineTrackingMapR2R2* pMap = new (pMemory) PersistentInlineTrackingMapR2R2();
pMap->m_module = pModule;
pMap->m_reader = NativeReader(pBuffer, cbBuffer);
NativeParser parser = NativeParser(&pMap->m_reader, 0);
pMap->m_hashtable = NativeHashtable(parser);
*ppLoadedMap = pMap;
return TRUE;
}
COUNT_T PersistentInlineTrackingMapR2R2::GetInliners(PTR_Module inlineeOwnerMod, mdMethodDef inlineeTkn, COUNT_T inlinersSize, MethodInModule inliners[], BOOL* incompleteData)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(inlineeOwnerMod);
_ASSERTE(inliners != NULL || inlinersSize == 0);
if (incompleteData)
{
*incompleteData = FALSE;
}
DWORD result = 0;
int hashCode = GetVersionResilientModuleHashCode(inlineeOwnerMod);
hashCode ^= inlineeTkn;
NativeHashtable::Enumerator lookup = m_hashtable.Lookup(hashCode);
NativeParser entryParser;
while (lookup.GetNext(entryParser))
{
DWORD streamSize = entryParser.GetUnsigned();
_ASSERTE(streamSize > 1);
// First make sure this is the right inlinee and not just a hash collision
DWORD inlineeRidAndFlag = entryParser.GetUnsigned();
streamSize--;
mdMethodDef inlineeToken = TokenFromRid(inlineeRidAndFlag >> 1, mdtMethodDef);
if (inlineeToken != inlineeTkn)
{
continue;
}
Module* inlineeModule;
if ((inlineeRidAndFlag & 1) != 0)
{
inlineeModule = GetModuleByIndex(entryParser.GetUnsigned());
streamSize--;
_ASSERTE(streamSize > 0);
}
else
{
inlineeModule = m_module;
}
if (inlineeModule != inlineeOwnerMod)
{
continue;
}
// We have the right inlinee, let's look at the inliners
DWORD currentInlinerRid = 0;
do
{
DWORD inlinerRidDeltaAndFlag = entryParser.GetUnsigned();
streamSize--;
currentInlinerRid += inlinerRidDeltaAndFlag >> 1;
Module* inlinerModule;
if ((inlinerRidDeltaAndFlag & 1) != 0)
{
_ASSERTE(streamSize > 0);
inlinerModule = GetModuleByIndex(entryParser.GetUnsigned());
streamSize--;
if (inlinerModule == nullptr && incompleteData)
{
// We can't find a module for this inlineeModuleZapIndex, which means it hasn't been loaded yet
// (and maybe never will be), so we just report it to the profiler.
// The profiler might want to try again later when more modules are loaded.
*incompleteData = TRUE;
continue;
}
}
else
{
inlinerModule = m_module;
}
if (result < inlinersSize)
{
inliners[result].m_methodDef = TokenFromRid(currentInlinerRid, mdtMethodDef);
inliners[result].m_module = inlinerModule;
}
result++;
} while (streamSize > 0);
}
return result;
}
Module* PersistentInlineTrackingMapR2R2::GetModuleByIndex(DWORD index)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
// This "black magic spell" has in fact nothing to do with GenericInstantiationCompare per se, but just sets a thread flag
// that later activates more thorough search inside Module::GetAssemblyIfLoaded, which is indirectly called from GetModuleFromIndexIfLoaded.
// This is useful when ngen image was compiler against a different assembly version than the one loaded now.
ClrFlsThreadTypeSwitch genericInstantionCompareHolder(ThreadType_GenericInstantiationCompare);
return m_module->GetModuleFromIndexIfLoaded(index);
}
#endif //!DACCESS_COMPILE
#endif //FEATURE_READYTORUN
#if !defined(DACCESS_COMPILE)
JITInlineTrackingMap::JITInlineTrackingMap(LoaderAllocator *pAssociatedLoaderAllocator) :
m_mapCrst(CrstJitInlineTrackingMap),
m_map()
{
LIMITED_METHOD_CONTRACT;
m_map.Init(pAssociatedLoaderAllocator);
}
BOOL JITInlineTrackingMap::InliningExistsDontTakeLock(MethodDesc *inliner, MethodDesc *inlinee)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(m_mapCrst.OwnedByCurrentThread());
_ASSERTE(inliner != NULL);
_ASSERTE(inlinee != NULL);
BOOL found = FALSE;
auto lambda = [&](OBJECTREF obj, MethodDesc *lambdaInlinee, MethodDesc *lambdaInliner)
{
_ASSERTE(inlinee == lambdaInlinee);
if (lambdaInliner == inliner)
{
found = TRUE;
return false;
}
return true;
};
m_map.VisitValuesOfKey(inlinee, lambda);
return found;
}
void JITInlineTrackingMap::AddInlining(MethodDesc *inliner, MethodDesc *inlinee)
{
LIMITED_METHOD_CONTRACT;
inlinee = inlinee->LoadTypicalMethodDefinition();
CrstHolder holder(&m_mapCrst);
AddInliningDontTakeLock(inliner, inlinee);
}
void JITInlineTrackingMap::AddInliningDontTakeLock(MethodDesc *inliner, MethodDesc *inlinee)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(m_mapCrst.OwnedByCurrentThread());
_ASSERTE(inliner != NULL);
_ASSERTE(inlinee != NULL);
GCX_COOP();
if (!InliningExistsDontTakeLock(inliner, inlinee))
{
LoaderAllocator *loaderAllocatorOfInliner = inliner->GetLoaderAllocator();
m_map.Add(inlinee, inliner, loaderAllocatorOfInliner);
}
}
#endif // !defined(DACCESS_COMPILE)
| -1 |
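As a companion to PersistentInlineTrackingMapR2R2::GetInliners above, here is a hedged sketch of the per-entry stream it decodes: a count, then (inlineeRid << 1 | hasModuleIndex), an optional module index, then repeated (ridDelta << 1 | hasModuleIndex) values, each optionally followed by a module index. DecodeEntry, DecodedInliner, and the plain uint32_t vector standing in for NativeParser::GetUnsigned() are assumptions made for the example; the hash-collision check and error handling are omitted.

// Sketch of the per-entry inliner stream; values stand in for GetUnsigned() output.
#include <cstdint>
#include <cstdio>
#include <vector>

struct DecodedInliner { uint32_t rid; int moduleIndex; };  // -1 => same module as the image

static std::vector<DecodedInliner> DecodeEntry(const std::vector<uint32_t>& stream)
{
    size_t pos = 0;
    uint32_t remaining = stream[pos++];                 // number of values that follow

    uint32_t inlineeRidAndFlag = stream[pos++]; remaining--;
    if (inlineeRidAndFlag & 1) { pos++; remaining--; }  // skip the inlinee's module index

    std::vector<DecodedInliner> inliners;
    uint32_t rid = 0;
    do
    {
        uint32_t deltaAndFlag = stream[pos++]; remaining--;
        rid += deltaAndFlag >> 1;                       // RIDs are delta-encoded
        int moduleIndex = -1;
        if (deltaAndFlag & 1) { moduleIndex = (int)stream[pos++]; remaining--; }
        inliners.push_back({ rid, moduleIndex });
    } while (remaining > 0);
    return inliners;
}

int main()
{
    // One inlinee (rid 5, local module) inlined by rids 7 and 9 in the local module:
    // stream = { remaining=3, 5<<1, 7<<1, 2<<1 } (the second delta of 2 yields rid 9).
    std::vector<uint32_t> stream = { 3, 10, 14, 4 };
    for (const DecodedInliner& m : DecodeEntry(stream))
        std::printf("inliner rid=%u moduleIndex=%d\n", m.rid, m.moduleIndex);
    return 0;
}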
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/filemapping_memmgt/UnmapViewOfFile/test2/unmapviewoffile.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================
**
** Source: UnmapViewOfFile.c (test 2)
**
** Purpose: Negative test the UnmapViewOfFile API.
** Call UnmapViewOfFile to unmap a view of
** NULL.
**
**
**============================================================*/
#include <palsuite.h>
PALTEST(filemapping_memmgt_UnmapViewOfFile_test2_paltest_unmapviewoffile_test2, "filemapping_memmgt/UnmapViewOfFile/test2/paltest_unmapviewoffile_test2")
{
int err;
/*Initialize the PAL environment*/
err = PAL_Initialize(argc, argv);
if(0 != err)
{
return FAIL;
}
/* Negative test the UnmapViewOfFile by passing a NULL*/
/* mapping address handle*/
err = UnmapViewOfFile(NULL);
if(0 != err)
{
Fail("ERROR: Able to call UnmapViewOfFile API "
"by passing a NULL mapping address.\n" );
}
/* Terminate the PAL.
*/
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================
**
** Source: UnmapViewOfFile.c (test 2)
**
** Purpose: Negative test the UnmapViewOfFile API.
** Call UnmapViewOfFile to unmap a view of
** NULL.
**
**
**============================================================*/
#include <palsuite.h>
PALTEST(filemapping_memmgt_UnmapViewOfFile_test2_paltest_unmapviewoffile_test2, "filemapping_memmgt/UnmapViewOfFile/test2/paltest_unmapviewoffile_test2")
{
int err;
/*Initialize the PAL environment*/
err = PAL_Initialize(argc, argv);
if(0 != err)
{
return FAIL;
}
/* Negative test the UnmapViewOfFile by passing a NULL*/
/* mapping address handle*/
err = UnmapViewOfFile(NULL);
if(0 != err)
{
Fail("ERROR: Able to call UnmapViewOfFile API "
"by passing a NULL mapping address.\n" );
}
/* Terminate the PAL.
*/
PAL_Terminate();
return PASS;
}
| -1 |
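The dotnet/runtime rows above all carry the same PR summary about a GC hole with multi-reg local var stores. As a purely illustrative sketch of the invariant that fix restores (this is not JIT code; RegSlot and GcTrackedRegs are hypothetical names): every enregistered slot of a multi-register value that holds a GC pointer must be reported to the GC-tracked register set, not only the first slot.

// Illustrative only: a toy "GC-tracked register mask" built from multi-reg slots.
#include <cstdint>
#include <cstdio>

struct RegSlot
{
    bool     enregistered;  // false => the slot lives on the stack
    bool     isGcPointer;   // slot holds an object reference or byref
    uint64_t regMask;       // one-hot mask for the assigned register
};

// Every enregistered GC slot contributes to the mask; skipping non-first
// slots is exactly the hole described in the PR summary.
static uint64_t GcTrackedRegs(const RegSlot* slots, int count)
{
    uint64_t mask = 0;
    for (int i = 0; i < count; i++)
        if (slots[i].enregistered && slots[i].isGcPointer)
            mask |= slots[i].regMask;
    return mask;
}

int main()
{
    // Slot 0 spilled, slot 1 enregistered and holding a GC ref: the ref in
    // slot 1 must still be reported even though slot 0 is not in a register.
    RegSlot slots[2] = { { false, true, 0x1 }, { true, true, 0x2 } };
    std::printf("tracked mask = 0x%llx\n", (unsigned long long)GcTrackedRegs(slots, 2));
    return 0;
}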
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/md/errors_metadata.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <corerror.h>
#include <winerror.h>
// Index into heap/table is too large.
#define METADATA_E_INDEX_NOTFOUND CLDB_E_INDEX_NOTFOUND
// Options:
// * CLDB_E_INDEX_NOTFOUND
// * VLDTR_E_BLOB_INVALID
// * VLDTR_E_GUID_INVALID
// * VLDTR_E_STRING_INVALID
// * VLDTR_E_RID_OUTOFRANGE
// Internal error, it's a runtime assert check to avoid security errors. If this is returned, then there's
// something wrong with MetaData code.
#define METADATA_E_INTERNAL_ERROR CLDB_E_INTERNALERROR
// Options:
// * CLDB_E_INTERNALERROR
// * COR_E_EXECUTIONENGINE
// MetaData space (heap/table) is full, cannot store more items.
#define METADATA_E_HEAP_FULL META_E_STRINGSPACE_FULL
// Options:
// * META_E_STRINGSPACE_FULL
// * CLDB_E_TOO_BIG
// Invalid heap (blob, user string) data encoding.
#define METADATA_E_INVALID_HEAP_DATA META_E_BADMETADATA
// Options:
// * META_E_BADMETADATA
// * META_E_CA_INVALID_BLOB
// * META_E_BAD_SIGNATURE
// * CLDB_E_FILE_CORRUPT
// * COR_E_BADIMAGEFORMAT
// The data is too big to encode (the string/blob is larger than possible heap size).
#define METADATA_E_DATA_TOO_BIG CLDB_E_TOO_BIG
// Options:
// * CLDB_E_TOO_BIG
// Invalid MetaData format (headers, etc.).
#define METADATA_E_INVALID_FORMAT COR_E_BADIMAGEFORMAT
// Options:
// * META_E_BADMETADATA
// * META_E_CA_INVALID_BLOB
// * META_E_BAD_SIGNATURE
// * CLDB_E_FILE_CORRUPT
// * COR_E_BADIMAGEFORMAT
//
// Other used error codes:
// * COR_E_OUTOFMEMORY ... defined as E_OUTOFMEMORY
// Alternatives:
// * E_OUTOFMEMORY (from IfNullGo/IfNullRet macros)
// * COR_E_OVERFLOW
// Alternatives:
// * COR_E_ARITHMETIC
//
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <corerror.h>
#include <winerror.h>
// Index into heap/table is too large.
#define METADATA_E_INDEX_NOTFOUND CLDB_E_INDEX_NOTFOUND
// Options:
// * CLDB_E_INDEX_NOTFOUND
// * VLDTR_E_BLOB_INVALID
// * VLDTR_E_GUID_INVALID
// * VLDTR_E_STRING_INVALID
// * VLDTR_E_RID_OUTOFRANGE
// Internal error, it's a runtime assert check to avoid security errors. If this is returned, then there's
// something wrong with MetaData code.
#define METADATA_E_INTERNAL_ERROR CLDB_E_INTERNALERROR
// Options:
// * CLDB_E_INTERNALERROR
// * COR_E_EXECUTIONENGINE
// MetaData space (heap/table) is full, cannot store more items.
#define METADATA_E_HEAP_FULL META_E_STRINGSPACE_FULL
// Options:
// * META_E_STRINGSPACE_FULL
// * CLDB_E_TOO_BIG
// Invalid heap (blob, user string) data encoding.
#define METADATA_E_INVALID_HEAP_DATA META_E_BADMETADATA
// Options:
// * META_E_BADMETADATA
// * META_E_CA_INVALID_BLOB
// * META_E_BAD_SIGNATURE
// * CLDB_E_FILE_CORRUPT
// * COR_E_BADIMAGEFORMAT
// The data is too big to encode (the string/blob is larger than possible heap size).
#define METADATA_E_DATA_TOO_BIG CLDB_E_TOO_BIG
// Options:
// * CLDB_E_TOO_BIG
// Invalid MetaData format (headers, etc.).
#define METADATA_E_INVALID_FORMAT COR_E_BADIMAGEFORMAT
// Options:
// * META_E_BADMETADATA
// * META_E_CA_INVALID_BLOB
// * META_E_BAD_SIGNATURE
// * CLDB_E_FILE_CORRUPT
// * COR_E_BADIMAGEFORMAT
//
// Other used error codes:
// * COR_E_OUTOFMEMORY ... defined as E_OUTOFMEMORY
// Alternatives:
// * E_OUTOFMEMORY (from IfNullGo/IfNullRet macros)
// * COR_E_OVERFLOW
// Alternatives:
// * COR_E_ARITHMETIC
//
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/native/external/rapidjson/internal/pow10.h | // Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_POW10_
#define RAPIDJSON_POW10_
#include "../rapidjson.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
//! Computes integer powers of 10 in double (10.0^n).
/*! This function uses a lookup table for fast and accurate results.
    \param n non-negative exponent. Must be <= 308.
\return 10.0^n
*/
inline double Pow10(int n) {
static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes
1e+0,
1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40,
1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60,
1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80,
1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100,
1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120,
1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140,
1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160,
1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180,
1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200,
1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220,
1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240,
1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260,
1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280,
1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300,
1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308
};
RAPIDJSON_ASSERT(n >= 0 && n <= 308);
return e[n];
}
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_POW10_
| // Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_POW10_
#define RAPIDJSON_POW10_
#include "../rapidjson.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
//! Computes integer powers of 10 in double (10.0^n).
/*! This function uses a lookup table for fast and accurate results.
    \param n non-negative exponent. Must be <= 308.
\return 10.0^n
*/
inline double Pow10(int n) {
static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes
1e+0,
1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40,
1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60,
1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80,
1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100,
1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120,
1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140,
1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160,
1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180,
1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200,
1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220,
1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240,
1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260,
1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280,
1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300,
1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308
};
RAPIDJSON_ASSERT(n >= 0 && n <= 308);
return e[n];
}
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_POW10_
| -1 |
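A quick aside on the `Pow10` helper shown in the row above: each call is just a bounds-checked index into the 309-entry table. The minimal usage sketch below assumes RapidJSON's include directory is on the compiler's path; the header path and namespace are taken from the file itself rather than verified against a particular RapidJSON release.

```cpp
// Minimal usage sketch for rapidjson::internal::Pow10 (header shown above).
// Assumption: RapidJSON's "include" directory is on the include path.
#include <cassert>
#include "rapidjson/internal/pow10.h"

int main()
{
    // Each call is a single table lookup; the entries are the double
    // literals 1e+0 ... 1e+308, so no multiplication error accumulates.
    assert(rapidjson::internal::Pow10(0) == 1.0);
    assert(rapidjson::internal::Pow10(3) == 1000.0);
    assert(rapidjson::internal::Pow10(308) == 1e+308);
    return 0; // n < 0 or n > 308 trips RAPIDJSON_ASSERT instead
}
```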
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
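To make the failure mode described above concrete, here is a small self-contained sketch of the per-slot reporting the fix implies. This is not the RyuJIT code: `RegSlot` and `reportMultiRegStore` are invented names for illustration, and the real change lives in `gtHasReg()` / `GetMultiRegCount()` as stated in the description.

```cpp
// Illustrative sketch only; NOT the actual RyuJIT implementation.
// The point: GC liveness must be reported per register slot of a multi-reg
// store, not just for slot 0.
#include <cstdio>
#include <vector>

struct RegSlot
{
    int  regNum;     // assigned register number, or -1 if the slot stayed in memory
    bool holdsGCRef; // true if this slot carries a GC pointer
};

static void reportMultiRegStore(const std::vector<RegSlot>& slots)
{
    // Walk every slot: an unenregistered first slot must not hide GC
    // pointers living in later, enregistered slots.
    for (const RegSlot& slot : slots)
    {
        if ((slot.regNum != -1) && slot.holdsGCRef)
        {
            std::printf("mark r%d live as a GC pointer\n", slot.regNum);
        }
    }
}

int main()
{
    // Slot 0 on the stack, slot 1 in a register holding a GC ref: logic that
    // checks only the first slot would report nothing for this store.
    reportMultiRegStore({{-1, false}, {8, true}});
    return 0;
}
```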
| ./src/coreclr/vm/eedbginterface.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// COM+99 EE to Debugger Interface Header
//
#ifndef _eedbginterface_h_
#define _eedbginterface_h_
#include "common.h"
#include "corpriv.h"
#include "hash.h"
#include "class.h"
#include "excep.h"
#include "threads.h"
#include "field.h"
#include "stackwalk.h"
#ifdef EnC_SUPPORTED
#include "encee.h"
#endif
#include "cordebug.h"
#include "../debug/inc/common.h"
class MethodDesc;
class Frame;
//
// The purpose of this object is to provide EE functionality back to
// the debugger. This represents the entire set of EE functions used
// by the debugger.
//
// We will make this interface smaller over time to minimize the link
// between the EE and the Debugger.
//
//
typedef BOOL (*HashMapEnumCallback)(HashMap* h,
void* pData,
ULONG value);
typedef enum AttachAppDomainEventsEnum
{
ONLY_SEND_APP_DOMAIN_CREATE_EVENTS,
DONT_SEND_CLASS_EVENTS,
ONLY_SEND_CLASS_EVENTS
} AttachAppDomainEventsEnum;
typedef VPTR(class EEDebugInterface) PTR_EEDebugInterface;
// Used for communicating EH Handler info between the LS and EE (DetermineIfOffsetsInFilterOrHandler)
struct DebugOffsetToHandlerInfo
{
// Native offset of interest, or -1 if this entry should be ignored
SIZE_T offset;
// Set to true by the EE if the specified native offset is in an EH filter or handler.
BOOL isInFilterOrHandler;
};
class EEDebugInterface
{
VPTR_BASE_VTABLE_CLASS_AND_CTOR(EEDebugInterface);
public:
//
// Functions exported from the EE to the debugger.
//
virtual Thread* GetThread(void) = 0;
#ifndef DACCESS_COMPILE
virtual StackWalkAction StackWalkFramesEx(Thread* pThread,
PREGDISPLAY pRD,
PSTACKWALKFRAMESCALLBACK pCallback,
VOID* pData,
unsigned int flags) = 0;
virtual Frame *GetFrame(CrawlFrame*) = 0;
virtual bool InitRegDisplay(Thread* pThread,
const PREGDISPLAY pRD,
const PT_CONTEXT pctx,
bool validContext) = 0;
virtual BOOL IsStringObject(Object* o) = 0;
virtual BOOL IsTypedReference(MethodTable* pMT) = 0;
virtual WCHAR* StringObjectGetBuffer(StringObject* so) = 0;
virtual DWORD StringObjectGetStringLength(StringObject* so) = 0;
virtual void *GetObjectFromHandle(OBJECTHANDLE handle) = 0;
virtual OBJECTHANDLE GetHandleFromObject(void *obj,
bool fStrongNewRef,
AppDomain *pAppDomain) = 0;
virtual void DbgDestroyHandle( OBJECTHANDLE oh, bool fStrongNewRef ) = 0;
virtual OBJECTHANDLE GetThreadException(Thread *pThread) = 0;
virtual bool IsThreadExceptionNull(Thread *pThread) = 0;
virtual void ClearThreadException(Thread *pThread) = 0;
virtual bool StartSuspendForDebug(AppDomain *pAppDomain,
BOOL fHoldingThreadStoreLock = FALSE) = 0;
virtual void ResumeFromDebug(AppDomain *pAppDomain)= 0;
virtual void MarkThreadForDebugSuspend(Thread* pRuntimeThread) = 0;
virtual void MarkThreadForDebugStepping(Thread* pRuntimeThread,
bool onOff) = 0;
virtual void SetThreadFilterContext(Thread *thread,
T_CONTEXT *context) = 0;
virtual T_CONTEXT *GetThreadFilterContext(Thread *thread) = 0;
#ifdef FEATURE_INTEROP_DEBUGGING
virtual VOID *GetThreadDebuggerWord() = 0;
virtual void SetThreadDebuggerWord(VOID *dw) = 0;
#endif
virtual BOOL IsManagedNativeCode(const BYTE *address) = 0;
#endif // #ifndef DACCESS_COMPILE
virtual PCODE GetNativeCodeStartAddress(PCODE address) = 0;
virtual MethodDesc *GetNativeCodeMethodDesc(const PCODE address) = 0;
#ifndef DACCESS_COMPILE
#ifndef USE_GC_INFO_DECODER
virtual BOOL IsInPrologOrEpilog(const BYTE *address,
size_t* prologSize) = 0;
#endif
// Determine whether certain native offsets of the specified function are within
// an exception filter or handler.
virtual void DetermineIfOffsetsInFilterOrHandler(const BYTE *functionAddress,
DebugOffsetToHandlerInfo *pOffsetToHandlerInfo,
unsigned offsetToHandlerInfoLength) = 0;
#endif // #ifndef DACCESS_COMPILE
virtual void GetMethodRegionInfo(const PCODE pStart,
PCODE * pCold,
size_t * hotSize,
size_t * coldSize) = 0;
#if defined(FEATURE_EH_FUNCLETS)
virtual DWORD GetFuncletStartOffsets(const BYTE *pStart, DWORD* pStartOffsets, DWORD dwLength) = 0;
virtual StackFrame FindParentStackFrame(CrawlFrame* pCF) = 0;
#endif // FEATURE_EH_FUNCLETS
virtual size_t GetFunctionSize(MethodDesc *pFD) = 0;
virtual PCODE GetFunctionAddress(MethodDesc *pFD) = 0;
#ifndef DACCESS_COMPILE
#ifdef EnC_SUPPORTED
// Apply an EnC edit
virtual HRESULT EnCApplyChanges(EditAndContinueModule *pModule,
DWORD cbMetadata,
BYTE *pMetadata,
DWORD cbIL,
BYTE *pIL) = 0;
// Perform an EnC remap to resume execution in the new version of a method (doesn't return)
virtual void ResumeInUpdatedFunction(EditAndContinueModule *pModule,
MethodDesc *pFD,
void *debuggerFuncHandle,
SIZE_T resumeIP,
CONTEXT *pContext) = 0;
#endif //EnC_SUPPORTED
//
// New methods to support the new debugger.
//
virtual MethodDesc *FindLoadedMethodRefOrDef(Module* pModule,
mdMemberRef memberRef) = 0;
virtual MethodDesc *LoadMethodDef(Module* pModule,
mdMethodDef methodDef,
DWORD numGenericArgs = 0,
TypeHandle *pGenericArgs = NULL,
TypeHandle *pOwnerType = NULL) = 0;
    // These will look up a type, and if it's not loaded, return the null TypeHandle
virtual TypeHandle FindLoadedClass(Module *pModule,
mdTypeDef classToken) = 0;
virtual TypeHandle FindLoadedElementType(CorElementType et) = 0;
virtual TypeHandle FindLoadedInstantiation(Module *pModule,
mdTypeDef typeDef,
DWORD ntypars,
TypeHandle *inst) = 0;
virtual TypeHandle FindLoadedFnptrType(TypeHandle *inst,
DWORD ntypars) = 0;
virtual TypeHandle FindLoadedPointerOrByrefType(CorElementType et,
TypeHandle elemtype) = 0;
virtual TypeHandle FindLoadedArrayType(CorElementType et,
TypeHandle elemtype,
unsigned rank) = 0;
    // These will look up a type, and if it's not loaded, will load and run
// the class init etc.
virtual TypeHandle LoadClass(Module *pModule,
mdTypeDef classToken) = 0;
virtual TypeHandle LoadElementType(CorElementType et) = 0;
virtual TypeHandle LoadInstantiation(Module *pModule,
mdTypeDef typeDef,
DWORD ntypars,
TypeHandle *inst) = 0;
virtual TypeHandle LoadFnptrType(TypeHandle *inst,
DWORD ntypars) = 0;
virtual TypeHandle LoadPointerOrByrefType(CorElementType et,
TypeHandle elemtype) = 0;
virtual TypeHandle LoadArrayType(CorElementType et,
TypeHandle elemtype,
unsigned rank) = 0;
__checkReturn
virtual HRESULT GetMethodImplProps(Module *pModule,
mdToken tk,
DWORD *pRVA,
DWORD *pImplFlags) = 0;
virtual HRESULT GetParentToken(Module *pModule,
mdToken tk,
mdToken *pParentToken) = 0;
virtual bool IsPreemptiveGCDisabled(void) = 0;
virtual void DisablePreemptiveGC(void) = 0;
virtual void EnablePreemptiveGC(void) = 0;
virtual DWORD MethodDescIsStatic(MethodDesc *pFD) = 0;
#endif // #ifndef DACCESS_COMPILE
virtual Module *MethodDescGetModule(MethodDesc *pFD) = 0;
#ifndef DACCESS_COMPILE
virtual COR_ILMETHOD* MethodDescGetILHeader(MethodDesc *pFD) = 0;
virtual ULONG MethodDescGetRVA(MethodDesc *pFD) = 0;
virtual void MarkDebuggerAttached(void) = 0;
virtual void MarkDebuggerUnattached(void) = 0;
virtual bool CrawlFrameIsGcSafe(CrawlFrame *pCF) = 0;
virtual bool SweepThreadsForDebug(bool forceSync) = 0;
virtual void GetRuntimeOffsets(SIZE_T *pTLSIndex,
SIZE_T *pTLSEEThreadOffset,
SIZE_T *pTLSIsSpecialOffset,
SIZE_T *pTLSCantStopOffset,
SIZE_T *pEEThreadStateOffset,
SIZE_T *pEEThreadStateNCOffset,
SIZE_T *pEEThreadPGCDisabledOffset,
DWORD *pEEThreadPGCDisabledValue,
SIZE_T *pEEThreadFrameOffset,
SIZE_T *pEEThreadMaxNeededSize,
DWORD *pEEThreadSteppingStateMask,
DWORD *pEEMaxFrameValue,
SIZE_T *pEEThreadDebuggerFilterContextOffset,
SIZE_T *pEEFrameNextOffset,
DWORD *pEEIsManagedExceptionStateMask) = 0;
virtual bool IsStub(const BYTE *ip) = 0;
#endif // #ifndef DACCESS_COMPILE
virtual bool DetectHandleILStubs(Thread *thread) = 0;
virtual bool TraceStub(const BYTE *ip, TraceDestination *trace) = 0;
#ifndef DACCESS_COMPILE
virtual bool FollowTrace(TraceDestination *trace) = 0;
virtual bool TraceFrame(Thread *thread,
Frame *frame,
BOOL fromPatch,
TraceDestination *trace,
REGDISPLAY *regs) = 0;
virtual bool TraceManager(Thread *thread,
StubManager *stubManager,
TraceDestination *trace,
T_CONTEXT *context,
BYTE **pRetAddr) = 0;
virtual void EnableTraceCall(Thread *thread) = 0;
virtual void DisableTraceCall(Thread *thread) = 0;
#endif // #ifndef DACCESS_COMPILE
#ifndef DACCESS_COMPILE
virtual void DebuggerModifyingLogSwitch (int iNewLevel,
const WCHAR *pLogSwitchName) = 0;
virtual HRESULT SetIPFromSrcToDst(Thread *pThread,
SLOT addrStart,
DWORD offFrom,
DWORD offTo,
bool fCanSetIPOnly,
PREGDISPLAY pReg,
PT_CONTEXT pCtx,
void *pDji,
EHRangeTree *pEHRT) = 0;
virtual void SetDebugState(Thread *pThread,
CorDebugThreadState state) = 0;
virtual void SetAllDebugState(Thread *et,
CorDebugThreadState state) = 0;
virtual CorDebugUserState GetPartialUserState( Thread *pThread ) = 0;
#endif // #ifndef DACCESS_COMPILE
#ifdef DACCESS_COMPILE
virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) = 0;
#endif
virtual unsigned GetSizeForCorElementType(CorElementType etyp) = 0;
#ifndef DACCESS_COMPILE
virtual BOOL ObjIsInstanceOf(Object *pElement, TypeHandle toTypeHnd) = 0;
#endif
virtual void ClearAllDebugInterfaceReferences(void) = 0;
#ifndef DACCESS_COMPILE
#ifdef _DEBUG
virtual void ObjectRefFlush(Thread *pThread) = 0;
#endif
#endif
#ifndef DACCESS_COMPILE
virtual BOOL AdjustContextForJITHelpersForDebugger(CONTEXT* context) = 0;
#endif
};
#endif // _eedbginterface_h_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// COM+99 EE to Debugger Interface Header
//
#ifndef _eedbginterface_h_
#define _eedbginterface_h_
#include "common.h"
#include "corpriv.h"
#include "hash.h"
#include "class.h"
#include "excep.h"
#include "threads.h"
#include "field.h"
#include "stackwalk.h"
#ifdef EnC_SUPPORTED
#include "encee.h"
#endif
#include "cordebug.h"
#include "../debug/inc/common.h"
class MethodDesc;
class Frame;
//
// The purpose of this object is to provide EE functionality back to
// the debugger. This represents the entire set of EE functions used
// by the debugger.
//
// We will make this interface smaller over time to minimize the link
// between the EE and the Debugger.
//
//
typedef BOOL (*HashMapEnumCallback)(HashMap* h,
void* pData,
ULONG value);
typedef enum AttachAppDomainEventsEnum
{
ONLY_SEND_APP_DOMAIN_CREATE_EVENTS,
DONT_SEND_CLASS_EVENTS,
ONLY_SEND_CLASS_EVENTS
} AttachAppDomainEventsEnum;
typedef VPTR(class EEDebugInterface) PTR_EEDebugInterface;
// Used for communicating EH Handler info between the LS and EE (DetermineIfOffsetsInFilterOrHandler)
struct DebugOffsetToHandlerInfo
{
// Native offset of interest, or -1 if this entry should be ignored
SIZE_T offset;
// Set to true by the EE if the specified native offset is in an EH filter or handler.
BOOL isInFilterOrHandler;
};
class EEDebugInterface
{
VPTR_BASE_VTABLE_CLASS_AND_CTOR(EEDebugInterface);
public:
//
// Functions exported from the EE to the debugger.
//
virtual Thread* GetThread(void) = 0;
#ifndef DACCESS_COMPILE
virtual StackWalkAction StackWalkFramesEx(Thread* pThread,
PREGDISPLAY pRD,
PSTACKWALKFRAMESCALLBACK pCallback,
VOID* pData,
unsigned int flags) = 0;
virtual Frame *GetFrame(CrawlFrame*) = 0;
virtual bool InitRegDisplay(Thread* pThread,
const PREGDISPLAY pRD,
const PT_CONTEXT pctx,
bool validContext) = 0;
virtual BOOL IsStringObject(Object* o) = 0;
virtual BOOL IsTypedReference(MethodTable* pMT) = 0;
virtual WCHAR* StringObjectGetBuffer(StringObject* so) = 0;
virtual DWORD StringObjectGetStringLength(StringObject* so) = 0;
virtual void *GetObjectFromHandle(OBJECTHANDLE handle) = 0;
virtual OBJECTHANDLE GetHandleFromObject(void *obj,
bool fStrongNewRef,
AppDomain *pAppDomain) = 0;
virtual void DbgDestroyHandle( OBJECTHANDLE oh, bool fStrongNewRef ) = 0;
virtual OBJECTHANDLE GetThreadException(Thread *pThread) = 0;
virtual bool IsThreadExceptionNull(Thread *pThread) = 0;
virtual void ClearThreadException(Thread *pThread) = 0;
virtual bool StartSuspendForDebug(AppDomain *pAppDomain,
BOOL fHoldingThreadStoreLock = FALSE) = 0;
virtual void ResumeFromDebug(AppDomain *pAppDomain)= 0;
virtual void MarkThreadForDebugSuspend(Thread* pRuntimeThread) = 0;
virtual void MarkThreadForDebugStepping(Thread* pRuntimeThread,
bool onOff) = 0;
virtual void SetThreadFilterContext(Thread *thread,
T_CONTEXT *context) = 0;
virtual T_CONTEXT *GetThreadFilterContext(Thread *thread) = 0;
#ifdef FEATURE_INTEROP_DEBUGGING
virtual VOID *GetThreadDebuggerWord() = 0;
virtual void SetThreadDebuggerWord(VOID *dw) = 0;
#endif
virtual BOOL IsManagedNativeCode(const BYTE *address) = 0;
#endif // #ifndef DACCESS_COMPILE
virtual PCODE GetNativeCodeStartAddress(PCODE address) = 0;
virtual MethodDesc *GetNativeCodeMethodDesc(const PCODE address) = 0;
#ifndef DACCESS_COMPILE
#ifndef USE_GC_INFO_DECODER
virtual BOOL IsInPrologOrEpilog(const BYTE *address,
size_t* prologSize) = 0;
#endif
// Determine whether certain native offsets of the specified function are within
// an exception filter or handler.
virtual void DetermineIfOffsetsInFilterOrHandler(const BYTE *functionAddress,
DebugOffsetToHandlerInfo *pOffsetToHandlerInfo,
unsigned offsetToHandlerInfoLength) = 0;
#endif // #ifndef DACCESS_COMPILE
virtual void GetMethodRegionInfo(const PCODE pStart,
PCODE * pCold,
size_t * hotSize,
size_t * coldSize) = 0;
#if defined(FEATURE_EH_FUNCLETS)
virtual DWORD GetFuncletStartOffsets(const BYTE *pStart, DWORD* pStartOffsets, DWORD dwLength) = 0;
virtual StackFrame FindParentStackFrame(CrawlFrame* pCF) = 0;
#endif // FEATURE_EH_FUNCLETS
virtual size_t GetFunctionSize(MethodDesc *pFD) = 0;
virtual PCODE GetFunctionAddress(MethodDesc *pFD) = 0;
#ifndef DACCESS_COMPILE
#ifdef EnC_SUPPORTED
// Apply an EnC edit
virtual HRESULT EnCApplyChanges(EditAndContinueModule *pModule,
DWORD cbMetadata,
BYTE *pMetadata,
DWORD cbIL,
BYTE *pIL) = 0;
// Perform an EnC remap to resume execution in the new version of a method (doesn't return)
virtual void ResumeInUpdatedFunction(EditAndContinueModule *pModule,
MethodDesc *pFD,
void *debuggerFuncHandle,
SIZE_T resumeIP,
CONTEXT *pContext) = 0;
#endif //EnC_SUPPORTED
//
// New methods to support the new debugger.
//
virtual MethodDesc *FindLoadedMethodRefOrDef(Module* pModule,
mdMemberRef memberRef) = 0;
virtual MethodDesc *LoadMethodDef(Module* pModule,
mdMethodDef methodDef,
DWORD numGenericArgs = 0,
TypeHandle *pGenericArgs = NULL,
TypeHandle *pOwnerType = NULL) = 0;
    // These will look up a type, and if it's not loaded, return the null TypeHandle
virtual TypeHandle FindLoadedClass(Module *pModule,
mdTypeDef classToken) = 0;
virtual TypeHandle FindLoadedElementType(CorElementType et) = 0;
virtual TypeHandle FindLoadedInstantiation(Module *pModule,
mdTypeDef typeDef,
DWORD ntypars,
TypeHandle *inst) = 0;
virtual TypeHandle FindLoadedFnptrType(TypeHandle *inst,
DWORD ntypars) = 0;
virtual TypeHandle FindLoadedPointerOrByrefType(CorElementType et,
TypeHandle elemtype) = 0;
virtual TypeHandle FindLoadedArrayType(CorElementType et,
TypeHandle elemtype,
unsigned rank) = 0;
    // These will look up a type, and if it's not loaded, will load and run
// the class init etc.
virtual TypeHandle LoadClass(Module *pModule,
mdTypeDef classToken) = 0;
virtual TypeHandle LoadElementType(CorElementType et) = 0;
virtual TypeHandle LoadInstantiation(Module *pModule,
mdTypeDef typeDef,
DWORD ntypars,
TypeHandle *inst) = 0;
virtual TypeHandle LoadFnptrType(TypeHandle *inst,
DWORD ntypars) = 0;
virtual TypeHandle LoadPointerOrByrefType(CorElementType et,
TypeHandle elemtype) = 0;
virtual TypeHandle LoadArrayType(CorElementType et,
TypeHandle elemtype,
unsigned rank) = 0;
__checkReturn
virtual HRESULT GetMethodImplProps(Module *pModule,
mdToken tk,
DWORD *pRVA,
DWORD *pImplFlags) = 0;
virtual HRESULT GetParentToken(Module *pModule,
mdToken tk,
mdToken *pParentToken) = 0;
virtual bool IsPreemptiveGCDisabled(void) = 0;
virtual void DisablePreemptiveGC(void) = 0;
virtual void EnablePreemptiveGC(void) = 0;
virtual DWORD MethodDescIsStatic(MethodDesc *pFD) = 0;
#endif // #ifndef DACCESS_COMPILE
virtual Module *MethodDescGetModule(MethodDesc *pFD) = 0;
#ifndef DACCESS_COMPILE
virtual COR_ILMETHOD* MethodDescGetILHeader(MethodDesc *pFD) = 0;
virtual ULONG MethodDescGetRVA(MethodDesc *pFD) = 0;
virtual void MarkDebuggerAttached(void) = 0;
virtual void MarkDebuggerUnattached(void) = 0;
virtual bool CrawlFrameIsGcSafe(CrawlFrame *pCF) = 0;
virtual bool SweepThreadsForDebug(bool forceSync) = 0;
virtual void GetRuntimeOffsets(SIZE_T *pTLSIndex,
SIZE_T *pTLSEEThreadOffset,
SIZE_T *pTLSIsSpecialOffset,
SIZE_T *pTLSCantStopOffset,
SIZE_T *pEEThreadStateOffset,
SIZE_T *pEEThreadStateNCOffset,
SIZE_T *pEEThreadPGCDisabledOffset,
DWORD *pEEThreadPGCDisabledValue,
SIZE_T *pEEThreadFrameOffset,
SIZE_T *pEEThreadMaxNeededSize,
DWORD *pEEThreadSteppingStateMask,
DWORD *pEEMaxFrameValue,
SIZE_T *pEEThreadDebuggerFilterContextOffset,
SIZE_T *pEEFrameNextOffset,
DWORD *pEEIsManagedExceptionStateMask) = 0;
virtual bool IsStub(const BYTE *ip) = 0;
#endif // #ifndef DACCESS_COMPILE
virtual bool DetectHandleILStubs(Thread *thread) = 0;
virtual bool TraceStub(const BYTE *ip, TraceDestination *trace) = 0;
#ifndef DACCESS_COMPILE
virtual bool FollowTrace(TraceDestination *trace) = 0;
virtual bool TraceFrame(Thread *thread,
Frame *frame,
BOOL fromPatch,
TraceDestination *trace,
REGDISPLAY *regs) = 0;
virtual bool TraceManager(Thread *thread,
StubManager *stubManager,
TraceDestination *trace,
T_CONTEXT *context,
BYTE **pRetAddr) = 0;
virtual void EnableTraceCall(Thread *thread) = 0;
virtual void DisableTraceCall(Thread *thread) = 0;
#endif // #ifndef DACCESS_COMPILE
#ifndef DACCESS_COMPILE
virtual void DebuggerModifyingLogSwitch (int iNewLevel,
const WCHAR *pLogSwitchName) = 0;
virtual HRESULT SetIPFromSrcToDst(Thread *pThread,
SLOT addrStart,
DWORD offFrom,
DWORD offTo,
bool fCanSetIPOnly,
PREGDISPLAY pReg,
PT_CONTEXT pCtx,
void *pDji,
EHRangeTree *pEHRT) = 0;
virtual void SetDebugState(Thread *pThread,
CorDebugThreadState state) = 0;
virtual void SetAllDebugState(Thread *et,
CorDebugThreadState state) = 0;
virtual CorDebugUserState GetPartialUserState( Thread *pThread ) = 0;
#endif // #ifndef DACCESS_COMPILE
#ifdef DACCESS_COMPILE
virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) = 0;
#endif
virtual unsigned GetSizeForCorElementType(CorElementType etyp) = 0;
#ifndef DACCESS_COMPILE
virtual BOOL ObjIsInstanceOf(Object *pElement, TypeHandle toTypeHnd) = 0;
#endif
virtual void ClearAllDebugInterfaceReferences(void) = 0;
#ifndef DACCESS_COMPILE
#ifdef _DEBUG
virtual void ObjectRefFlush(Thread *pThread) = 0;
#endif
#endif
#ifndef DACCESS_COMPILE
virtual BOOL AdjustContextForJITHelpersForDebugger(CONTEXT* context) = 0;
#endif
};
#endif // _eedbginterface_h_
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/threading/DuplicateHandle/test4/test4.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test4.c (DuplicateHandle)
**
** Purpose: Tests the PAL implementation of the DuplicateHandle function.
** This tests duplication of a Mutex handle. The test consists
** of creating a Mutex and its duplicate and creating a thread that
** will get ownership. Another thread will be created that will
** attempt to get ownership of the duplicate Mutex; this will
** fail, since the Mutex is owned by another thread. The Mutex
** will then be released and the thread will attempt to get
** ownership of the duplicate Mutex; this will succeed.
**
**
**===================================================================*/
#include <palsuite.h>
enum wait_results
{
WR_WAITING,
WR_GOT_MUTEX,
WR_TIMED_OUT,
WR_RELEASED
};
volatile int t1_result_DuplicateHandle_test4=WR_WAITING;
volatile int t2_result_DuplicateHandle_test4=WR_WAITING;
DWORD PALAPI ThreadTest1_DuplicateHandle_test4(LPVOID lpParam)
{
DWORD dwWait;
dwWait = WaitForSingleObject((HANDLE)lpParam, 0);
if (dwWait == WAIT_OBJECT_0)
{
/* tell the main thread we got the mutex */
t1_result_DuplicateHandle_test4=WR_GOT_MUTEX;
/* wait for main thread to tell us to release the mutex */
while(WR_GOT_MUTEX == t1_result_DuplicateHandle_test4)
Sleep(1);
ReleaseMutex((HANDLE)lpParam);
/* tell the main thread we released the mutex */
t1_result_DuplicateHandle_test4 = WR_RELEASED;
}
else
{
t1_result_DuplicateHandle_test4 = WR_TIMED_OUT;
}
return 0;
}
DWORD PALAPI ThreadTest2_DuplicateHandle_test4(LPVOID lpParam)
{
DWORD dwWait;
dwWait = WaitForSingleObject((HANDLE)lpParam, 0 );
if (dwWait == WAIT_OBJECT_0)
{
ReleaseMutex((HANDLE)lpParam);
t2_result_DuplicateHandle_test4 = WR_GOT_MUTEX;
}
else
{
t2_result_DuplicateHandle_test4 = WR_TIMED_OUT;
}
return 0;
}
PALTEST(threading_DuplicateHandle_test4_paltest_duplicatehandle_test4, "threading/DuplicateHandle/test4/paltest_duplicatehandle_test4")
{
HANDLE hDupMutex;
HANDLE hMutex;
HANDLE hThread;
HANDLE hThread2;
BOOL bDupHandle=FALSE;
DWORD dwThreadId = 0;
if ((PAL_Initialize(argc,argv)) != 0)
{
return(FAIL);
}
/*Create Mutex without ownership*/
hMutex = CreateMutexW(NULL, // no security attributes
FALSE, // initially not owned
NULL); // name of mutex
if (hMutex == NULL)
{
Fail("ERROR:%u: Unable to create mutex\n",
GetLastError());
}
/*Create Duplicate of the Mutex above*/
bDupHandle = DuplicateHandle(GetCurrentProcess(),
hMutex,
GetCurrentProcess(),
&hDupMutex,
GENERIC_READ|GENERIC_WRITE,
FALSE,
DUPLICATE_SAME_ACCESS);
if (!bDupHandle)
{
Trace("ERROR:%u: Created the duplicate handle to "
"closed event handle hMutex=0x%lx\n",
GetLastError(),
hMutex);
CloseHandle(hMutex);
Fail("");
}
/*Create a thread to test the Mutex*/
hThread = CreateThread(NULL,
0,
&ThreadTest1_DuplicateHandle_test4,
hMutex,
0,
&dwThreadId);
if (hThread == NULL)
{
Trace("ERROR:%u: unable to create thread\n",
GetLastError());
CloseHandle(hMutex);
CloseHandle(hDupMutex);
Fail("");
}
/* wait until thread has taken the mutex */
while (WR_WAITING == t1_result_DuplicateHandle_test4)
Sleep(1);
if(WR_TIMED_OUT == t1_result_DuplicateHandle_test4)
{
Trace("ERROR: %u: thread 1 couldn't acquire the mutex\n");
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
Fail("");
}
/*Create a second thread to use the duplicate Mutex*/
    /*This should fail since the Mutex is owned by hThread*/
hThread2 = CreateThread(NULL,
0,
&ThreadTest2_DuplicateHandle_test4,
hDupMutex,
0,
&dwThreadId);
if (hThread2 == NULL)
{
Trace("ERROR:%u: unable to create thread\n",
GetLastError());
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
Fail("");
}
/* wait until thread has tried to take the mutex */
while (WR_WAITING == t2_result_DuplicateHandle_test4)
Sleep(1);
if (WR_TIMED_OUT != t2_result_DuplicateHandle_test4 )
{
Trace("ERROR:%u: Able to take mutex %#x while its duplicate %#x is "
"held\n", hDupMutex, hMutex);
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
CloseHandle(hThread2);
Fail("");
}
/* reset second thread status */
t2_result_DuplicateHandle_test4 = WR_WAITING;
/* tell thread 1 to release the mutex */
t1_result_DuplicateHandle_test4 = WR_WAITING;
/* wait for thread 1 to release the mutex */
while (WR_WAITING == t1_result_DuplicateHandle_test4)
Sleep(1);
CloseHandle(hThread2);
/*Re-Create the second thread to reuse the duplicated Mutex*/
/*This test should pass, the Mutex has since been released*/
hThread2 = CreateThread(NULL,
0,
&ThreadTest2_DuplicateHandle_test4,
hDupMutex,
0,
&dwThreadId);
if (hThread2 == NULL)
{
Trace("ERROR:%u: unable to create thread\n",
GetLastError());
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
Fail("");
}
/* wait until thread has taken the mutex */
while (WR_WAITING == t2_result_DuplicateHandle_test4)
Sleep(1);
if (WR_GOT_MUTEX != t2_result_DuplicateHandle_test4 )
{
Trace("ERROR:%u: Unable to take mutex %#x after its duplicate %#x was "
"released\n", hDupMutex, hMutex);
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
CloseHandle(hThread2);
Fail("");
}
/*Cleanup.*/
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
CloseHandle(hThread2);
PAL_Terminate();
return (PASS);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test4.c (DuplicateHandle)
**
** Purpose: Tests the PAL implementation of the DuplicateHandle function.
** This tests duplication of a Mutex handle. The test consists
** of creating a Mutex and its duplicate and creating a thread that
** will get ownership. Another thread will be created that will
** attempt to get ownership of the duplicate Mutex; this will
** fail, since the Mutex is owned by another thread. The Mutex
** will then be released and the thread will attempt to get
** ownership of the duplicate Mutex; this will succeed.
**
**
**===================================================================*/
#include <palsuite.h>
enum wait_results
{
WR_WAITING,
WR_GOT_MUTEX,
WR_TIMED_OUT,
WR_RELEASED
};
volatile int t1_result_DuplicateHandle_test4=WR_WAITING;
volatile int t2_result_DuplicateHandle_test4=WR_WAITING;
DWORD PALAPI ThreadTest1_DuplicateHandle_test4(LPVOID lpParam)
{
DWORD dwWait;
dwWait = WaitForSingleObject((HANDLE)lpParam, 0);
if (dwWait == WAIT_OBJECT_0)
{
/* tell the main thread we got the mutex */
t1_result_DuplicateHandle_test4=WR_GOT_MUTEX;
/* wait for main thread to tell us to release the mutex */
while(WR_GOT_MUTEX == t1_result_DuplicateHandle_test4)
Sleep(1);
ReleaseMutex((HANDLE)lpParam);
/* tell the main thread we released the mutex */
t1_result_DuplicateHandle_test4 = WR_RELEASED;
}
else
{
t1_result_DuplicateHandle_test4 = WR_TIMED_OUT;
}
return 0;
}
DWORD PALAPI ThreadTest2_DuplicateHandle_test4(LPVOID lpParam)
{
DWORD dwWait;
dwWait = WaitForSingleObject((HANDLE)lpParam, 0 );
if (dwWait == WAIT_OBJECT_0)
{
ReleaseMutex((HANDLE)lpParam);
t2_result_DuplicateHandle_test4 = WR_GOT_MUTEX;
}
else
{
t2_result_DuplicateHandle_test4 = WR_TIMED_OUT;
}
return 0;
}
PALTEST(threading_DuplicateHandle_test4_paltest_duplicatehandle_test4, "threading/DuplicateHandle/test4/paltest_duplicatehandle_test4")
{
HANDLE hDupMutex;
HANDLE hMutex;
HANDLE hThread;
HANDLE hThread2;
BOOL bDupHandle=FALSE;
DWORD dwThreadId = 0;
if ((PAL_Initialize(argc,argv)) != 0)
{
return(FAIL);
}
/*Create Mutex without ownership*/
hMutex = CreateMutexW(NULL, // no security attributes
FALSE, // initially not owned
NULL); // name of mutex
if (hMutex == NULL)
{
Fail("ERROR:%u: Unable to create mutex\n",
GetLastError());
}
/*Create Duplicate of the Mutex above*/
bDupHandle = DuplicateHandle(GetCurrentProcess(),
hMutex,
GetCurrentProcess(),
&hDupMutex,
GENERIC_READ|GENERIC_WRITE,
FALSE,
DUPLICATE_SAME_ACCESS);
if (!bDupHandle)
{
Trace("ERROR:%u: Created the duplicate handle to "
"closed event handle hMutex=0x%lx\n",
GetLastError(),
hMutex);
CloseHandle(hMutex);
Fail("");
}
/*Create a thread to test the Mutex*/
hThread = CreateThread(NULL,
0,
&ThreadTest1_DuplicateHandle_test4,
hMutex,
0,
&dwThreadId);
if (hThread == NULL)
{
Trace("ERROR:%u: unable to create thread\n",
GetLastError());
CloseHandle(hMutex);
CloseHandle(hDupMutex);
Fail("");
}
/* wait until thread has taken the mutex */
while (WR_WAITING == t1_result_DuplicateHandle_test4)
Sleep(1);
if(WR_TIMED_OUT == t1_result_DuplicateHandle_test4)
{
Trace("ERROR: %u: thread 1 couldn't acquire the mutex\n");
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
Fail("");
}
/*Create a second thread to use the duplicate Mutex*/
    /*This should fail since the Mutex is owned by hThread*/
hThread2 = CreateThread(NULL,
0,
&ThreadTest2_DuplicateHandle_test4,
hDupMutex,
0,
&dwThreadId);
if (hThread2 == NULL)
{
Trace("ERROR:%u: unable to create thread\n",
GetLastError());
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
Fail("");
}
/* wait until thread has tried to take the mutex */
while (WR_WAITING == t2_result_DuplicateHandle_test4)
Sleep(1);
if (WR_TIMED_OUT != t2_result_DuplicateHandle_test4 )
{
Trace("ERROR:%u: Able to take mutex %#x while its duplicate %#x is "
"held\n", hDupMutex, hMutex);
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
CloseHandle(hThread2);
Fail("");
}
/* reset second thread status */
t2_result_DuplicateHandle_test4 = WR_WAITING;
/* tell thread 1 to release the mutex */
t1_result_DuplicateHandle_test4 = WR_WAITING;
/* wait for thread 1 to release the mutex */
while (WR_WAITING == t1_result_DuplicateHandle_test4)
Sleep(1);
CloseHandle(hThread2);
/*Re-Create the second thread to reuse the duplicated Mutex*/
/*This test should pass, the Mutex has since been released*/
hThread2 = CreateThread(NULL,
0,
&ThreadTest2_DuplicateHandle_test4,
hDupMutex,
0,
&dwThreadId);
if (hThread2 == NULL)
{
Trace("ERROR:%u: unable to create thread\n",
GetLastError());
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
Fail("");
}
/* wait until thread has taken the mutex */
while (WR_WAITING == t2_result_DuplicateHandle_test4)
Sleep(1);
if (WR_GOT_MUTEX != t2_result_DuplicateHandle_test4 )
{
Trace("ERROR:%u: Unable to take mutex %#x after its duplicate %#x was "
"released\n", hDupMutex, hMutex);
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
CloseHandle(hThread2);
Fail("");
}
/*Cleanup.*/
CloseHandle(hMutex);
CloseHandle(hDupMutex);
CloseHandle(hThread);
CloseHandle(hThread2);
PAL_Terminate();
return (PASS);
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/src/libunwind/include/libunwind-tilegx.h | /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2014 Tilera Corp.
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef LIBUNWIND_H
#define LIBUNWIND_H
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#include <inttypes.h>
#include <ucontext.h>
#define UNW_TARGET tilegx
#define UNW_TARGET_TILEGX 1
#define _U_TDEP_QP_TRUE 0 /* see libunwind-dynamic.h */
/* This needs to be big enough to accommodate "struct cursor", while
leaving some slack for future expansion. Changing this value will
require recompiling all users of this library. Stack allocation is
relatively cheap and unwind-state copying is relatively rare, so we
want to err on making it rather too big than too small. */
#define UNW_TDEP_CURSOR_LEN 4096
/* The size of a "word" varies on TILEGX. This type is used for memory
addresses and register values. */
typedef uint64_t unw_word_t;
typedef int64_t unw_sword_t;
typedef long double unw_tdep_fpreg_t;
typedef enum
{
UNW_TILEGX_R0,
UNW_TILEGX_R1,
UNW_TILEGX_R2,
UNW_TILEGX_R3,
UNW_TILEGX_R4,
UNW_TILEGX_R5,
UNW_TILEGX_R6,
UNW_TILEGX_R7,
UNW_TILEGX_R8,
UNW_TILEGX_R9,
UNW_TILEGX_R10,
UNW_TILEGX_R11,
UNW_TILEGX_R12,
UNW_TILEGX_R13,
UNW_TILEGX_R14,
UNW_TILEGX_R15,
UNW_TILEGX_R16,
UNW_TILEGX_R17,
UNW_TILEGX_R18,
UNW_TILEGX_R19,
UNW_TILEGX_R20,
UNW_TILEGX_R21,
UNW_TILEGX_R22,
UNW_TILEGX_R23,
UNW_TILEGX_R24,
UNW_TILEGX_R25,
UNW_TILEGX_R26,
UNW_TILEGX_R27,
UNW_TILEGX_R28,
UNW_TILEGX_R29,
UNW_TILEGX_R30,
UNW_TILEGX_R31,
UNW_TILEGX_R32,
UNW_TILEGX_R33,
UNW_TILEGX_R34,
UNW_TILEGX_R35,
UNW_TILEGX_R36,
UNW_TILEGX_R37,
UNW_TILEGX_R38,
UNW_TILEGX_R39,
UNW_TILEGX_R40,
UNW_TILEGX_R41,
UNW_TILEGX_R42,
UNW_TILEGX_R43,
UNW_TILEGX_R44,
UNW_TILEGX_R45,
UNW_TILEGX_R46,
UNW_TILEGX_R47,
UNW_TILEGX_R48,
UNW_TILEGX_R49,
UNW_TILEGX_R50,
UNW_TILEGX_R51,
UNW_TILEGX_R52,
UNW_TILEGX_R53,
UNW_TILEGX_R54,
UNW_TILEGX_R55,
/* FIXME: Other registers! */
UNW_TILEGX_PC,
/* For TILEGX, the CFA is the value of SP (r54) at the call site in the
previous frame. */
UNW_TILEGX_CFA,
UNW_TDEP_LAST_REG = UNW_TILEGX_PC,
UNW_TDEP_IP = UNW_TILEGX_R55, /* R55 is link register for Tilegx */
UNW_TDEP_SP = UNW_TILEGX_R54,
UNW_TDEP_EH = UNW_TILEGX_R0 /* FIXME. */
} tilegx_regnum_t;
typedef enum
{
UNW_TILEGX_ABI_N64 = 2
} tilegx_abi_t;
#define UNW_TDEP_NUM_EH_REGS 2 /* FIXME for TILEGX. */
typedef struct unw_tdep_save_loc
{
/* Additional target-dependent info on a save location. */
} unw_tdep_save_loc_t;
typedef ucontext_t unw_tdep_context_t;
#include "libunwind-dynamic.h"
typedef struct
{
/* no tilegx-specific auxiliary proc-info */
} unw_tdep_proc_info_t;
#include "libunwind-common.h"
#define unw_tdep_getcontext getcontext
#define unw_tdep_is_fpreg UNW_ARCH_OBJ(is_fpreg)
extern int unw_tdep_is_fpreg (int);
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif /* LIBUNWIND_H */
| /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2014 Tilera Corp.
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef LIBUNWIND_H
#define LIBUNWIND_H
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#include <inttypes.h>
#include <ucontext.h>
#define UNW_TARGET tilegx
#define UNW_TARGET_TILEGX 1
#define _U_TDEP_QP_TRUE 0 /* see libunwind-dynamic.h */
/* This needs to be big enough to accommodate "struct cursor", while
leaving some slack for future expansion. Changing this value will
require recompiling all users of this library. Stack allocation is
relatively cheap and unwind-state copying is relatively rare, so we
want to err on making it rather too big than too small. */
#define UNW_TDEP_CURSOR_LEN 4096
/* The size of a "word" varies on TILEGX. This type is used for memory
addresses and register values. */
typedef uint64_t unw_word_t;
typedef int64_t unw_sword_t;
typedef long double unw_tdep_fpreg_t;
typedef enum
{
UNW_TILEGX_R0,
UNW_TILEGX_R1,
UNW_TILEGX_R2,
UNW_TILEGX_R3,
UNW_TILEGX_R4,
UNW_TILEGX_R5,
UNW_TILEGX_R6,
UNW_TILEGX_R7,
UNW_TILEGX_R8,
UNW_TILEGX_R9,
UNW_TILEGX_R10,
UNW_TILEGX_R11,
UNW_TILEGX_R12,
UNW_TILEGX_R13,
UNW_TILEGX_R14,
UNW_TILEGX_R15,
UNW_TILEGX_R16,
UNW_TILEGX_R17,
UNW_TILEGX_R18,
UNW_TILEGX_R19,
UNW_TILEGX_R20,
UNW_TILEGX_R21,
UNW_TILEGX_R22,
UNW_TILEGX_R23,
UNW_TILEGX_R24,
UNW_TILEGX_R25,
UNW_TILEGX_R26,
UNW_TILEGX_R27,
UNW_TILEGX_R28,
UNW_TILEGX_R29,
UNW_TILEGX_R30,
UNW_TILEGX_R31,
UNW_TILEGX_R32,
UNW_TILEGX_R33,
UNW_TILEGX_R34,
UNW_TILEGX_R35,
UNW_TILEGX_R36,
UNW_TILEGX_R37,
UNW_TILEGX_R38,
UNW_TILEGX_R39,
UNW_TILEGX_R40,
UNW_TILEGX_R41,
UNW_TILEGX_R42,
UNW_TILEGX_R43,
UNW_TILEGX_R44,
UNW_TILEGX_R45,
UNW_TILEGX_R46,
UNW_TILEGX_R47,
UNW_TILEGX_R48,
UNW_TILEGX_R49,
UNW_TILEGX_R50,
UNW_TILEGX_R51,
UNW_TILEGX_R52,
UNW_TILEGX_R53,
UNW_TILEGX_R54,
UNW_TILEGX_R55,
/* FIXME: Other registers! */
UNW_TILEGX_PC,
/* For TILEGX, the CFA is the value of SP (r54) at the call site in the
previous frame. */
UNW_TILEGX_CFA,
UNW_TDEP_LAST_REG = UNW_TILEGX_PC,
UNW_TDEP_IP = UNW_TILEGX_R55, /* R55 is link register for Tilegx */
UNW_TDEP_SP = UNW_TILEGX_R54,
UNW_TDEP_EH = UNW_TILEGX_R0 /* FIXME. */
} tilegx_regnum_t;
typedef enum
{
UNW_TILEGX_ABI_N64 = 2
} tilegx_abi_t;
#define UNW_TDEP_NUM_EH_REGS 2 /* FIXME for TILEGX. */
typedef struct unw_tdep_save_loc
{
/* Additional target-dependent info on a save location. */
} unw_tdep_save_loc_t;
typedef ucontext_t unw_tdep_context_t;
#include "libunwind-dynamic.h"
typedef struct
{
/* no tilegx-specific auxiliary proc-info */
} unw_tdep_proc_info_t;
#include "libunwind-common.h"
#define unw_tdep_getcontext getcontext
#define unw_tdep_is_fpreg UNW_ARCH_OBJ(is_fpreg)
extern int unw_tdep_is_fpreg (int);
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif /* LIBUNWIND_H */
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/c_runtime/_stricmp/test1/test1.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose: Do a lower case compare. Check two strings, only different
** because they have different capitalization, and they should return 0. Try
** two strings which will return less than 0 (one is smaller than the other).
** Also try the opposite, to get a return value greater than 0.
**
**
**==========================================================================*/
#include <palsuite.h>
/*
* Note: The _stricmp is dependent on the LC_CTYPE category of the locale,
* and this is ignored by these tests.
*/
PALTEST(c_runtime__stricmp_test1_paltest_stricmp_test1, "c_runtime/_stricmp/test1/paltest_stricmp_test1")
{
char *str1 = "foo";
char *str2 = "fOo";
char *str3 = "foo_bar";
char *str4 = "foobar";
/*
* Initialize the PAL and return FAIL if this fails
*/
if (0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
if (_stricmp(str1, str2) != 0)
{
Fail ("ERROR: _stricmp returning incorrect value:\n"
"_stricmp(\"%s\", \"%s\") != 0\n", str1, str2);
}
if (_stricmp(str2, str3) >= 0)
{
Fail ("ERROR: _stricmp returning incorrect value:\n"
"_stricmp(\"%s\", \"%s\") >= 0\n", str2, str3);
}
if (_stricmp(str3, str4) >= 0)
{
Fail ("ERROR: _stricmp returning incorrect value:\n"
"_stricmp(\"%s\", \"%s\") >= 0\n", str3, str4);
}
if (_stricmp(str4, str1) <= 0)
{
Fail ("ERROR: _stricmp returning incorrect value:\n"
"_stricmp(\"%s\", \"%s\") <= 0\n", str4, str1);
}
if (_stricmp(str3, str2) <= 0)
{
Fail ("ERROR: _stricmp returning incorrect value:\n"
"_stricmp(\"%s\", \"%s\") <= 0\n", str2, str3);
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose: Do a lower case compare. Check two strings, only different
** because they have different capitalization, and they should return 0. Try
** two strings which will return less than 0 (one is smaller than the other).
** Also try the opposite, to get a return value greater than 0.
**
**
**==========================================================================*/
#include <palsuite.h>
/*
* Note: The _stricmp is dependent on the LC_CTYPE category of the locale,
* and this is ignored by these tests.
*/
PALTEST(c_runtime__stricmp_test1_paltest_stricmp_test1, "c_runtime/_stricmp/test1/paltest_stricmp_test1")
{
char *str1 = "foo";
char *str2 = "fOo";
char *str3 = "foo_bar";
char *str4 = "foobar";
/*
* Initialize the PAL and return FAIL if this fails
*/
if (0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
if (_stricmp(str1, str2) != 0)
{
Fail ("ERROR: _stricmp returning incorrect value:\n"
"_stricmp(\"%s\", \"%s\") != 0\n", str1, str2);
}
if (_stricmp(str2, str3) >= 0)
{
Fail ("ERROR: _stricmp returning incorrect value:\n"
"_stricmp(\"%s\", \"%s\") >= 0\n", str2, str3);
}
if (_stricmp(str3, str4) >= 0)
{
Fail ("ERROR: _stricmp returning incorrect value:\n"
"_stricmp(\"%s\", \"%s\") >= 0\n", str3, str4);
}
if (_stricmp(str4, str1) <= 0)
{
Fail ("ERROR: _stricmp returning incorrect value:\n"
"_stricmp(\"%s\", \"%s\") <= 0\n", str4, str1);
}
if (_stricmp(str3, str2) <= 0)
{
Fail ("ERROR: _stricmp returning incorrect value:\n"
"_stricmp(\"%s\", \"%s\") <= 0\n", str2, str3);
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/native/corehost/ijwhost/i386/bootstrap_thunk.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef IJW_BOOTSTRAP_THUNK_H
#define IJW_BOOTSTRAP_THUNK_H
#if !defined(TARGET_X86)
#error "This file should only be included on x86 builds."
#endif
#include "pal.h"
#include "corhdr.h"
extern "C" void STDMETHODCALLTYPE start_runtime_thunk_stub();
#include <pshpack1.h>
//=================================================================================
class bootstrap_thunk
{
private:
std::uint32_t m_token;
struct {
BYTE m_call; //0xe8
UINT32 m_thunkFcn; //bootstrapper function
} m_code;
pal::dll_t m_dll; // pal::dll_t of this module
std::uintptr_t *m_slot; // VTable slot for this thunk
public:
// Get thunk from the return address that the call instruction would have pushed
static bootstrap_thunk *get_thunk_from_cookie(std::uintptr_t cookie);
// Get thunk from the return address that the call instruction would have pushed
static bootstrap_thunk *get_thunk_from_entrypoint(std::uintptr_t entryAddr);
// Initializes the thunk to point to pThunkInitFcn that will load the
// runtime and perform the real thunk initialization.
void initialize(std::uintptr_t pThunkInitFcn,
pal::dll_t dll,
std::uint32_t token,
std::uintptr_t *pSlot);
// Returns the slot address of the vtable entry for this thunk
std::uintptr_t *get_slot_address();
// Returns the pal::dll_t for this thunk's module
pal::dll_t get_dll_handle();
// Returns the token of this thunk
std::uint32_t get_token();
std::uintptr_t get_entrypoint();
};
#include <poppack.h>
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef IJW_BOOTSTRAP_THUNK_H
#define IJW_BOOTSTRAP_THUNK_H
#if !defined(TARGET_X86)
#error "This file should only be included on x86 builds."
#endif
#include "pal.h"
#include "corhdr.h"
extern "C" void STDMETHODCALLTYPE start_runtime_thunk_stub();
#include <pshpack1.h>
//=================================================================================
class bootstrap_thunk
{
private:
std::uint32_t m_token;
struct {
BYTE m_call; //0xe8
UINT32 m_thunkFcn; //bootstrapper function
} m_code;
pal::dll_t m_dll; // pal::dll_t of this module
std::uintptr_t *m_slot; // VTable slot for this thunk
public:
// Get thunk from the return address that the call instruction would have pushed
static bootstrap_thunk *get_thunk_from_cookie(std::uintptr_t cookie);
// Get thunk from the return address that the call instruction would have pushed
static bootstrap_thunk *get_thunk_from_entrypoint(std::uintptr_t entryAddr);
// Initializes the thunk to point to pThunkInitFcn that will load the
// runtime and perform the real thunk initialization.
void initialize(std::uintptr_t pThunkInitFcn,
pal::dll_t dll,
std::uint32_t token,
std::uintptr_t *pSlot);
// Returns the slot address of the vtable entry for this thunk
std::uintptr_t *get_slot_address();
// Returns the pal::dll_t for this thunk's module
pal::dll_t get_dll_handle();
// Returns the token of this thunk
std::uint32_t get_token();
std::uintptr_t get_entrypoint();
};
#include <poppack.h>
#endif
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/gc/softwarewritewatch.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __SOFTWARE_WRITE_WATCH_H__
#define __SOFTWARE_WRITE_WATCH_H__
#include "gcinterface.h"
#include "gc.h"
#define WRITE_WATCH_UNIT_SIZE ((size_t)0x1000)
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#ifndef DACCESS_COMPILE
extern "C"
{
// Table containing the dirty state. This table is translated to exclude the lowest address it represents, see
// TranslateTableToExcludeHeapStartAddress.
extern uint8_t *g_gc_sw_ww_table;
// Write watch may be disabled when it is not needed (between GCs for instance). This indicates whether it is enabled.
extern bool g_gc_sw_ww_enabled_for_gc_heap;
}
class SoftwareWriteWatch
{
private:
// The granularity of dirty state in the table is one page. Dirtiness is tracked per byte of the table so that
// synchronization is not required when changing the dirty state. Shifting-right an address by the following value yields
// the byte index of the address into the write watch table. For instance,
// GetTable()[address >> AddressToTableByteIndexShift] is the byte that represents the region of memory for 'address'.
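// As a worked example, assuming the shift matches the 4 KB write watch unit
// above (a shift of 12): for address 0x7FF456789ABC,
//   byteIndex = 0x7FF456789ABC >> 12 = 0x7FF456789
// so GetTable()[0x7FF456789] covers that page, and any two addresses that
// differ only in their low 12 bits map to the same dirty byte.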
static const uint8_t AddressToTableByteIndexShift = SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift;
private:
static void VerifyCreated();
static void VerifyMemoryRegion(void *baseAddress, size_t regionByteSize);
static void VerifyMemoryRegion(void *baseAddress, size_t regionByteSize, void *heapStartAddress, void *heapEndAddress);
public:
static uint8_t *GetTable();
private:
static uint8_t *GetUntranslatedTable();
static uint8_t *GetUntranslatedTable(uint8_t *table, void *heapStartAddress);
static uint8_t *GetUntranslatedTableEnd();
static uint8_t *GetUntranslatedTableEnd(uint8_t *table, void *heapEndAddress);
public:
static void InitializeUntranslatedTable(uint8_t *untranslatedTable, void *heapStartAddress);
private:
static void SetUntranslatedTable(uint8_t *untranslatedTable, void *heapStartAddress);
public:
static void SetResizedUntranslatedTable(uint8_t *untranslatedTable, void *heapStartAddress, void *heapEndAddress);
static bool IsEnabledForGCHeap();
static void EnableForGCHeap();
static void DisableForGCHeap();
private:
static void *GetHeapStartAddress();
static void *GetHeapEndAddress();
public:
static void StaticClose();
private:
static size_t GetTableByteIndex(void *address);
static void *GetPageAddress(size_t tableByteIndex);
public:
static size_t GetTableByteSize(void *heapStartAddress, void *heapEndAddress);
static size_t GetTableStartByteOffset(size_t byteSizeBeforeTable);
private:
static uint8_t *TranslateTableToExcludeHeapStartAddress(uint8_t *table, void *heapStartAddress);
static void TranslateToTableRegion(void *baseAddress, size_t regionByteSize, uint8_t **tableBaseAddressRef, size_t *tableRegionByteSizeRef);
public:
static void ClearDirty(void *baseAddress, size_t regionByteSize);
static void SetDirty(void *address, size_t writeByteSize);
static void SetDirtyRegion(void *baseAddress, size_t regionByteSize);
private:
static bool GetDirtyFromBlock(uint8_t *block, uint8_t *firstPageAddressInBlock, size_t startByteIndex, size_t endByteIndex, void **dirtyPages, size_t *dirtyPageIndexRef, size_t dirtyPageCount, bool clearDirty);
public:
static void GetDirty(void *baseAddress, size_t regionByteSize, void **dirtyPages, size_t *dirtyPageCountRef, bool clearDirty, bool isRuntimeSuspended);
};
inline void SoftwareWriteWatch::VerifyCreated()
{
assert(GetTable() != nullptr);
assert(GetHeapStartAddress() != nullptr);
assert(GetHeapEndAddress() != nullptr);
assert(GetHeapStartAddress() < GetHeapEndAddress());
}
inline void SoftwareWriteWatch::VerifyMemoryRegion(void *baseAddress, size_t regionByteSize)
{
VerifyMemoryRegion(baseAddress, regionByteSize, GetHeapStartAddress(), GetHeapEndAddress());
}
inline void SoftwareWriteWatch::VerifyMemoryRegion(
void *baseAddress,
size_t regionByteSize,
void *heapStartAddress,
void *heapEndAddress)
{
VerifyCreated();
assert(baseAddress != nullptr);
assert(heapStartAddress != nullptr);
assert(heapStartAddress >= GetHeapStartAddress());
assert(heapEndAddress != nullptr);
assert(heapEndAddress <= GetHeapEndAddress());
assert(baseAddress >= heapStartAddress);
assert(baseAddress < heapEndAddress);
assert(regionByteSize != 0);
assert(regionByteSize <= reinterpret_cast<size_t>(heapEndAddress) - reinterpret_cast<size_t>(baseAddress));
}
inline uint8_t *SoftwareWriteWatch::GetTable()
{
return g_gc_sw_ww_table;
}
inline uint8_t *SoftwareWriteWatch::GetUntranslatedTable()
{
VerifyCreated();
return GetUntranslatedTable(GetTable(), GetHeapStartAddress());
}
inline uint8_t *SoftwareWriteWatch::GetUntranslatedTable(uint8_t *table, void *heapStartAddress)
{
assert(table != nullptr);
assert(heapStartAddress != nullptr);
assert(heapStartAddress >= GetHeapStartAddress());
uint8_t *untranslatedTable = table + GetTableByteIndex(heapStartAddress);
assert(ALIGN_DOWN(untranslatedTable, sizeof(size_t)) == untranslatedTable);
return untranslatedTable;
}
inline uint8_t *SoftwareWriteWatch::GetUntranslatedTableEnd()
{
VerifyCreated();
return GetUntranslatedTableEnd(GetTable(), GetHeapEndAddress());
}
inline uint8_t *SoftwareWriteWatch::GetUntranslatedTableEnd(uint8_t *table, void *heapEndAddress)
{
assert(table != nullptr);
assert(heapEndAddress != nullptr);
assert(heapEndAddress <= GetHeapEndAddress());
return ALIGN_UP(&table[GetTableByteIndex(reinterpret_cast<uint8_t *>(heapEndAddress) - 1) + 1], sizeof(size_t));
}
inline void SoftwareWriteWatch::InitializeUntranslatedTable(uint8_t *untranslatedTable, void *heapStartAddress)
{
assert(GetTable() == nullptr);
SetUntranslatedTable(untranslatedTable, heapStartAddress);
}
inline void SoftwareWriteWatch::SetUntranslatedTable(uint8_t *untranslatedTable, void *heapStartAddress)
{
assert(untranslatedTable != nullptr);
assert(ALIGN_DOWN(untranslatedTable, sizeof(size_t)) == untranslatedTable);
assert(heapStartAddress != nullptr);
g_gc_sw_ww_table = TranslateTableToExcludeHeapStartAddress(untranslatedTable, heapStartAddress);
}
inline void SoftwareWriteWatch::SetResizedUntranslatedTable(
uint8_t *untranslatedTable,
void *heapStartAddress,
void *heapEndAddress)
{
// The runtime needs to be suspended during this call, and background GC threads need to synchronize calls to ClearDirty()
// and GetDirty() such that they are not called concurrently with this function
VerifyCreated();
assert(untranslatedTable != nullptr);
assert(ALIGN_DOWN(untranslatedTable, sizeof(size_t)) == untranslatedTable);
assert(heapStartAddress != nullptr);
assert(heapEndAddress != nullptr);
assert(heapStartAddress <= GetHeapStartAddress());
assert(heapEndAddress >= GetHeapEndAddress());
assert(heapStartAddress < GetHeapStartAddress() || heapEndAddress > GetHeapEndAddress());
uint8_t *oldUntranslatedTable = GetUntranslatedTable();
void *oldTableHeapStartAddress = GetHeapStartAddress();
size_t oldTableByteSize = GetTableByteSize(oldTableHeapStartAddress, GetHeapEndAddress());
SetUntranslatedTable(untranslatedTable, heapStartAddress);
uint8_t *tableRegionStart = &GetTable()[GetTableByteIndex(oldTableHeapStartAddress)];
memcpy(tableRegionStart, oldUntranslatedTable, oldTableByteSize);
}
inline bool SoftwareWriteWatch::IsEnabledForGCHeap()
{
return g_gc_sw_ww_enabled_for_gc_heap;
}
inline void SoftwareWriteWatch::EnableForGCHeap()
{
// The runtime needs to be suspended during this call. This is how it currently guarantees that GC heap writes from other
// threads between calls to EnableForGCHeap() and DisableForGCHeap() will be tracked.
VerifyCreated();
assert(!IsEnabledForGCHeap());
g_gc_sw_ww_enabled_for_gc_heap = true;
WriteBarrierParameters args = {};
args.operation = WriteBarrierOp::SwitchToWriteWatch;
args.write_watch_table = g_gc_sw_ww_table;
args.is_runtime_suspended = true;
GCToEEInterface::StompWriteBarrier(&args);
}
inline void SoftwareWriteWatch::DisableForGCHeap()
{
// The runtime needs to be suspended during this call. This is how it currently guarantees that GC heap writes from other
// threads between calls to EnableForGCHeap() and DisableForGCHeap() will be tracked.
VerifyCreated();
assert(IsEnabledForGCHeap());
g_gc_sw_ww_enabled_for_gc_heap = false;
WriteBarrierParameters args = {};
args.operation = WriteBarrierOp::SwitchToNonWriteWatch;
args.is_runtime_suspended = true;
GCToEEInterface::StompWriteBarrier(&args);
}
inline void *SoftwareWriteWatch::GetHeapStartAddress()
{
return g_gc_lowest_address;
}
inline void *SoftwareWriteWatch::GetHeapEndAddress()
{
return g_gc_highest_address;
}
inline size_t SoftwareWriteWatch::GetTableByteIndex(void *address)
{
assert(address != nullptr);
size_t tableByteIndex = reinterpret_cast<size_t>(address) >> AddressToTableByteIndexShift;
assert(tableByteIndex != 0);
return tableByteIndex;
}
inline void *SoftwareWriteWatch::GetPageAddress(size_t tableByteIndex)
{
assert(tableByteIndex != 0);
void *pageAddress = reinterpret_cast<void *>(tableByteIndex << AddressToTableByteIndexShift);
assert(pageAddress >= GetHeapStartAddress());
assert(pageAddress < GetHeapEndAddress());
assert(ALIGN_DOWN(pageAddress, WRITE_WATCH_UNIT_SIZE) == pageAddress);
return pageAddress;
}
inline size_t SoftwareWriteWatch::GetTableByteSize(void *heapStartAddress, void *heapEndAddress)
{
assert(heapStartAddress != nullptr);
assert(heapEndAddress != nullptr);
assert(heapStartAddress < heapEndAddress);
size_t tableByteSize =
GetTableByteIndex(reinterpret_cast<uint8_t *>(heapEndAddress) - 1) - GetTableByteIndex(heapStartAddress) + 1;
tableByteSize = ALIGN_UP(tableByteSize, sizeof(size_t));
return tableByteSize;
}
inline uint8_t *SoftwareWriteWatch::TranslateTableToExcludeHeapStartAddress(uint8_t *table, void *heapStartAddress)
{
assert(table != nullptr);
assert(heapStartAddress != nullptr);
// Exclude the table byte index corresponding to the heap start address from the table pointer, so that each lookup in the
// table by address does not have to calculate (address - heapStartAddress)
return table - GetTableByteIndex(heapStartAddress);
}
inline void SoftwareWriteWatch::TranslateToTableRegion(
void *baseAddress,
size_t regionByteSize,
uint8_t **tableBaseAddressRef,
size_t *tableRegionByteSizeRef)
{
VerifyCreated();
VerifyMemoryRegion(baseAddress, regionByteSize);
assert(tableBaseAddressRef != nullptr);
assert(tableRegionByteSizeRef != nullptr);
size_t baseAddressTableByteIndex = GetTableByteIndex(baseAddress);
*tableBaseAddressRef = &GetTable()[baseAddressTableByteIndex];
*tableRegionByteSizeRef =
GetTableByteIndex(reinterpret_cast<uint8_t *>(baseAddress) + (regionByteSize - 1)) - baseAddressTableByteIndex + 1;
}
inline void SoftwareWriteWatch::ClearDirty(void *baseAddress, size_t regionByteSize)
{
VerifyCreated();
VerifyMemoryRegion(baseAddress, regionByteSize);
uint8_t *tableBaseAddress;
size_t tableRegionByteSize;
TranslateToTableRegion(baseAddress, regionByteSize, &tableBaseAddress, &tableRegionByteSize);
memset(tableBaseAddress, 0, tableRegionByteSize);
}
inline void SoftwareWriteWatch::SetDirty(void *address, size_t writeByteSize)
{
VerifyCreated();
VerifyMemoryRegion(address, writeByteSize);
assert(address != nullptr);
assert(writeByteSize <= sizeof(void *));
size_t tableByteIndex = GetTableByteIndex(address);
assert(GetTableByteIndex(reinterpret_cast<uint8_t *>(address) + (writeByteSize - 1)) == tableByteIndex);
uint8_t *tableByteAddress = &GetTable()[tableByteIndex];
if (*tableByteAddress == 0)
{
*tableByteAddress = 0xff;
}
}
inline void SoftwareWriteWatch::SetDirtyRegion(void *baseAddress, size_t regionByteSize)
{
VerifyCreated();
VerifyMemoryRegion(baseAddress, regionByteSize);
uint8_t *tableBaseAddress;
size_t tableRegionByteSize;
TranslateToTableRegion(baseAddress, regionByteSize, &tableBaseAddress, &tableRegionByteSize);
memset(tableBaseAddress, ~0, tableRegionByteSize);
}
#endif // !DACCESS_COMPILE
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#endif // !__SOFTWARE_WRITE_WATCH_H__
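The pointer translation described in the header above (TranslateTableToExcludeHeapStartAddress) can be seen in isolation in the sketch below. The heap bounds and the shift are made-up values for illustration, not the runtime's, and the biased base pointer is the same trick the real table relies on.
#include <cstdint>
#include <cstdio>
#include <vector>
int main()
{
    const int       shift     = 12;          // one table byte per 4 KB page
    const uintptr_t heapStart = 0x10000000;  // illustrative bounds only
    const uintptr_t heapEnd   = 0x10400000;
    // One dirty byte per page of [heapStart, heapEnd).
    std::vector<uint8_t> storage((heapEnd - heapStart) >> shift, 0);
    // Bias the base pointer so lookups can index with (address >> shift)
    // directly, with no (address - heapStart) subtraction on every write.
    uint8_t* table = storage.data() - (heapStart >> shift);
    uintptr_t address = 0x10012345;          // somewhere inside the "heap"
    table[address >> shift] = 0xff;          // mark that page dirty
    std::printf("dirty byte offset in storage: %u\n",
                (unsigned)((address >> shift) - (heapStart >> shift))); // prints 18
    return 0;
}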
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __SOFTWARE_WRITE_WATCH_H__
#define __SOFTWARE_WRITE_WATCH_H__
#include "gcinterface.h"
#include "gc.h"
#define WRITE_WATCH_UNIT_SIZE ((size_t)0x1000)
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#ifndef DACCESS_COMPILE
extern "C"
{
// Table containing the dirty state. This table is translated to exclude the lowest address it represents, see
// TranslateTableToExcludeHeapStartAddress.
extern uint8_t *g_gc_sw_ww_table;
// Write watch may be disabled when it is not needed (between GCs for instance). This indicates whether it is enabled.
extern bool g_gc_sw_ww_enabled_for_gc_heap;
}
class SoftwareWriteWatch
{
private:
// The granularity of dirty state in the table is one page. Dirtiness is tracked per byte of the table so that
// synchronization is not required when changing the dirty state. Shifting-right an address by the following value yields
// the byte index of the address into the write watch table. For instance,
// GetTable()[address >> AddressToTableByteIndexShift] is the byte that represents the region of memory for 'address'.
static const uint8_t AddressToTableByteIndexShift = SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift;
private:
static void VerifyCreated();
static void VerifyMemoryRegion(void *baseAddress, size_t regionByteSize);
static void VerifyMemoryRegion(void *baseAddress, size_t regionByteSize, void *heapStartAddress, void *heapEndAddress);
public:
static uint8_t *GetTable();
private:
static uint8_t *GetUntranslatedTable();
static uint8_t *GetUntranslatedTable(uint8_t *table, void *heapStartAddress);
static uint8_t *GetUntranslatedTableEnd();
static uint8_t *GetUntranslatedTableEnd(uint8_t *table, void *heapEndAddress);
public:
static void InitializeUntranslatedTable(uint8_t *untranslatedTable, void *heapStartAddress);
private:
static void SetUntranslatedTable(uint8_t *untranslatedTable, void *heapStartAddress);
public:
static void SetResizedUntranslatedTable(uint8_t *untranslatedTable, void *heapStartAddress, void *heapEndAddress);
static bool IsEnabledForGCHeap();
static void EnableForGCHeap();
static void DisableForGCHeap();
private:
static void *GetHeapStartAddress();
static void *GetHeapEndAddress();
public:
static void StaticClose();
private:
static size_t GetTableByteIndex(void *address);
static void *GetPageAddress(size_t tableByteIndex);
public:
static size_t GetTableByteSize(void *heapStartAddress, void *heapEndAddress);
static size_t GetTableStartByteOffset(size_t byteSizeBeforeTable);
private:
static uint8_t *TranslateTableToExcludeHeapStartAddress(uint8_t *table, void *heapStartAddress);
static void TranslateToTableRegion(void *baseAddress, size_t regionByteSize, uint8_t **tableBaseAddressRef, size_t *tableRegionByteSizeRef);
public:
static void ClearDirty(void *baseAddress, size_t regionByteSize);
static void SetDirty(void *address, size_t writeByteSize);
static void SetDirtyRegion(void *baseAddress, size_t regionByteSize);
private:
static bool GetDirtyFromBlock(uint8_t *block, uint8_t *firstPageAddressInBlock, size_t startByteIndex, size_t endByteIndex, void **dirtyPages, size_t *dirtyPageIndexRef, size_t dirtyPageCount, bool clearDirty);
public:
static void GetDirty(void *baseAddress, size_t regionByteSize, void **dirtyPages, size_t *dirtyPageCountRef, bool clearDirty, bool isRuntimeSuspended);
};
inline void SoftwareWriteWatch::VerifyCreated()
{
assert(GetTable() != nullptr);
assert(GetHeapStartAddress() != nullptr);
assert(GetHeapEndAddress() != nullptr);
assert(GetHeapStartAddress() < GetHeapEndAddress());
}
inline void SoftwareWriteWatch::VerifyMemoryRegion(void *baseAddress, size_t regionByteSize)
{
VerifyMemoryRegion(baseAddress, regionByteSize, GetHeapStartAddress(), GetHeapEndAddress());
}
inline void SoftwareWriteWatch::VerifyMemoryRegion(
void *baseAddress,
size_t regionByteSize,
void *heapStartAddress,
void *heapEndAddress)
{
VerifyCreated();
assert(baseAddress != nullptr);
assert(heapStartAddress != nullptr);
assert(heapStartAddress >= GetHeapStartAddress());
assert(heapEndAddress != nullptr);
assert(heapEndAddress <= GetHeapEndAddress());
assert(baseAddress >= heapStartAddress);
assert(baseAddress < heapEndAddress);
assert(regionByteSize != 0);
assert(regionByteSize <= reinterpret_cast<size_t>(heapEndAddress) - reinterpret_cast<size_t>(baseAddress));
}
inline uint8_t *SoftwareWriteWatch::GetTable()
{
return g_gc_sw_ww_table;
}
inline uint8_t *SoftwareWriteWatch::GetUntranslatedTable()
{
VerifyCreated();
return GetUntranslatedTable(GetTable(), GetHeapStartAddress());
}
inline uint8_t *SoftwareWriteWatch::GetUntranslatedTable(uint8_t *table, void *heapStartAddress)
{
assert(table != nullptr);
assert(heapStartAddress != nullptr);
assert(heapStartAddress >= GetHeapStartAddress());
uint8_t *untranslatedTable = table + GetTableByteIndex(heapStartAddress);
assert(ALIGN_DOWN(untranslatedTable, sizeof(size_t)) == untranslatedTable);
return untranslatedTable;
}
inline uint8_t *SoftwareWriteWatch::GetUntranslatedTableEnd()
{
VerifyCreated();
return GetUntranslatedTableEnd(GetTable(), GetHeapEndAddress());
}
inline uint8_t *SoftwareWriteWatch::GetUntranslatedTableEnd(uint8_t *table, void *heapEndAddress)
{
assert(table != nullptr);
assert(heapEndAddress != nullptr);
assert(heapEndAddress <= GetHeapEndAddress());
return ALIGN_UP(&table[GetTableByteIndex(reinterpret_cast<uint8_t *>(heapEndAddress) - 1) + 1], sizeof(size_t));
}
inline void SoftwareWriteWatch::InitializeUntranslatedTable(uint8_t *untranslatedTable, void *heapStartAddress)
{
assert(GetTable() == nullptr);
SetUntranslatedTable(untranslatedTable, heapStartAddress);
}
inline void SoftwareWriteWatch::SetUntranslatedTable(uint8_t *untranslatedTable, void *heapStartAddress)
{
assert(untranslatedTable != nullptr);
assert(ALIGN_DOWN(untranslatedTable, sizeof(size_t)) == untranslatedTable);
assert(heapStartAddress != nullptr);
g_gc_sw_ww_table = TranslateTableToExcludeHeapStartAddress(untranslatedTable, heapStartAddress);
}
inline void SoftwareWriteWatch::SetResizedUntranslatedTable(
uint8_t *untranslatedTable,
void *heapStartAddress,
void *heapEndAddress)
{
// The runtime needs to be suspended during this call, and background GC threads need to synchronize calls to ClearDirty()
// and GetDirty() such that they are not called concurrently with this function
VerifyCreated();
assert(untranslatedTable != nullptr);
assert(ALIGN_DOWN(untranslatedTable, sizeof(size_t)) == untranslatedTable);
assert(heapStartAddress != nullptr);
assert(heapEndAddress != nullptr);
assert(heapStartAddress <= GetHeapStartAddress());
assert(heapEndAddress >= GetHeapEndAddress());
assert(heapStartAddress < GetHeapStartAddress() || heapEndAddress > GetHeapEndAddress());
uint8_t *oldUntranslatedTable = GetUntranslatedTable();
void *oldTableHeapStartAddress = GetHeapStartAddress();
size_t oldTableByteSize = GetTableByteSize(oldTableHeapStartAddress, GetHeapEndAddress());
SetUntranslatedTable(untranslatedTable, heapStartAddress);
uint8_t *tableRegionStart = &GetTable()[GetTableByteIndex(oldTableHeapStartAddress)];
memcpy(tableRegionStart, oldUntranslatedTable, oldTableByteSize);
}
inline bool SoftwareWriteWatch::IsEnabledForGCHeap()
{
return g_gc_sw_ww_enabled_for_gc_heap;
}
inline void SoftwareWriteWatch::EnableForGCHeap()
{
// The runtime needs to be suspended during this call. This is how it currently guarantees that GC heap writes from other
// threads between calls to EnableForGCHeap() and DisableForGCHeap() will be tracked.
VerifyCreated();
assert(!IsEnabledForGCHeap());
g_gc_sw_ww_enabled_for_gc_heap = true;
WriteBarrierParameters args = {};
args.operation = WriteBarrierOp::SwitchToWriteWatch;
args.write_watch_table = g_gc_sw_ww_table;
args.is_runtime_suspended = true;
GCToEEInterface::StompWriteBarrier(&args);
}
inline void SoftwareWriteWatch::DisableForGCHeap()
{
// The runtime needs to be suspended during this call. This is how it currently guarantees that GC heap writes from other
// threads between calls to EnableForGCHeap() and DisableForGCHeap() will be tracked.
VerifyCreated();
assert(IsEnabledForGCHeap());
g_gc_sw_ww_enabled_for_gc_heap = false;
WriteBarrierParameters args = {};
args.operation = WriteBarrierOp::SwitchToNonWriteWatch;
args.is_runtime_suspended = true;
GCToEEInterface::StompWriteBarrier(&args);
}
inline void *SoftwareWriteWatch::GetHeapStartAddress()
{
return g_gc_lowest_address;
}
inline void *SoftwareWriteWatch::GetHeapEndAddress()
{
return g_gc_highest_address;
}
inline size_t SoftwareWriteWatch::GetTableByteIndex(void *address)
{
assert(address != nullptr);
size_t tableByteIndex = reinterpret_cast<size_t>(address) >> AddressToTableByteIndexShift;
assert(tableByteIndex != 0);
return tableByteIndex;
}
inline void *SoftwareWriteWatch::GetPageAddress(size_t tableByteIndex)
{
assert(tableByteIndex != 0);
void *pageAddress = reinterpret_cast<void *>(tableByteIndex << AddressToTableByteIndexShift);
assert(pageAddress >= GetHeapStartAddress());
assert(pageAddress < GetHeapEndAddress());
assert(ALIGN_DOWN(pageAddress, WRITE_WATCH_UNIT_SIZE) == pageAddress);
return pageAddress;
}
inline size_t SoftwareWriteWatch::GetTableByteSize(void *heapStartAddress, void *heapEndAddress)
{
assert(heapStartAddress != nullptr);
assert(heapEndAddress != nullptr);
assert(heapStartAddress < heapEndAddress);
size_t tableByteSize =
GetTableByteIndex(reinterpret_cast<uint8_t *>(heapEndAddress) - 1) - GetTableByteIndex(heapStartAddress) + 1;
tableByteSize = ALIGN_UP(tableByteSize, sizeof(size_t));
return tableByteSize;
}
inline uint8_t *SoftwareWriteWatch::TranslateTableToExcludeHeapStartAddress(uint8_t *table, void *heapStartAddress)
{
assert(table != nullptr);
assert(heapStartAddress != nullptr);
// Exclude the table byte index corresponding to the heap start address from the table pointer, so that each lookup in the
// table by address does not have to calculate (address - heapStartAddress)
return table - GetTableByteIndex(heapStartAddress);
}
inline void SoftwareWriteWatch::TranslateToTableRegion(
void *baseAddress,
size_t regionByteSize,
uint8_t **tableBaseAddressRef,
size_t *tableRegionByteSizeRef)
{
VerifyCreated();
VerifyMemoryRegion(baseAddress, regionByteSize);
assert(tableBaseAddressRef != nullptr);
assert(tableRegionByteSizeRef != nullptr);
size_t baseAddressTableByteIndex = GetTableByteIndex(baseAddress);
*tableBaseAddressRef = &GetTable()[baseAddressTableByteIndex];
*tableRegionByteSizeRef =
GetTableByteIndex(reinterpret_cast<uint8_t *>(baseAddress) + (regionByteSize - 1)) - baseAddressTableByteIndex + 1;
}
inline void SoftwareWriteWatch::ClearDirty(void *baseAddress, size_t regionByteSize)
{
VerifyCreated();
VerifyMemoryRegion(baseAddress, regionByteSize);
uint8_t *tableBaseAddress;
size_t tableRegionByteSize;
TranslateToTableRegion(baseAddress, regionByteSize, &tableBaseAddress, &tableRegionByteSize);
memset(tableBaseAddress, 0, tableRegionByteSize);
}
inline void SoftwareWriteWatch::SetDirty(void *address, size_t writeByteSize)
{
VerifyCreated();
VerifyMemoryRegion(address, writeByteSize);
assert(address != nullptr);
assert(writeByteSize <= sizeof(void *));
size_t tableByteIndex = GetTableByteIndex(address);
assert(GetTableByteIndex(reinterpret_cast<uint8_t *>(address) + (writeByteSize - 1)) == tableByteIndex);
uint8_t *tableByteAddress = &GetTable()[tableByteIndex];
if (*tableByteAddress == 0)
{
*tableByteAddress = 0xff;
}
}
inline void SoftwareWriteWatch::SetDirtyRegion(void *baseAddress, size_t regionByteSize)
{
VerifyCreated();
VerifyMemoryRegion(baseAddress, regionByteSize);
uint8_t *tableBaseAddress;
size_t tableRegionByteSize;
TranslateToTableRegion(baseAddress, regionByteSize, &tableBaseAddress, &tableRegionByteSize);
memset(tableBaseAddress, ~0, tableRegionByteSize);
}
#endif // !DACCESS_COMPILE
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#endif // !__SOFTWARE_WRITE_WATCH_H__
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/c_runtime/fwprintf/test11/test11.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test11.c
**
** Purpose: Test the unsigned int specifier (%u).
** This test is modeled after the sprintf series.
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../fwprintf.h"
/*
* Depends on memcmp, strlen, fopen, fseek and fgets.
*/
PALTEST(c_runtime_fwprintf_test11_paltest_fwprintf_test11, "c_runtime/fwprintf/test11/paltest_fwprintf_test11")
{
int neg = -42;
int pos = 42;
INT64 l = 42;
if (PAL_Initialize(argc, argv) != 0)
{
return(FAIL);
}
DoNumTest(convert("foo %u"), pos, "foo 42");
DoNumTest(convert("foo %lu"), 0xFFFF, "foo 65535");
DoNumTest(convert("foo %hu"), 0xFFFF, "foo 65535");
DoNumTest(convert("foo %Lu"), pos, "foo 42");
DoI64Test(convert("foo %I64u"), l, "42", "foo 42", "foo 42");
DoNumTest(convert("foo %3u"), pos, "foo 42");
DoNumTest(convert("foo %-3u"), pos, "foo 42 ");
DoNumTest(convert("foo %.1u"), pos, "foo 42");
DoNumTest(convert("foo %.3u"), pos, "foo 042");
DoNumTest(convert("foo %03u"), pos, "foo 042");
DoNumTest(convert("foo %#u"), pos, "foo 42");
DoNumTest(convert("foo %+u"), pos, "foo 42");
DoNumTest(convert("foo % u"), pos, "foo 42");
DoNumTest(convert("foo %+u"), neg, "foo 4294967254");
DoNumTest(convert("foo % u"), neg, "foo 4294967254");
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test11.c
**
** Purpose: Test the unsigned int specifier (%u).
** This test is modeled after the sprintf series.
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../fwprintf.h"
/*
* Depends on memcmp, strlen, fopen, fseek and fgets.
*/
PALTEST(c_runtime_fwprintf_test11_paltest_fwprintf_test11, "c_runtime/fwprintf/test11/paltest_fwprintf_test11")
{
int neg = -42;
int pos = 42;
INT64 l = 42;
if (PAL_Initialize(argc, argv) != 0)
{
return(FAIL);
}
DoNumTest(convert("foo %u"), pos, "foo 42");
DoNumTest(convert("foo %lu"), 0xFFFF, "foo 65535");
DoNumTest(convert("foo %hu"), 0xFFFF, "foo 65535");
DoNumTest(convert("foo %Lu"), pos, "foo 42");
DoI64Test(convert("foo %I64u"), l, "42", "foo 42", "foo 42");
DoNumTest(convert("foo %3u"), pos, "foo 42");
DoNumTest(convert("foo %-3u"), pos, "foo 42 ");
DoNumTest(convert("foo %.1u"), pos, "foo 42");
DoNumTest(convert("foo %.3u"), pos, "foo 042");
DoNumTest(convert("foo %03u"), pos, "foo 042");
DoNumTest(convert("foo %#u"), pos, "foo 42");
DoNumTest(convert("foo %+u"), pos, "foo 42");
DoNumTest(convert("foo % u"), pos, "foo 42");
DoNumTest(convert("foo %+u"), neg, "foo 4294967254");
DoNumTest(convert("foo % u"), neg, "foo 4294967254");
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/native/corehost/test/nativehost/get_native_search_directories_test.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "get_native_search_directories_test.h"
#include "error_writer_redirector.h"
#include "hostfxr_exports.h"
#include <error_codes.h>
bool get_native_search_directories_test::get_for_command_line(
const pal::string_t& hostfxr_path,
int argc,
const pal::char_t* argv[],
pal::stringstream_t& test_output)
{
int rc = StatusCode::Success;
hostfxr_exports hostfxr{ hostfxr_path };
error_writer_redirector errors{ hostfxr.set_error_writer };
int32_t buffer_size = 12;
if (argc > 0 && pal::strcmp(argv[0], _X("test_NullBufferWithNonZeroSize")) == 0)
{
rc = hostfxr.get_native_search_directories(argc, argv, nullptr, 1, &buffer_size);
test_output << _X("get_native_search_directories (null, 1) returned: ") << std::hex << std::showbase << rc << std::endl;
test_output << _X("buffer_size: ") << buffer_size << std::endl;
}
else if (argc > 0 && pal::strcmp(argv[0], _X("test_NonNullBufferWithNegativeSize")) == 0)
{
char_t temp_buffer[10];
rc = hostfxr.get_native_search_directories(argc, argv, temp_buffer, -1, &buffer_size);
test_output << _X("get_native_search_directories (temp_buffer, -1) returned: ") << std::hex << std::showbase << rc << std::endl;
test_output << _X("buffer_size: ") << buffer_size << std::endl;
}
else
{
rc = hostfxr.get_native_search_directories(argc, argv, nullptr, 0, &buffer_size);
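// The call above is the probe half of a probe-then-allocate pattern: with a
// null buffer and size 0 it is expected to fail with HostApiBufferTooSmall and
// write the required length into buffer_size, which sizes the real buffer for
// the second call below.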
if (rc != (int)StatusCode::HostApiBufferTooSmall)
{
test_output << _X("get_native_search_directories (null,0) returned unexpected error code ") << std::hex << std::showbase << rc << _X(" expected HostApiBufferTooSmall (0x80008098).") << std::endl;
test_output << _X("buffer_size: ") << buffer_size << std::endl;
goto Exit;
}
std::vector<pal::char_t> buffer;
buffer.reserve(buffer_size);
rc = hostfxr.get_native_search_directories(argc, argv, buffer.data(), buffer_size, &buffer_size);
if (rc != (int)StatusCode::Success)
{
test_output << _X("get_native_search_directories returned unexpected error code ") << std::hex << std::showbase << rc << _X(" .") << std::endl;
goto Exit;
}
pal::string_t value(buffer.data());
test_output << _X("Native search directories: '") << value.c_str() << _X("'");
}
Exit:
if (errors.has_errors())
{
test_output << _X("hostfxr reported errors:") << std::endl << errors.get_errors().c_str();
}
return rc == StatusCode::Success;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "get_native_search_directories_test.h"
#include "error_writer_redirector.h"
#include "hostfxr_exports.h"
#include <error_codes.h>
bool get_native_search_directories_test::get_for_command_line(
const pal::string_t& hostfxr_path,
int argc,
const pal::char_t* argv[],
pal::stringstream_t& test_output)
{
int rc = StatusCode::Success;
hostfxr_exports hostfxr{ hostfxr_path };
error_writer_redirector errors{ hostfxr.set_error_writer };
int32_t buffer_size = 12;
if (argc > 0 && pal::strcmp(argv[0], _X("test_NullBufferWithNonZeroSize")) == 0)
{
rc = hostfxr.get_native_search_directories(argc, argv, nullptr, 1, &buffer_size);
test_output << _X("get_native_search_directories (null, 1) returned: ") << std::hex << std::showbase << rc << std::endl;
test_output << _X("buffer_size: ") << buffer_size << std::endl;
}
else if (argc > 0 && pal::strcmp(argv[0], _X("test_NonNullBufferWithNegativeSize")) == 0)
{
char_t temp_buffer[10];
rc = hostfxr.get_native_search_directories(argc, argv, temp_buffer, -1, &buffer_size);
test_output << _X("get_native_search_directories (temp_buffer, -1) returned: ") << std::hex << std::showbase << rc << std::endl;
test_output << _X("buffer_size: ") << buffer_size << std::endl;
}
else
{
rc = hostfxr.get_native_search_directories(argc, argv, nullptr, 0, &buffer_size);
if (rc != (int)StatusCode::HostApiBufferTooSmall)
{
test_output << _X("get_native_search_directories (null,0) returned unexpected error code ") << std::hex << std::showbase << rc << _X(" expected HostApiBufferTooSmall (0x80008098).") << std::endl;
test_output << _X("buffer_size: ") << buffer_size << std::endl;
goto Exit;
}
std::vector<pal::char_t> buffer;
buffer.reserve(buffer_size);
rc = hostfxr.get_native_search_directories(argc, argv, buffer.data(), buffer_size, &buffer_size);
if (rc != (int)StatusCode::Success)
{
test_output << _X("get_native_search_directories returned unexpected error code ") << std::hex << std::showbase << rc << _X(" .") << std::endl;
goto Exit;
}
pal::string_t value(buffer.data());
test_output << _X("Native search directories: '") << value.c_str() << _X("'");
}
Exit:
if (errors.has_errors())
{
test_output << _X("hostfxr reported errors:") << std::endl << errors.get_errors().c_str();
}
return rc == StatusCode::Success;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/codegen.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This class contains all the data & functionality for code generation
// of a method, except for the target-specific elements, which are
// primarily in the Target class.
//
#ifndef _CODEGEN_H_
#define _CODEGEN_H_
#include "codegeninterface.h"
#include "compiler.h" // temporary??
#include "regset.h"
#include "jitgcinfo.h"
class CodeGen final : public CodeGenInterface
{
friend class emitter;
friend class DisAssembler;
public:
// This could use further abstraction
CodeGen(Compiler* theCompiler);
virtual void genGenerateCode(void** codePtr, uint32_t* nativeSizeOfCode);
void genGenerateMachineCode();
void genEmitMachineCode();
void genEmitUnwindDebugGCandEH();
// TODO-Cleanup: Abstract out the part of this that finds the addressing mode, and
// move it to Lower
virtual bool genCreateAddrMode(
GenTree* addr, bool fold, bool* revPtr, GenTree** rv1Ptr, GenTree** rv2Ptr, unsigned* mulPtr, ssize_t* cnsPtr);
private:
#if defined(TARGET_XARCH)
// Bit masks used in negating a float or double number.
// This is to avoid creating more than one data constant for these bitmasks when a
// method has more than one GT_NEG operation on floating point values.
CORINFO_FIELD_HANDLE negBitmaskFlt;
CORINFO_FIELD_HANDLE negBitmaskDbl;
// Bit masks used in computing Math.Abs() of a float or double number.
CORINFO_FIELD_HANDLE absBitmaskFlt;
CORINFO_FIELD_HANDLE absBitmaskDbl;
// Bit mask used in U8 -> double conversion to adjust the result.
CORINFO_FIELD_HANDLE u8ToDblBitmask;
// Generates SSE2 code for the given tree as "Operand BitWiseOp BitMask"
void genSSE2BitwiseOp(GenTree* treeNode);
// Generates SSE41 code for the given tree as a round operation
void genSSE41RoundOp(GenTreeOp* treeNode);
instruction simdAlignedMovIns()
{
// We use movaps when non-VEX because it is a smaller instruction;
// however the VEX version vmovaps would be used which is the same size as vmovdqa;
// also vmovdqa has more available CPU ports on older processors so we switch to that
return compiler->canUseVexEncoding() ? INS_movdqa : INS_movaps;
}
instruction simdUnalignedMovIns()
{
// We use movups when non-VEX because it is a smaller instruction;
// however the VEX version vmovups would be used which is the same size as vmovdqu;
// but vmovdqu has more available CPU ports on older processors so we switch to that
return compiler->canUseVexEncoding() ? INS_movdqu : INS_movups;
}
#endif // defined(TARGET_XARCH)
void genPrepForCompiler();
void genMarkLabelsForCodegen();
inline RegState* regStateForType(var_types t)
{
return varTypeUsesFloatReg(t) ? &floatRegState : &intRegState;
}
inline RegState* regStateForReg(regNumber reg)
{
return genIsValidFloatReg(reg) ? &floatRegState : &intRegState;
}
regNumber genFramePointerReg()
{
if (isFramePointerUsed())
{
return REG_FPBASE;
}
else
{
return REG_SPBASE;
}
}
static bool genShouldRoundFP();
static GenTreeIndir indirForm(var_types type, GenTree* base);
static GenTreeStoreInd storeIndirForm(var_types type, GenTree* base, GenTree* data);
GenTreeIntCon intForm(var_types type, ssize_t value);
void genRangeCheck(GenTree* node);
void genLockedInstructions(GenTreeOp* node);
#ifdef TARGET_XARCH
void genCodeForLockAdd(GenTreeOp* node);
#endif
#ifdef REG_OPT_RSVD
// On some targets such as the ARM we may need to have an extra reserved register
// that is used when addressing stack based locals and stack based temps.
// This method returns the regNumber that should be used when an extra register
// is needed to access the stack based locals and stack based temps.
//
regNumber rsGetRsvdReg()
{
// We should have already added this register to the mask
// of reserved registers in regSet.rsMaskResvd
noway_assert((regSet.rsMaskResvd & RBM_OPT_RSVD) != 0);
return REG_OPT_RSVD;
}
#endif // REG_OPT_RSVD
//-------------------------------------------------------------------------
bool genUseBlockInit; // true if we plan to block-initialize the local stack frame
unsigned genInitStkLclCnt; // The count of local variables that we need to zero init
void SubtractStackLevel(unsigned adjustment)
{
assert(genStackLevel >= adjustment);
unsigned newStackLevel = genStackLevel - adjustment;
if (genStackLevel != newStackLevel)
{
JITDUMP("Adjusting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
void AddStackLevel(unsigned adjustment)
{
unsigned newStackLevel = genStackLevel + adjustment;
if (genStackLevel != newStackLevel)
{
JITDUMP("Adjusting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
void SetStackLevel(unsigned newStackLevel)
{
if (genStackLevel != newStackLevel)
{
JITDUMP("Setting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
//-------------------------------------------------------------------------
void genReportEH();
// Allocates storage for the GC info, writes the GC info into that storage, records the address of the
// GC info of the method with the EE, and returns a pointer to the "info" portion (just post-header) of
// the GC info. Requires "codeSize" to be the size of the generated code, "prologSize" and "epilogSize"
// to be the sizes of the prolog and epilog, respectively. In DEBUG, makes a check involving the
// "codePtr", assumed to be a pointer to the start of the generated code.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef JIT32_GCENCODER
void* genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
void* genCreateAndStoreGCInfoJIT32(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr));
#else // !JIT32_GCENCODER
void genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
void genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr));
#endif // !JIT32_GCENCODER
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
// the current (pending) label ref, a label which has been referenced but not yet seen
BasicBlock* genPendingCallLabel;
void** codePtr;
uint32_t* nativeSizeOfCode;
unsigned codeSize;
void* coldCodePtr;
void* consPtr;
#ifdef DEBUG
// Last instr we have displayed for dspInstrs
unsigned genCurDispOffset;
static const char* genInsName(instruction ins);
const char* genInsDisplayName(emitter::instrDesc* id);
static const char* genSizeStr(emitAttr size);
#endif // DEBUG
void genInitialize();
void genInitializeRegisterState();
void genCodeForBBlist();
public:
void genSpillVar(GenTree* tree);
protected:
void genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTarget = REG_NA);
void genGCWriteBarrier(GenTree* tgt, GCInfo::WriteBarrierForm wbf);
BasicBlock* genCreateTempLabel();
private:
void genLogLabel(BasicBlock* bb);
protected:
void genDefineTempLabel(BasicBlock* label);
void genDefineInlineTempLabel(BasicBlock* label);
void genAdjustStackLevel(BasicBlock* block);
void genExitCode(BasicBlock* block);
void genJumpToThrowHlpBlk(emitJumpKind jumpKind, SpecialCodeKind codeKind, BasicBlock* failBlk = nullptr);
void genCheckOverflow(GenTree* tree);
//-------------------------------------------------------------------------
//
// Prolog/epilog generation
//
//-------------------------------------------------------------------------
unsigned prologSize;
unsigned epilogSize;
//
// Prolog functions and data (there are a few exceptions for more generally used things)
//
void genEstablishFramePointer(int delta, bool reportUnwindData);
void genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbered, RegState* regState);
void genEnregisterIncomingStackArgs();
#if defined(TARGET_ARM64)
void genEnregisterOSRArgsAndLocals(regNumber initReg, bool* pInitRegZeroed);
#else
void genEnregisterOSRArgsAndLocals();
#endif
void genCheckUseBlockInit();
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void genClearStackVec3ArgUpperBits();
#endif // UNIX_AMD64_ABI && FEATURE_SIMD
#if defined(TARGET_ARM64)
bool genInstrWithConstant(instruction ins,
emitAttr attr,
regNumber reg1,
regNumber reg2,
ssize_t imm,
regNumber tmpReg,
bool inUnwindRegion = false);
void genStackPointerAdjustment(ssize_t spAdjustment, regNumber tmpReg, bool* pTmpRegIsZero, bool reportUnwindData);
void genPrologSaveRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero);
void genPrologSaveReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero);
void genEpilogRestoreRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero);
void genEpilogRestoreReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero);
// A simple struct to keep register pairs for prolog and epilog.
struct RegPair
{
regNumber reg1;
regNumber reg2;
bool useSaveNextPair;
RegPair(regNumber reg1) : reg1(reg1), reg2(REG_NA), useSaveNextPair(false)
{
}
RegPair(regNumber reg1, regNumber reg2) : reg1(reg1), reg2(reg2), useSaveNextPair(false)
{
assert(reg2 == REG_NEXT(reg1));
}
};
static void genBuildRegPairsStack(regMaskTP regsMask, ArrayStack<RegPair>* regStack);
static void genSetUseSaveNextPairs(ArrayStack<RegPair>* regStack);
static int genGetSlotSizeForRegsInMask(regMaskTP regsMask);
void genSaveCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset);
void genRestoreCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset);
void genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask, int lowestCalleeSavedOffset, int spDelta);
void genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask, int lowestCalleeSavedOffset, int spDelta);
void genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed);
#else
void genPushCalleeSavedRegisters();
#endif
#if defined(TARGET_AMD64)
void genOSRRecordTier0CalleeSavedRegistersAndFrame();
void genOSRSaveRemainingCalleeSavedRegisters();
#endif // TARGET_AMD64
void genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn);
void genPoisonFrame(regMaskTP bbRegLiveIn);
#if defined(TARGET_ARM)
bool genInstrWithConstant(
instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm, insFlags flags, regNumber tmpReg);
bool genStackPointerAdjustment(ssize_t spAdjustment, regNumber tmpReg);
void genPushFltRegs(regMaskTP regMask);
void genPopFltRegs(regMaskTP regMask);
regMaskTP genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskCalleeSavedFloat);
regMaskTP genJmpCallArgMask();
void genFreeLclFrame(unsigned frameSize,
/* IN OUT */ bool* pUnwindStarted);
void genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg);
void genMov32RelocatableDataLabel(unsigned value, regNumber reg);
void genMov32RelocatableImmediate(emitAttr size, BYTE* addr, regNumber reg);
bool genUsedPopToReturn; // True if we use the pop into PC to return,
// False if we didn't and must branch to LR to return.
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
regMaskTP fiSaveRegs; // Set of registers saved in the funclet prolog (includes LR)
unsigned fiFunctionCallerSPtoFPdelta; // Delta between caller SP and the frame pointer
unsigned fiSpDelta; // Stack pointer delta
unsigned fiPSP_slot_SP_offset; // PSP slot offset from SP
int fiPSP_slot_CallerSP_offset; // PSP slot offset from Caller SP
};
FuncletFrameInfoDsc genFuncletInfo;
#elif defined(TARGET_ARM64)
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
regMaskTP fiSaveRegs; // Set of callee-saved registers saved in the funclet prolog (includes LR)
int fiFunction_CallerSP_to_FP_delta; // Delta between caller SP and the frame pointer in the parent function
// (negative)
int fiSP_to_FPLR_save_delta; // FP/LR register save offset from SP (positive)
int fiSP_to_PSP_slot_delta; // PSP slot offset from SP (positive)
int fiSP_to_CalleeSave_delta; // First callee-saved register slot offset from SP (positive)
int fiCallerSP_to_PSP_slot_delta; // PSP slot offset from Caller SP (negative)
int fiFrameType; // Funclet frame types are numbered. See genFuncletProlog() for details.
int fiSpDelta1; // Stack pointer delta 1 (negative)
int fiSpDelta2; // Stack pointer delta 2 (negative)
};
FuncletFrameInfoDsc genFuncletInfo;
#elif defined(TARGET_AMD64)
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
unsigned fiFunction_InitialSP_to_FP_delta; // Delta between Initial-SP and the frame pointer
unsigned fiSpDelta; // Stack pointer delta
int fiPSP_slot_InitialSP_offset; // PSP slot offset from Initial-SP
};
FuncletFrameInfoDsc genFuncletInfo;
#endif // TARGET_AMD64
#if defined(TARGET_XARCH)
// Save/Restore callee saved float regs to stack
void genPreserveCalleeSavedFltRegs(unsigned lclFrameSize);
void genRestoreCalleeSavedFltRegs(unsigned lclFrameSize);
// Generate VZeroupper instruction to avoid AVX/SSE transition penalty
void genVzeroupperIfNeeded(bool check256bitOnly = true);
#endif // TARGET_XARCH
void genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& initDblRegs, const regNumber& initReg);
regNumber genGetZeroReg(regNumber initReg, bool* pInitRegZeroed);
void genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed);
void genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed);
void genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed);
void genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed);
void genFinalizeFrame();
#ifdef PROFILING_SUPPORTED
void genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed);
void genProfilingLeaveCallback(unsigned helper);
#endif // PROFILING_SUPPORTED
// clang-format off
void genEmitCall(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
void* addr
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
regNumber base,
bool isJump);
// clang-format on
// clang-format off
void genEmitCallIndir(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
GenTreeIndir* indir
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
bool isJump);
// clang-format on
//
// Epilog functions
//
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
bool genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog);
#endif
#if defined(TARGET_ARM64)
void genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog);
#else // !defined(TARGET_ARM64)
void genPopCalleeSavedRegisters(bool jmpEpilog = false);
#if defined(TARGET_XARCH)
unsigned genPopCalleeSavedRegistersFromMask(regMaskTP rsPopRegs);
#endif // defined(TARGET_XARCH)
#endif // !defined(TARGET_ARM64)
//
// Common or driving functions
//
void genReserveProlog(BasicBlock* block); // currently unused
void genReserveEpilog(BasicBlock* block);
void genFnProlog();
void genFnEpilog(BasicBlock* block);
#if defined(FEATURE_EH_FUNCLETS)
void genReserveFuncletProlog(BasicBlock* block);
void genReserveFuncletEpilog(BasicBlock* block);
void genFuncletProlog(BasicBlock* block);
void genFuncletEpilog();
void genCaptureFuncletPrologEpilogInfo();
/*-----------------------------------------------------------------------------
*
* Set the main function PSPSym value in the frame.
* Funclets use different code to load the PSP sym and save it in their frame.
* See the document "CLR ABI.md" for a full description of the PSPSym.
* The PSPSym section of that document is copied here.
*
***********************************
* The name PSPSym stands for Previous Stack Pointer Symbol. It is how a funclet
* accesses locals from the main function body.
*
* First, two definitions.
*
* Caller-SP is the value of the stack pointer in a function's caller before the call
* instruction is executed. That is, when function A calls function B, Caller-SP for B
* is the value of the stack pointer immediately before the call instruction in A
* (calling B) was executed. Note that this definition holds for both AMD64, which
 * pushes the return address when a call instruction is executed, and for ARM, which
* doesn't. For AMD64, Caller-SP is the address above the call return address.
*
* Initial-SP is the initial value of the stack pointer after the fixed-size portion of
* the frame has been allocated. That is, before any "alloca"-type allocations.
*
* The PSPSym is a pointer-sized local variable in the frame of the main function and
* of each funclet. The value stored in PSPSym is the value of Initial-SP/Caller-SP
* for the main function. The stack offset of the PSPSym is reported to the VM in the
* GC information header. The value reported in the GC information is the offset of the
* PSPSym from Initial-SP/Caller-SP. (Note that both the value stored, and the way the
 * value is reported to the VM, differ between architectures. In particular, note that
* most things in the GC information header are reported as offsets relative to Caller-SP,
* but PSPSym on AMD64 is one (maybe the only) exception.)
*
* The VM uses the PSPSym to find other locals it cares about (such as the generics context
* in a funclet frame). The JIT uses it to re-establish the frame pointer register, so that
* the frame pointer is the same value in a funclet as it is in the main function body.
*
* When a funclet is called, it is passed the Establisher Frame Pointer. For AMD64 this is
* true for all funclets and it is passed as the first argument in RCX, but for ARM this is
* only true for first pass funclets (currently just filters) and it is passed as the second
* argument in R1. The Establisher Frame Pointer is a stack pointer of an interesting "parent"
* frame in the exception processing system. For the CLR, it points either to the main function
* frame or a dynamically enclosing funclet frame from the same function, for the funclet being
* invoked. The value of the Establisher Frame Pointer is Initial-SP on AMD64, Caller-SP on ARM.
*
* Using the establisher frame, the funclet wants to load the value of the PSPSym. Since we
* don't know if the Establisher Frame is from the main function or a funclet, we design the
* main function and funclet frame layouts to place the PSPSym at an identical, small, constant
* offset from the Establisher Frame in each case. (This is also required because we only report
* a single offset to the PSPSym in the GC information, and that offset must be valid for the main
* function and all of its funclets). Then, the funclet uses this known offset to compute the
* PSPSym address and read its value. From this, it can compute the value of the frame pointer
* (which is a constant offset from the PSPSym value) and set the frame register to be the same
* as the parent function. Also, the funclet writes the value of the PSPSym to its own frame's
* PSPSym. This "copying" of the PSPSym happens for every funclet invocation, in particular,
* for every nested funclet invocation.
*
* On ARM, for all second pass funclets (finally, fault, catch, and filter-handler) the VM
* restores all non-volatile registers to their values within the parent frame. This includes
* the frame register (R11). Thus, the PSPSym is not used to recompute the frame pointer register
* in this case, though the PSPSym is copied to the funclet's frame, as for all funclets.
*
* Catch, Filter, and Filter-handlers also get an Exception object (GC ref) as an argument
* (REG_EXCEPTION_OBJECT). On AMD64 it is the second argument and thus passed in RDX. On
* ARM this is the first argument and passed in R0.
*
* (Note that the JIT64 source code contains a comment that says, "The current CLR doesn't always
* pass the correct establisher frame to the funclet. Funclet may receive establisher frame of
* funclet when expecting that of original routine." It indicates this is the reason that a PSPSym
* is required in all funclets as well as the main function, whereas if the establisher frame was
* correctly reported, the PSPSym could be omitted in some cases.)
***********************************
*/
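    // Illustrative pseudo-code for the pattern described above (a sketch only; registers, signs and
    // offsets vary by target and frame type, and are computed per-frame by the code below):
    //
    //   pspValue = *(establisherFrame + pspSymOffset); // read the parent's PSPSym via the known offset
    //   *(funcletFrame + pspSymOffset) = pspValue;     // copy it into the funclet's own frame
    //   frameReg = pspValue + constantDelta;           // re-establish the parent function's frame pointer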
void genSetPSPSym(regNumber initReg, bool* pInitRegZeroed);
void genUpdateCurrentFunclet(BasicBlock* block);
#if defined(TARGET_ARM)
void genInsertNopForUnwinder(BasicBlock* block);
#endif
#else // !FEATURE_EH_FUNCLETS
// This is a no-op when there are no funclets!
void genUpdateCurrentFunclet(BasicBlock* block)
{
return;
}
#endif // !FEATURE_EH_FUNCLETS
void genGeneratePrologsAndEpilogs();
#if defined(DEBUG) && defined(TARGET_ARM64)
void genArm64EmitterUnitTests();
#endif
#if defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
void genAmd64EmitterUnitTests();
#endif
#ifdef TARGET_ARM64
virtual void SetSaveFpLrWithAllCalleeSavedRegisters(bool value);
virtual bool IsSaveFpLrWithAllCalleeSavedRegisters() const;
bool genSaveFpLrWithAllCalleeSavedRegisters;
#endif // TARGET_ARM64
//-------------------------------------------------------------------------
//
// End prolog/epilog generation
//
//-------------------------------------------------------------------------
void genSinglePush();
void genSinglePop();
regMaskTP genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP* noRefRegs);
void genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Debugging Support XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#ifdef DEBUG
void genIPmappingDisp(unsigned mappingNum, IPmappingDsc* ipMapping);
void genIPmappingListDisp();
#endif // DEBUG
void genIPmappingAdd(IPmappingDscKind kind, const DebugInfo& di, bool isLabel);
void genIPmappingAddToFront(IPmappingDscKind kind, const DebugInfo& di, bool isLabel);
void genIPmappingGen();
#ifdef DEBUG
void genDumpPreciseDebugInfo();
void genDumpPreciseDebugInfoInlineTree(FILE* file, InlineContext* context, bool* first);
void genAddPreciseIPMappingHere(const DebugInfo& di);
#endif
void genEnsureCodeEmitted(const DebugInfo& di);
//-------------------------------------------------------------------------
// scope info for the variables
void genSetScopeInfo(unsigned which,
UNATIVE_OFFSET startOffs,
UNATIVE_OFFSET length,
unsigned varNum,
unsigned LVnum,
bool avail,
siVarLoc* varLoc);
void genSetScopeInfo();
#ifdef USING_VARIABLE_LIVE_RANGE
// Send VariableLiveRanges as debug info to the debugger
void genSetScopeInfoUsingVariableRanges();
#endif // USING_VARIABLE_LIVE_RANGE
#ifdef USING_SCOPE_INFO
void genSetScopeInfoUsingsiScope();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX ScopeInfo XX
XX XX
XX Keeps track of the scopes during code-generation. XX
XX This is used to translate the local-variable debugging information XX
XX from IL offsets to native code offsets. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
/*****************************************************************************
* ScopeInfo
*
* This class is called during code gen at block-boundaries, and when the
* set of live variables changes. It keeps track of the scope of the variables
* in terms of the native code PC.
*/
#endif // USING_SCOPE_INFO
public:
void siInit();
void checkICodeDebugInfo();
// The logic used to report debug info on debug code is the same for ScopeInfo and
// VariableLiveRange
void siBeginBlock(BasicBlock* block);
void siEndBlock(BasicBlock* block);
    // VariableLiveRange and siScope need this method to report variables on debug code
void siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned int lastBlockILEndOffset);
protected:
#if defined(FEATURE_EH_FUNCLETS)
bool siInFuncletRegion; // Have we seen the start of the funclet region?
#endif // FEATURE_EH_FUNCLETS
IL_OFFSET siLastEndOffs; // IL offset of the (exclusive) end of the last block processed
#ifdef USING_SCOPE_INFO
public:
// Closes the "ScopeInfo" of the tracked variables that has become dead.
virtual void siUpdate();
void siCheckVarScope(unsigned varNum, IL_OFFSET offs);
void siCloseAllOpenScopes();
#ifdef DEBUG
void siDispOpenScopes();
#endif
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
struct siScope
{
emitLocation scStartLoc; // emitter location of start of scope
emitLocation scEndLoc; // emitter location of end of scope
unsigned scVarNum; // index into lvaTable
unsigned scLVnum; // 'which' in eeGetLVinfo()
unsigned scStackLevel; // Only for stk-vars
siScope* scPrev;
siScope* scNext;
};
// Returns a "siVarLoc" instance representing the place where the variable lives base on
// varDsc and scope description.
CodeGenInterface::siVarLoc getSiVarLoc(const LclVarDsc* varDsc, const siScope* scope) const;
siScope siOpenScopeList, siScopeList, *siOpenScopeLast, *siScopeLast;
unsigned siScopeCnt;
VARSET_TP siLastLife; // Life at last call to siUpdate()
// Tracks the last entry for each tracked register variable
siScope** siLatestTrackedScopes;
// Functions
siScope* siNewScope(unsigned LVnum, unsigned varNum);
void siRemoveFromOpenScopeList(siScope* scope);
void siEndTrackedScope(unsigned varIndex);
void siEndScope(unsigned varNum);
void siEndScope(siScope* scope);
#ifdef DEBUG
bool siVerifyLocalVarTab();
#endif
#ifdef LATE_DISASM
public:
/* virtual */
const char* siRegVarName(size_t offs, size_t size, unsigned reg);
/* virtual */
const char* siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs);
#endif // LATE_DISASM
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX PrologScopeInfo XX
XX XX
XX We need special handling in the prolog block, as the parameter variables XX
XX may not be in the same position described by genLclVarTable - they all XX
XX start out on the stack XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#endif // USING_SCOPE_INFO
public:
void psiBegProlog();
void psiEndProlog();
#ifdef USING_SCOPE_INFO
void psiAdjustStackLevel(unsigned size);
// For EBP-frames, the parameters are accessed via ESP on entry to the function,
// but via EBP right after a "mov ebp,esp" instruction.
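    // For example (illustrative): an incoming argument addressed as [esp+4] at the first prolog
    // instruction is addressed as [ebp+8] once "push ebp; mov ebp, esp" has executed.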
void psiMoveESPtoEBP();
// Close previous psiScope and open a new one on the location described by the registers.
void psiMoveToReg(unsigned varNum, regNumber reg = REG_NA, regNumber otherReg = REG_NA);
// Search the open "psiScope" of the "varNum" parameter, close it and open
// a new one using "LclVarDsc" fields.
void psiMoveToStack(unsigned varNum);
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
struct psiScope
{
emitLocation scStartLoc; // emitter location of start of scope
emitLocation scEndLoc; // emitter location of end of scope
unsigned scSlotNum; // index into lclVarTab
unsigned scLVnum; // 'which' in eeGetLVinfo()
bool scRegister;
union {
struct
{
regNumberSmall scRegNum;
                // Used for:
                //  - the "other half" of a long var on architectures with 32-bit registers (x86).
                //  - for System V structs, it stores the second register
                //    used to pass a register-passed struct.
regNumberSmall scOtherReg;
} u1;
struct
{
regNumberSmall scBaseReg;
NATIVE_OFFSET scOffset;
} u2;
};
psiScope* scPrev;
psiScope* scNext;
// Returns a "siVarLoc" instance representing the place where the variable lives base on
// psiScope properties.
CodeGenInterface::siVarLoc getSiVarLoc() const;
};
psiScope psiOpenScopeList, psiScopeList, *psiOpenScopeLast, *psiScopeLast;
unsigned psiScopeCnt;
// Implementation Functions
psiScope* psiNewPrologScope(unsigned LVnum, unsigned slotNum);
void psiEndPrologScope(psiScope* scope);
void psiSetScopeOffset(psiScope* newScope, const LclVarDsc* lclVarDsc) const;
#endif // USING_SCOPE_INFO
NATIVE_OFFSET psiGetVarStackOffset(const LclVarDsc* lclVarDsc) const;
/*****************************************************************************
* TrnslLocalVarInfo
*
* This struct holds the LocalVarInfo in terms of the generated native code
* after a call to genSetScopeInfo()
*/
protected:
#ifdef DEBUG
struct TrnslLocalVarInfo
{
unsigned tlviVarNum;
unsigned tlviLVnum;
VarName tlviName;
UNATIVE_OFFSET tlviStartPC;
size_t tlviLength;
bool tlviAvailable;
siVarLoc tlviVarLoc;
};
// Array of scopes of LocalVars in terms of native code
TrnslLocalVarInfo* genTrnslLocalVarInfo;
unsigned genTrnslLocalVarCount;
#endif
void genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree);
void genCodeForTreeNode(GenTree* treeNode);
void genCodeForBinary(GenTreeOp* treeNode);
#if defined(TARGET_X86)
void genCodeForLongUMod(GenTreeOp* node);
#endif // TARGET_X86
void genCodeForDivMod(GenTreeOp* treeNode);
void genCodeForMul(GenTreeOp* treeNode);
void genCodeForIncSaturate(GenTree* treeNode);
void genCodeForMulHi(GenTreeOp* treeNode);
void genLeaInstruction(GenTreeAddrMode* lea);
void genSetRegToCond(regNumber dstReg, GenTree* tree);
#if defined(TARGET_ARMARCH)
void genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale);
void genCodeForMulLong(GenTreeOp* mul);
#endif // TARGET_ARMARCH
#if !defined(TARGET_64BIT)
void genLongToIntCast(GenTree* treeNode);
#endif
// Generate code for a GT_BITCAST that is not contained.
void genCodeForBitCast(GenTreeOp* treeNode);
// Generate the instruction to move a value between register files
void genBitCast(var_types targetType, regNumber targetReg, var_types srcType, regNumber srcReg);
struct GenIntCastDesc
{
enum CheckKind
{
CHECK_NONE,
CHECK_SMALL_INT_RANGE,
CHECK_POSITIVE,
#ifdef TARGET_64BIT
CHECK_UINT_RANGE,
CHECK_POSITIVE_INT_RANGE,
CHECK_INT_RANGE,
#endif
};
enum ExtendKind
{
COPY,
ZERO_EXTEND_SMALL_INT,
SIGN_EXTEND_SMALL_INT,
#ifdef TARGET_64BIT
ZERO_EXTEND_INT,
SIGN_EXTEND_INT,
#endif
};
private:
CheckKind m_checkKind;
unsigned m_checkSrcSize;
int m_checkSmallIntMin;
int m_checkSmallIntMax;
ExtendKind m_extendKind;
unsigned m_extendSrcSize;
public:
GenIntCastDesc(GenTreeCast* cast);
CheckKind CheckKind() const
{
return m_checkKind;
}
unsigned CheckSrcSize() const
{
assert(m_checkKind != CHECK_NONE);
return m_checkSrcSize;
}
int CheckSmallIntMin() const
{
assert(m_checkKind == CHECK_SMALL_INT_RANGE);
return m_checkSmallIntMin;
}
int CheckSmallIntMax() const
{
assert(m_checkKind == CHECK_SMALL_INT_RANGE);
return m_checkSmallIntMax;
}
ExtendKind ExtendKind() const
{
return m_extendKind;
}
unsigned ExtendSrcSize() const
{
return m_extendSrcSize;
}
};
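    // Illustrative example for GenIntCastDesc: a checked cast from a 64-bit source to a signed 8-bit
    // target would typically be described as CHECK_SMALL_INT_RANGE with bounds [-128, 127] combined
    // with SIGN_EXTEND_SMALL_INT, while an unchecked same-size integer copy is simply COPY/CHECK_NONE.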
void genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg);
void genIntToIntCast(GenTreeCast* cast);
void genFloatToFloatCast(GenTree* treeNode);
void genFloatToIntCast(GenTree* treeNode);
void genIntToFloatCast(GenTree* treeNode);
void genCkfinite(GenTree* treeNode);
void genCodeForCompare(GenTreeOp* tree);
void genIntrinsic(GenTree* treeNode);
void genPutArgStk(GenTreePutArgStk* treeNode);
void genPutArgReg(GenTreeOp* tree);
#if FEATURE_ARG_SPLIT
void genPutArgSplit(GenTreePutArgSplit* treeNode);
#endif // FEATURE_ARG_SPLIT
#if defined(TARGET_XARCH)
unsigned getBaseVarForPutArgStk(GenTree* treeNode);
#endif // TARGET_XARCH
unsigned getFirstArgWithStackSlot();
void genCompareFloat(GenTree* treeNode);
void genCompareInt(GenTree* treeNode);
#ifdef FEATURE_SIMD
enum SIMDScalarMoveType{
        SMT_ZeroInitUpper,                  // zero initialize target upper bits
SMT_ZeroInitUpper_SrcHasUpperZeros, // zero initialize target upper bits; source upper bits are known to be zero
SMT_PreserveUpper // preserve target upper bits
};
#ifdef TARGET_ARM64
insOpts genGetSimdInsOpt(emitAttr size, var_types elementType);
#endif
instruction getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned* ival = nullptr);
void genSIMDScalarMove(
var_types targetType, var_types type, regNumber target, regNumber src, SIMDScalarMoveType moveType);
void genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg);
void genSIMDIntrinsicInit(GenTreeSIMD* simdNode);
void genSIMDIntrinsicInitN(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode);
void genSIMDLo64BitConvert(SIMDIntrinsicID intrinsicID,
var_types simdType,
var_types baseType,
regNumber tmpReg,
regNumber tmpIntReg,
regNumber targetReg);
void genSIMDIntrinsic32BitConvert(GenTreeSIMD* simdNode);
void genSIMDIntrinsic64BitConvert(GenTreeSIMD* simdNode);
void genSIMDExtractUpperHalf(GenTreeSIMD* simdNode, regNumber srcReg, regNumber tgtReg);
void genSIMDIntrinsic(GenTreeSIMD* simdNode);
    // TYP_SIMD12 (i.e. Vector3 of size 12 bytes) is not a hardware-supported size and requires
// two reads/writes on 64-bit targets. These routines abstract reading/writing of Vector3
// values through an indirection. Note that Vector3 locals allocated on stack would have
// their size rounded to TARGET_POINTER_SIZE (which is 8 bytes on 64-bit targets) and hence
// Vector3 locals could be treated as TYP_SIMD16 while reading/writing.
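    // For example (illustrative), a TYP_SIMD12 store through an indirection is typically emitted as an
    // 8-byte move of the lower two floats followed by a separate 4-byte move of the third float.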
void genStoreIndTypeSIMD12(GenTree* treeNode);
void genLoadIndTypeSIMD12(GenTree* treeNode);
void genStoreLclTypeSIMD12(GenTree* treeNode);
void genLoadLclTypeSIMD12(GenTree* treeNode);
#ifdef TARGET_X86
void genStoreSIMD12ToStack(regNumber operandReg, regNumber tmpReg);
void genPutArgStkSIMD12(GenTree* treeNode);
#endif // TARGET_X86
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
void genHWIntrinsic(GenTreeHWIntrinsic* node);
#if defined(TARGET_XARCH)
void genHWIntrinsic_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber reg, GenTree* rmOp);
void genHWIntrinsic_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, int8_t ival);
void genHWIntrinsic_R_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr);
void genHWIntrinsic_R_R_RM(
GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, GenTree* op2);
void genHWIntrinsic_R_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, int8_t ival);
void genHWIntrinsic_R_R_RM_R(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr);
void genHWIntrinsic_R_R_R_RM(
instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, GenTree* op3);
void genBaseIntrinsic(GenTreeHWIntrinsic* node);
void genX86BaseIntrinsic(GenTreeHWIntrinsic* node);
void genSSEIntrinsic(GenTreeHWIntrinsic* node);
void genSSE2Intrinsic(GenTreeHWIntrinsic* node);
void genSSE41Intrinsic(GenTreeHWIntrinsic* node);
void genSSE42Intrinsic(GenTreeHWIntrinsic* node);
void genAvxOrAvx2Intrinsic(GenTreeHWIntrinsic* node);
void genAESIntrinsic(GenTreeHWIntrinsic* node);
void genBMI1OrBMI2Intrinsic(GenTreeHWIntrinsic* node);
void genFMAIntrinsic(GenTreeHWIntrinsic* node);
void genLZCNTIntrinsic(GenTreeHWIntrinsic* node);
void genPCLMULQDQIntrinsic(GenTreeHWIntrinsic* node);
void genPOPCNTIntrinsic(GenTreeHWIntrinsic* node);
void genXCNTIntrinsic(GenTreeHWIntrinsic* node, instruction ins);
template <typename HWIntrinsicSwitchCaseBody>
void genHWIntrinsicJumpTableFallback(NamedIntrinsic intrinsic,
regNumber nonConstImmReg,
regNumber baseReg,
regNumber offsReg,
HWIntrinsicSwitchCaseBody emitSwCase);
#endif // defined(TARGET_XARCH)
#ifdef TARGET_ARM64
class HWIntrinsicImmOpHelper final
{
public:
HWIntrinsicImmOpHelper(CodeGen* codeGen, GenTree* immOp, GenTreeHWIntrinsic* intrin);
void EmitBegin();
void EmitCaseEnd();
// Returns true after the last call to EmitCaseEnd() (i.e. this signals that code generation is done).
bool Done() const
{
return (immValue > immUpperBound);
}
        // Returns the value of the immediate operand that should be used for the current case.
int ImmValue() const
{
return immValue;
}
private:
        // Returns true if immOp is a non-contained immediate (i.e. the value of the immediate operand is
        // enregistered in nonConstImmReg).
bool NonConstImmOp() const
{
return nonConstImmReg != REG_NA;
}
        // Returns true if a non-constant immediate operand can be either 0 or 1.
bool TestImmOpZeroOrOne() const
{
assert(NonConstImmOp());
return (immLowerBound == 0) && (immUpperBound == 1);
}
emitter* GetEmitter() const
{
return codeGen->GetEmitter();
}
CodeGen* const codeGen;
BasicBlock* endLabel;
BasicBlock* nonZeroLabel;
int immValue;
int immLowerBound;
int immUpperBound;
regNumber nonConstImmReg;
regNumber branchTargetReg;
};
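    // Illustrative usage sketch for HWIntrinsicImmOpHelper (the real call sites live in the ARM64
    // HW intrinsic codegen; the variable names below are only for exposition):
    //
    //   HWIntrinsicImmOpHelper helper(this, immOp, intrin);
    //   for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
    //   {
    //       const int imm = helper.ImmValue();
    //       // ... emit the instruction form that encodes 'imm' ...
    //   }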
#endif // TARGET_ARM64
#endif // FEATURE_HW_INTRINSICS
#if !defined(TARGET_64BIT)
// CodeGen for Long Ints
void genStoreLongLclVar(GenTree* treeNode);
#endif // !defined(TARGET_64BIT)
    // Do liveness update for the register produced by the current node in codegen after
// code has been emitted for it.
void genProduceReg(GenTree* tree);
void genSpillLocal(unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum);
void genUnspillLocal(
unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum, bool reSpill, bool isLastUse);
void genUnspillRegIfNeeded(GenTree* tree);
void genUnspillRegIfNeeded(GenTree* tree, unsigned multiRegIndex);
regNumber genConsumeReg(GenTree* tree);
regNumber genConsumeReg(GenTree* tree, unsigned multiRegIndex);
void genCopyRegIfNeeded(GenTree* tree, regNumber needReg);
void genConsumeRegAndCopy(GenTree* tree, regNumber needReg);
void genConsumeIfReg(GenTree* tree)
{
if (!tree->isContained())
{
(void)genConsumeReg(tree);
}
}
void genRegCopy(GenTree* tree);
regNumber genRegCopy(GenTree* tree, unsigned multiRegIndex);
void genTransferRegGCState(regNumber dst, regNumber src);
void genConsumeAddress(GenTree* addr);
void genConsumeAddrMode(GenTreeAddrMode* mode);
void genSetBlockSize(GenTreeBlk* blkNode, regNumber sizeReg);
void genConsumeBlockSrc(GenTreeBlk* blkNode);
void genSetBlockSrc(GenTreeBlk* blkNode, regNumber srcReg);
void genConsumeBlockOp(GenTreeBlk* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg);
#ifdef FEATURE_PUT_STRUCT_ARG_STK
void genConsumePutStructArgStk(GenTreePutArgStk* putArgStkNode,
regNumber dstReg,
regNumber srcReg,
regNumber sizeReg);
#endif // FEATURE_PUT_STRUCT_ARG_STK
#if FEATURE_ARG_SPLIT
void genConsumeArgSplitStruct(GenTreePutArgSplit* putArgNode);
#endif // FEATURE_ARG_SPLIT
void genConsumeRegs(GenTree* tree);
void genConsumeOperands(GenTreeOp* tree);
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void genConsumeMultiOpOperands(GenTreeMultiOp* tree);
#endif
void genEmitGSCookieCheck(bool pushReg);
void genCodeForShift(GenTree* tree);
#if defined(TARGET_X86) || defined(TARGET_ARM)
void genCodeForShiftLong(GenTree* tree);
#endif
#ifdef TARGET_XARCH
void genCodeForShiftRMW(GenTreeStoreInd* storeInd);
void genCodeForBT(GenTreeOp* bt);
#endif // TARGET_XARCH
void genCodeForCast(GenTreeOp* tree);
void genCodeForLclAddr(GenTree* tree);
void genCodeForIndexAddr(GenTreeIndexAddr* tree);
void genCodeForIndir(GenTreeIndir* tree);
void genCodeForNegNot(GenTree* tree);
void genCodeForBswap(GenTree* tree);
void genCodeForLclVar(GenTreeLclVar* tree);
void genCodeForLclFld(GenTreeLclFld* tree);
void genCodeForStoreLclFld(GenTreeLclFld* tree);
void genCodeForStoreLclVar(GenTreeLclVar* tree);
void genCodeForReturnTrap(GenTreeOp* tree);
void genCodeForJcc(GenTreeCC* tree);
void genCodeForSetcc(GenTreeCC* setcc);
void genCodeForStoreInd(GenTreeStoreInd* tree);
void genCodeForSwap(GenTreeOp* tree);
void genCodeForCpObj(GenTreeObj* cpObjNode);
void genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode);
void genCodeForCpBlkUnroll(GenTreeBlk* cpBlkNode);
#ifndef TARGET_X86
void genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode);
#endif
void genCodeForPhysReg(GenTreePhysReg* tree);
void genCodeForNullCheck(GenTreeIndir* tree);
void genCodeForCmpXchg(GenTreeCmpXchg* tree);
void genAlignStackBeforeCall(GenTreePutArgStk* putArgStk);
void genAlignStackBeforeCall(GenTreeCall* call);
void genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias = 0);
#if defined(UNIX_X86_ABI)
unsigned curNestedAlignment; // Keep track of alignment adjustment required during codegen.
unsigned maxNestedAlignment; // The maximum amount of alignment adjustment required.
void SubtractNestedAlignment(unsigned adjustment)
{
assert(curNestedAlignment >= adjustment);
unsigned newNestedAlignment = curNestedAlignment - adjustment;
if (curNestedAlignment != newNestedAlignment)
{
JITDUMP("Adjusting stack nested alignment from %d to %d\n", curNestedAlignment, newNestedAlignment);
}
curNestedAlignment = newNestedAlignment;
}
void AddNestedAlignment(unsigned adjustment)
{
unsigned newNestedAlignment = curNestedAlignment + adjustment;
if (curNestedAlignment != newNestedAlignment)
{
JITDUMP("Adjusting stack nested alignment from %d to %d\n", curNestedAlignment, newNestedAlignment);
}
curNestedAlignment = newNestedAlignment;
if (curNestedAlignment > maxNestedAlignment)
{
JITDUMP("Max stack nested alignment changed from %d to %d\n", maxNestedAlignment, curNestedAlignment);
maxNestedAlignment = curNestedAlignment;
}
}
#endif
#ifndef TARGET_X86
void genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArgVarNum);
#endif // !TARGET_X86
#ifdef FEATURE_PUT_STRUCT_ARG_STK
#ifdef TARGET_X86
bool genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk);
void genPushReg(var_types type, regNumber srcReg);
void genPutArgStkFieldList(GenTreePutArgStk* putArgStk);
#endif // TARGET_X86
void genPutStructArgStk(GenTreePutArgStk* treeNode);
unsigned genMove8IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove4IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove2IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove1IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
void genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset);
void genStoreRegToStackArg(var_types type, regNumber reg, int offset);
void genStructPutArgRepMovs(GenTreePutArgStk* putArgStkNode);
void genStructPutArgUnroll(GenTreePutArgStk* putArgStkNode);
#ifdef TARGET_X86
void genStructPutArgPush(GenTreePutArgStk* putArgStkNode);
#else
void genStructPutArgPartialRepMovs(GenTreePutArgStk* putArgStkNode);
#endif
#endif // FEATURE_PUT_STRUCT_ARG_STK
void genCodeForStoreBlk(GenTreeBlk* storeBlkNode);
#ifndef TARGET_X86
void genCodeForInitBlkHelper(GenTreeBlk* initBlkNode);
#endif
void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode);
void genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode);
void genJumpTable(GenTree* tree);
void genTableBasedSwitch(GenTree* tree);
void genCodeForArrIndex(GenTreeArrIndex* treeNode);
void genCodeForArrOffset(GenTreeArrOffs* treeNode);
instruction genGetInsForOper(genTreeOps oper, var_types type);
bool genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data);
GenTree* getCallTarget(const GenTreeCall* call, CORINFO_METHOD_HANDLE* methHnd);
regNumber getCallIndirectionCellReg(const GenTreeCall* call);
void genCall(GenTreeCall* call);
void genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes));
void genJmpMethod(GenTree* jmp);
BasicBlock* genCallFinally(BasicBlock* block);
void genCodeForJumpTrue(GenTreeOp* jtrue);
#ifdef TARGET_ARM64
void genCodeForJumpCompare(GenTreeOp* tree);
void genCodeForMadd(GenTreeOp* tree);
void genCodeForBfiz(GenTreeOp* tree);
void genCodeForAddEx(GenTreeOp* tree);
#endif // TARGET_ARM64
#if defined(FEATURE_EH_FUNCLETS)
void genEHCatchRet(BasicBlock* block);
#else // !FEATURE_EH_FUNCLETS
void genEHFinallyOrFilterRet(BasicBlock* block);
#endif // !FEATURE_EH_FUNCLETS
void genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode);
void genMultiRegStoreToLocal(GenTreeLclVar* lclNode);
// Codegen for multi-register struct returns.
bool isStructReturn(GenTree* treeNode);
#ifdef FEATURE_SIMD
void genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc);
#endif
void genStructReturn(GenTree* treeNode);
#if defined(TARGET_X86) || defined(TARGET_ARM)
void genLongReturn(GenTree* treeNode);
#endif // TARGET_X86 || TARGET_ARM
#if defined(TARGET_X86)
void genFloatReturn(GenTree* treeNode);
#endif // TARGET_X86
#if defined(TARGET_ARM64)
void genSimpleReturn(GenTree* treeNode);
#endif // TARGET_ARM64
void genReturn(GenTree* treeNode);
void genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp);
void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp);
target_ssize_t genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp);
#if defined(TARGET_XARCH)
void genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta, regNumber regTmp);
#endif // defined(TARGET_XARCH)
void genLclHeap(GenTree* tree);
bool genIsRegCandidateLocal(GenTree* tree)
{
if (!tree->IsLocal())
{
return false;
}
return compiler->lvaGetDesc(tree->AsLclVarCommon())->lvIsRegCandidate();
}
#ifdef FEATURE_PUT_STRUCT_ARG_STK
#ifdef TARGET_X86
bool m_pushStkArg;
#else // !TARGET_X86
unsigned m_stkArgVarNum;
unsigned m_stkArgOffset;
#endif // !TARGET_X86
#endif // FEATURE_PUT_STRUCT_ARG_STK
#if defined(DEBUG) && defined(TARGET_XARCH)
void genStackPointerCheck(bool doStackPointerCheck, unsigned lvaStackPointerVar);
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#ifdef DEBUG
GenTree* lastConsumedNode;
void genNumberOperandUse(GenTree* const operand, int& useNum) const;
void genCheckConsumeNode(GenTree* const node);
#else // !DEBUG
inline void genCheckConsumeNode(GenTree* treeNode)
{
}
#endif // DEBUG
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Instruction XX
XX XX
XX The interface to generate a machine-instruction. XX
XX Currently specific to x86 XX
XX TODO-Cleanup: Consider factoring this out of CodeGen XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
void instGen(instruction ins);
void inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock);
void inst_SET(emitJumpKind condition, regNumber reg);
void inst_RV(instruction ins, regNumber reg, var_types type, emitAttr size = EA_UNKNOWN);
void inst_Mov(var_types dstType,
regNumber dstReg,
regNumber srcReg,
bool canSkip,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_Mov_Extend(var_types srcType,
bool srcInReg,
regNumber dstReg,
regNumber srcReg,
bool canSkip,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_RV_RV(instruction ins,
regNumber reg1,
regNumber reg2,
var_types type = TYP_I_IMPL,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_RV_RV_RV(instruction ins,
regNumber reg1,
regNumber reg2,
regNumber reg3,
emitAttr size,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_IV(instruction ins, cnsval_ssize_t val);
void inst_IV_handle(instruction ins, cnsval_ssize_t val);
void inst_RV_IV(
instruction ins, regNumber reg, target_ssize_t val, emitAttr size, insFlags flags = INS_FLAGS_DONT_CARE);
void inst_ST_RV(instruction ins, TempDsc* tmp, unsigned ofs, regNumber reg, var_types type);
void inst_FS_ST(instruction ins, emitAttr size, TempDsc* tmp, unsigned ofs);
void inst_TT(instruction ins, GenTree* tree, unsigned offs = 0, int shfv = 0, emitAttr size = EA_UNKNOWN);
void inst_TT_RV(instruction ins, emitAttr size, GenTree* tree, regNumber reg);
void inst_RV_TT(instruction ins,
regNumber reg,
GenTree* tree,
unsigned offs = 0,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_RV_SH(instruction ins, emitAttr size, regNumber reg, unsigned val, insFlags flags = INS_FLAGS_DONT_CARE);
#if defined(TARGET_XARCH)
enum class OperandKind{
ClsVar, // [CLS_VAR_ADDR] - "C" in the emitter.
Local, // [Local or spill temp + offset] - "S" in the emitter.
Indir, // [base+index*scale+disp] - "A" in the emitter.
Imm, // immediate - "I" in the emitter.
Reg // reg - "R" in the emitter.
};
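    // Illustrative mapping: a contained local variable or spill temp operand is described as
    // OperandKind::Local and reaches the emitter through its "S" forms, while a contained integer
    // constant is described as OperandKind::Imm and uses the "I" forms.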
class OperandDesc
{
OperandKind m_kind;
union {
struct
{
CORINFO_FIELD_HANDLE m_fieldHnd;
};
struct
{
int m_varNum;
uint16_t m_offset;
};
struct
{
GenTree* m_addr;
GenTreeIndir* m_indir;
var_types m_indirType;
};
struct
{
ssize_t m_immediate;
bool m_immediateNeedsReloc;
};
struct
{
regNumber m_reg;
};
};
public:
OperandDesc(CORINFO_FIELD_HANDLE fieldHnd) : m_kind(OperandKind::ClsVar), m_fieldHnd(fieldHnd)
{
}
OperandDesc(int varNum, uint16_t offset) : m_kind(OperandKind::Local), m_varNum(varNum), m_offset(offset)
{
}
OperandDesc(GenTreeIndir* indir)
: m_kind(OperandKind::Indir), m_addr(indir->Addr()), m_indir(indir), m_indirType(indir->TypeGet())
{
}
OperandDesc(var_types indirType, GenTree* addr)
: m_kind(OperandKind::Indir), m_addr(addr), m_indir(nullptr), m_indirType(indirType)
{
}
OperandDesc(ssize_t immediate, bool immediateNeedsReloc)
: m_kind(OperandKind::Imm), m_immediate(immediate), m_immediateNeedsReloc(immediateNeedsReloc)
{
}
OperandDesc(regNumber reg) : m_kind(OperandKind::Reg), m_reg(reg)
{
}
OperandKind GetKind() const
{
return m_kind;
}
CORINFO_FIELD_HANDLE GetFieldHnd() const
{
assert(m_kind == OperandKind::ClsVar);
return m_fieldHnd;
}
int GetVarNum() const
{
assert(m_kind == OperandKind::Local);
return m_varNum;
}
int GetLclOffset() const
{
assert(m_kind == OperandKind::Local);
return m_offset;
}
// TODO-Cleanup: instead of this rather unsightly workaround with
// "indirForm", create a new abstraction for address modes to pass
// to the emitter (or at least just use "addr"...).
GenTreeIndir* GetIndirForm(GenTreeIndir* pIndirForm)
{
if (m_indir == nullptr)
{
GenTreeIndir indirForm = CodeGen::indirForm(m_indirType, m_addr);
memcpy(pIndirForm, &indirForm, sizeof(GenTreeIndir));
}
else
{
pIndirForm = m_indir;
}
return pIndirForm;
}
ssize_t GetImmediate() const
{
assert(m_kind == OperandKind::Imm);
return m_immediate;
}
bool ImmediateNeedsReloc() const
{
assert(m_kind == OperandKind::Imm);
return m_immediateNeedsReloc;
}
regNumber GetReg() const
{
return m_reg;
}
bool IsContained() const
{
return m_kind != OperandKind::Reg;
}
};
OperandDesc genOperandDesc(GenTree* op);
void inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival);
void inst_RV_TT_IV(instruction ins, emitAttr attr, regNumber reg1, GenTree* rmOp, int ival);
void inst_RV_RV_TT(instruction ins, emitAttr size, regNumber targetReg, regNumber op1Reg, GenTree* op2, bool isRMW);
#endif
void inst_set_SV_var(GenTree* tree);
#ifdef TARGET_ARM
bool arm_Valid_Imm_For_Instr(instruction ins, target_ssize_t imm, insFlags flags);
bool arm_Valid_Imm_For_Add(target_ssize_t imm, insFlags flag);
bool arm_Valid_Imm_For_Add_SP(target_ssize_t imm);
#endif
instruction ins_Move_Extend(var_types srcType, bool srcInReg);
instruction ins_Copy(var_types dstType);
instruction ins_Copy(regNumber srcReg, var_types dstType);
instruction ins_FloatConv(var_types to, var_types from);
instruction ins_MathOp(genTreeOps oper, var_types type);
void instGen_Return(unsigned stkArgSize);
enum BarrierKind
{
BARRIER_FULL, // full barrier
        BARRIER_LOAD_ONLY, // load barrier
};
void instGen_MemoryBarrier(BarrierKind barrierKind = BARRIER_FULL);
void instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags flags = INS_FLAGS_DONT_CARE);
void instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags = INS_FLAGS_DONT_CARE DEBUGARG(size_t targetHandle = 0)
DEBUGARG(GenTreeFlags gtFlags = GTF_EMPTY));
#ifdef TARGET_XARCH
instruction genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue);
#endif // TARGET_XARCH
// Maps a GenCondition code to a sequence of conditional jumps or other conditional instructions
// such as X86's SETcc. A sequence of instructions rather than just a single one is required for
// certain floating point conditions.
// For example, X86's UCOMISS sets ZF to indicate equality but it also sets it, together with PF,
// to indicate an unordered result. So for GenCondition::FEQ we first need to check if PF is 0
// and then jump if ZF is 1:
// JP fallThroughBlock
// JE jumpDestBlock
// fallThroughBlock:
// ...
// jumpDestBlock:
//
// This is very similar to the way shortcircuit evaluation of bool AND and OR operators works so
// in order to make the GenConditionDesc mapping tables easier to read, a bool expression-like
// pattern is used to encode the above:
// { EJ_jnp, GT_AND, EJ_je }
// { EJ_jp, GT_OR, EJ_jne }
//
// For more details check inst_JCC and inst_SETCC functions.
//
struct GenConditionDesc
{
emitJumpKind jumpKind1;
genTreeOps oper;
emitJumpKind jumpKind2;
char padTo4Bytes;
static const GenConditionDesc& Get(GenCondition condition)
{
assert(condition.GetCode() < ArrLen(map));
const GenConditionDesc& desc = map[condition.GetCode()];
assert(desc.jumpKind1 != EJ_NONE);
assert((desc.oper == GT_NONE) || (desc.oper == GT_AND) || (desc.oper == GT_OR));
assert((desc.oper == GT_NONE) == (desc.jumpKind2 == EJ_NONE));
return desc;
}
private:
static const GenConditionDesc map[32];
};
void inst_JCC(GenCondition condition, BasicBlock* target);
void inst_SETCC(GenCondition condition, var_types type, regNumber dstReg);
};
// A simple phase that just invokes a method on the codegen instance
//
class CodeGenPhase final : public Phase
{
public:
CodeGenPhase(CodeGen* _codeGen, Phases _phase, void (CodeGen::*_action)())
: Phase(_codeGen->GetCompiler(), _phase), codeGen(_codeGen), action(_action)
{
}
protected:
virtual PhaseStatus DoPhase() override
{
(codeGen->*action)();
return PhaseStatus::MODIFIED_EVERYTHING;
}
private:
CodeGen* codeGen;
void (CodeGen::*action)();
};
// Wrapper for using CodeGenPhase
//
inline void DoPhase(CodeGen* _codeGen, Phases _phase, void (CodeGen::*_action)())
{
CodeGenPhase phase(_codeGen, _phase, _action);
phase.Run();
}
#endif // _CODEGEN_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This class contains all the data & functionality for code generation
// of a method, except for the target-specific elements, which are
// primarily in the Target class.
//
#ifndef _CODEGEN_H_
#define _CODEGEN_H_
#include "codegeninterface.h"
#include "compiler.h" // temporary??
#include "regset.h"
#include "jitgcinfo.h"
class CodeGen final : public CodeGenInterface
{
friend class emitter;
friend class DisAssembler;
public:
// This could use further abstraction
CodeGen(Compiler* theCompiler);
virtual void genGenerateCode(void** codePtr, uint32_t* nativeSizeOfCode);
void genGenerateMachineCode();
void genEmitMachineCode();
void genEmitUnwindDebugGCandEH();
// TODO-Cleanup: Abstract out the part of this that finds the addressing mode, and
// move it to Lower
virtual bool genCreateAddrMode(
GenTree* addr, bool fold, bool* revPtr, GenTree** rv1Ptr, GenTree** rv2Ptr, unsigned* mulPtr, ssize_t* cnsPtr);
private:
#if defined(TARGET_XARCH)
// Bit masks used in negating a float or double number.
// This is to avoid creating more than one data constant for these bitmasks when a
// method has more than one GT_NEG operation on floating point values.
CORINFO_FIELD_HANDLE negBitmaskFlt;
CORINFO_FIELD_HANDLE negBitmaskDbl;
// Bit masks used in computing Math.Abs() of a float or double number.
CORINFO_FIELD_HANDLE absBitmaskFlt;
CORINFO_FIELD_HANDLE absBitmaskDbl;
// Bit mask used in U8 -> double conversion to adjust the result.
CORINFO_FIELD_HANDLE u8ToDblBitmask;
// Generates SSE2 code for the given tree as "Operand BitWiseOp BitMask"
void genSSE2BitwiseOp(GenTree* treeNode);
// Generates SSE41 code for the given tree as a round operation
void genSSE41RoundOp(GenTreeOp* treeNode);
instruction simdAlignedMovIns()
{
        // We use movaps when VEX is not available because it is a smaller instruction;
        // with VEX, vmovaps would be the same size as vmovdqa, and vmovdqa can execute
        // on more CPU ports on older processors, so we use movdqa in that case.
return compiler->canUseVexEncoding() ? INS_movdqa : INS_movaps;
}
instruction simdUnalignedMovIns()
{
        // We use movups when VEX is not available because it is a smaller instruction;
        // with VEX, vmovups would be the same size as vmovdqu, and vmovdqu can execute
        // on more CPU ports on older processors, so we use movdqu in that case.
return compiler->canUseVexEncoding() ? INS_movdqu : INS_movups;
}
#endif // defined(TARGET_XARCH)
void genPrepForCompiler();
void genMarkLabelsForCodegen();
inline RegState* regStateForType(var_types t)
{
return varTypeUsesFloatReg(t) ? &floatRegState : &intRegState;
}
inline RegState* regStateForReg(regNumber reg)
{
return genIsValidFloatReg(reg) ? &floatRegState : &intRegState;
}
regNumber genFramePointerReg()
{
if (isFramePointerUsed())
{
return REG_FPBASE;
}
else
{
return REG_SPBASE;
}
}
static bool genShouldRoundFP();
static GenTreeIndir indirForm(var_types type, GenTree* base);
static GenTreeStoreInd storeIndirForm(var_types type, GenTree* base, GenTree* data);
GenTreeIntCon intForm(var_types type, ssize_t value);
void genRangeCheck(GenTree* node);
void genLockedInstructions(GenTreeOp* node);
#ifdef TARGET_XARCH
void genCodeForLockAdd(GenTreeOp* node);
#endif
#ifdef REG_OPT_RSVD
    // On some targets, such as ARM, we may need an extra reserved register
    // that is used when addressing stack-based locals and stack-based temps.
    // This method returns the regNumber that should be used when such an
    // extra register is needed.
//
regNumber rsGetRsvdReg()
{
// We should have already added this register to the mask
        // of reserved registers in regSet.rsMaskResvd
noway_assert((regSet.rsMaskResvd & RBM_OPT_RSVD) != 0);
return REG_OPT_RSVD;
}
#endif // REG_OPT_RSVD
//-------------------------------------------------------------------------
bool genUseBlockInit; // true if we plan to block-initialize the local stack frame
unsigned genInitStkLclCnt; // The count of local variables that we need to zero init
void SubtractStackLevel(unsigned adjustment)
{
assert(genStackLevel >= adjustment);
unsigned newStackLevel = genStackLevel - adjustment;
if (genStackLevel != newStackLevel)
{
JITDUMP("Adjusting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
void AddStackLevel(unsigned adjustment)
{
unsigned newStackLevel = genStackLevel + adjustment;
if (genStackLevel != newStackLevel)
{
JITDUMP("Adjusting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
void SetStackLevel(unsigned newStackLevel)
{
if (genStackLevel != newStackLevel)
{
JITDUMP("Setting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
//-------------------------------------------------------------------------
void genReportEH();
// Allocates storage for the GC info, writes the GC info into that storage, records the address of the
// GC info of the method with the EE, and returns a pointer to the "info" portion (just post-header) of
// the GC info. Requires "codeSize" to be the size of the generated code, "prologSize" and "epilogSize"
// to be the sizes of the prolog and epilog, respectively. In DEBUG, makes a check involving the
// "codePtr", assumed to be a pointer to the start of the generated code.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef JIT32_GCENCODER
void* genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
void* genCreateAndStoreGCInfoJIT32(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr));
#else // !JIT32_GCENCODER
void genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
void genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr));
#endif // !JIT32_GCENCODER
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
// the current (pending) label ref, a label which has been referenced but not yet seen
BasicBlock* genPendingCallLabel;
void** codePtr;
uint32_t* nativeSizeOfCode;
unsigned codeSize;
void* coldCodePtr;
void* consPtr;
#ifdef DEBUG
// Last instr we have displayed for dspInstrs
unsigned genCurDispOffset;
static const char* genInsName(instruction ins);
const char* genInsDisplayName(emitter::instrDesc* id);
static const char* genSizeStr(emitAttr size);
#endif // DEBUG
void genInitialize();
void genInitializeRegisterState();
void genCodeForBBlist();
public:
void genSpillVar(GenTree* tree);
protected:
void genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTarget = REG_NA);
void genGCWriteBarrier(GenTree* tgt, GCInfo::WriteBarrierForm wbf);
BasicBlock* genCreateTempLabel();
private:
void genLogLabel(BasicBlock* bb);
protected:
void genDefineTempLabel(BasicBlock* label);
void genDefineInlineTempLabel(BasicBlock* label);
void genAdjustStackLevel(BasicBlock* block);
void genExitCode(BasicBlock* block);
void genJumpToThrowHlpBlk(emitJumpKind jumpKind, SpecialCodeKind codeKind, BasicBlock* failBlk = nullptr);
void genCheckOverflow(GenTree* tree);
//-------------------------------------------------------------------------
//
// Prolog/epilog generation
//
//-------------------------------------------------------------------------
unsigned prologSize;
unsigned epilogSize;
//
// Prolog functions and data (there are a few exceptions for more generally used things)
//
void genEstablishFramePointer(int delta, bool reportUnwindData);
void genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbered, RegState* regState);
void genEnregisterIncomingStackArgs();
#if defined(TARGET_ARM64)
void genEnregisterOSRArgsAndLocals(regNumber initReg, bool* pInitRegZeroed);
#else
void genEnregisterOSRArgsAndLocals();
#endif
void genCheckUseBlockInit();
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void genClearStackVec3ArgUpperBits();
#endif // UNIX_AMD64_ABI && FEATURE_SIMD
#if defined(TARGET_ARM64)
bool genInstrWithConstant(instruction ins,
emitAttr attr,
regNumber reg1,
regNumber reg2,
ssize_t imm,
regNumber tmpReg,
bool inUnwindRegion = false);
void genStackPointerAdjustment(ssize_t spAdjustment, regNumber tmpReg, bool* pTmpRegIsZero, bool reportUnwindData);
void genPrologSaveRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero);
void genPrologSaveReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero);
void genEpilogRestoreRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero);
void genEpilogRestoreReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero);
// A simple struct to keep register pairs for prolog and epilog.
struct RegPair
{
regNumber reg1;
regNumber reg2;
bool useSaveNextPair;
RegPair(regNumber reg1) : reg1(reg1), reg2(REG_NA), useSaveNextPair(false)
{
}
RegPair(regNumber reg1, regNumber reg2) : reg1(reg1), reg2(reg2), useSaveNextPair(false)
{
assert(reg2 == REG_NEXT(reg1));
}
};
static void genBuildRegPairsStack(regMaskTP regsMask, ArrayStack<RegPair>* regStack);
static void genSetUseSaveNextPairs(ArrayStack<RegPair>* regStack);
static int genGetSlotSizeForRegsInMask(regMaskTP regsMask);
void genSaveCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset);
void genRestoreCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset);
void genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask, int lowestCalleeSavedOffset, int spDelta);
void genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask, int lowestCalleeSavedOffset, int spDelta);
void genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed);
#else
void genPushCalleeSavedRegisters();
#endif
#if defined(TARGET_AMD64)
void genOSRRecordTier0CalleeSavedRegistersAndFrame();
void genOSRSaveRemainingCalleeSavedRegisters();
#endif // TARGET_AMD64
void genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn);
void genPoisonFrame(regMaskTP bbRegLiveIn);
#if defined(TARGET_ARM)
bool genInstrWithConstant(
instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm, insFlags flags, regNumber tmpReg);
bool genStackPointerAdjustment(ssize_t spAdjustment, regNumber tmpReg);
void genPushFltRegs(regMaskTP regMask);
void genPopFltRegs(regMaskTP regMask);
regMaskTP genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskCalleeSavedFloat);
regMaskTP genJmpCallArgMask();
void genFreeLclFrame(unsigned frameSize,
/* IN OUT */ bool* pUnwindStarted);
void genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg);
void genMov32RelocatableDataLabel(unsigned value, regNumber reg);
void genMov32RelocatableImmediate(emitAttr size, BYTE* addr, regNumber reg);
    bool genUsedPopToReturn; // True if we use the pop into PC to return,
                             // False if we don't and must branch to LR to return.
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
regMaskTP fiSaveRegs; // Set of registers saved in the funclet prolog (includes LR)
unsigned fiFunctionCallerSPtoFPdelta; // Delta between caller SP and the frame pointer
unsigned fiSpDelta; // Stack pointer delta
unsigned fiPSP_slot_SP_offset; // PSP slot offset from SP
int fiPSP_slot_CallerSP_offset; // PSP slot offset from Caller SP
};
FuncletFrameInfoDsc genFuncletInfo;
#elif defined(TARGET_ARM64)
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
regMaskTP fiSaveRegs; // Set of callee-saved registers saved in the funclet prolog (includes LR)
int fiFunction_CallerSP_to_FP_delta; // Delta between caller SP and the frame pointer in the parent function
// (negative)
int fiSP_to_FPLR_save_delta; // FP/LR register save offset from SP (positive)
int fiSP_to_PSP_slot_delta; // PSP slot offset from SP (positive)
int fiSP_to_CalleeSave_delta; // First callee-saved register slot offset from SP (positive)
int fiCallerSP_to_PSP_slot_delta; // PSP slot offset from Caller SP (negative)
int fiFrameType; // Funclet frame types are numbered. See genFuncletProlog() for details.
int fiSpDelta1; // Stack pointer delta 1 (negative)
int fiSpDelta2; // Stack pointer delta 2 (negative)
};
FuncletFrameInfoDsc genFuncletInfo;
#elif defined(TARGET_AMD64)
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
unsigned fiFunction_InitialSP_to_FP_delta; // Delta between Initial-SP and the frame pointer
unsigned fiSpDelta; // Stack pointer delta
int fiPSP_slot_InitialSP_offset; // PSP slot offset from Initial-SP
};
FuncletFrameInfoDsc genFuncletInfo;
#endif // TARGET_AMD64
#if defined(TARGET_XARCH)
// Save/Restore callee saved float regs to stack
void genPreserveCalleeSavedFltRegs(unsigned lclFrameSize);
void genRestoreCalleeSavedFltRegs(unsigned lclFrameSize);
// Generate VZeroupper instruction to avoid AVX/SSE transition penalty
void genVzeroupperIfNeeded(bool check256bitOnly = true);
#endif // TARGET_XARCH
void genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& initDblRegs, const regNumber& initReg);
regNumber genGetZeroReg(regNumber initReg, bool* pInitRegZeroed);
void genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed);
void genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed);
void genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed);
void genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed);
void genFinalizeFrame();
#ifdef PROFILING_SUPPORTED
void genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed);
void genProfilingLeaveCallback(unsigned helper);
#endif // PROFILING_SUPPORTED
// clang-format off
void genEmitCall(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
void* addr
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
regNumber base,
bool isJump);
// clang-format on
// clang-format off
void genEmitCallIndir(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
GenTreeIndir* indir
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
bool isJump);
// clang-format on
//
// Epilog functions
//
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
bool genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog);
#endif
#if defined(TARGET_ARM64)
void genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog);
#else // !defined(TARGET_ARM64)
void genPopCalleeSavedRegisters(bool jmpEpilog = false);
#if defined(TARGET_XARCH)
unsigned genPopCalleeSavedRegistersFromMask(regMaskTP rsPopRegs);
#endif // !defined(TARGET_XARCH)
#endif // !defined(TARGET_ARM64)
//
// Common or driving functions
//
void genReserveProlog(BasicBlock* block); // currently unused
void genReserveEpilog(BasicBlock* block);
void genFnProlog();
void genFnEpilog(BasicBlock* block);
#if defined(FEATURE_EH_FUNCLETS)
void genReserveFuncletProlog(BasicBlock* block);
void genReserveFuncletEpilog(BasicBlock* block);
void genFuncletProlog(BasicBlock* block);
void genFuncletEpilog();
void genCaptureFuncletPrologEpilogInfo();
/*-----------------------------------------------------------------------------
*
* Set the main function PSPSym value in the frame.
* Funclets use different code to load the PSP sym and save it in their frame.
* See the document "CLR ABI.md" for a full description of the PSPSym.
* The PSPSym section of that document is copied here.
*
***********************************
* The name PSPSym stands for Previous Stack Pointer Symbol. It is how a funclet
* accesses locals from the main function body.
*
* First, two definitions.
*
* Caller-SP is the value of the stack pointer in a function's caller before the call
* instruction is executed. That is, when function A calls function B, Caller-SP for B
* is the value of the stack pointer immediately before the call instruction in A
* (calling B) was executed. Note that this definition holds for both AMD64, which
     * pushes the return address when a call instruction is executed, and for ARM, which
* doesn't. For AMD64, Caller-SP is the address above the call return address.
*
* Initial-SP is the initial value of the stack pointer after the fixed-size portion of
* the frame has been allocated. That is, before any "alloca"-type allocations.
*
* The PSPSym is a pointer-sized local variable in the frame of the main function and
* of each funclet. The value stored in PSPSym is the value of Initial-SP/Caller-SP
* for the main function. The stack offset of the PSPSym is reported to the VM in the
* GC information header. The value reported in the GC information is the offset of the
* PSPSym from Initial-SP/Caller-SP. (Note that both the value stored, and the way the
     * value is reported to the VM, differ between architectures. In particular, note that
* most things in the GC information header are reported as offsets relative to Caller-SP,
* but PSPSym on AMD64 is one (maybe the only) exception.)
*
* The VM uses the PSPSym to find other locals it cares about (such as the generics context
* in a funclet frame). The JIT uses it to re-establish the frame pointer register, so that
* the frame pointer is the same value in a funclet as it is in the main function body.
*
* When a funclet is called, it is passed the Establisher Frame Pointer. For AMD64 this is
* true for all funclets and it is passed as the first argument in RCX, but for ARM this is
* only true for first pass funclets (currently just filters) and it is passed as the second
* argument in R1. The Establisher Frame Pointer is a stack pointer of an interesting "parent"
* frame in the exception processing system. For the CLR, it points either to the main function
* frame or a dynamically enclosing funclet frame from the same function, for the funclet being
* invoked. The value of the Establisher Frame Pointer is Initial-SP on AMD64, Caller-SP on ARM.
*
* Using the establisher frame, the funclet wants to load the value of the PSPSym. Since we
* don't know if the Establisher Frame is from the main function or a funclet, we design the
* main function and funclet frame layouts to place the PSPSym at an identical, small, constant
* offset from the Establisher Frame in each case. (This is also required because we only report
* a single offset to the PSPSym in the GC information, and that offset must be valid for the main
* function and all of its funclets). Then, the funclet uses this known offset to compute the
* PSPSym address and read its value. From this, it can compute the value of the frame pointer
* (which is a constant offset from the PSPSym value) and set the frame register to be the same
* as the parent function. Also, the funclet writes the value of the PSPSym to its own frame's
* PSPSym. This "copying" of the PSPSym happens for every funclet invocation, in particular,
* for every nested funclet invocation.
*
* On ARM, for all second pass funclets (finally, fault, catch, and filter-handler) the VM
* restores all non-volatile registers to their values within the parent frame. This includes
* the frame register (R11). Thus, the PSPSym is not used to recompute the frame pointer register
* in this case, though the PSPSym is copied to the funclet's frame, as for all funclets.
*
* Catch, Filter, and Filter-handlers also get an Exception object (GC ref) as an argument
* (REG_EXCEPTION_OBJECT). On AMD64 it is the second argument and thus passed in RDX. On
* ARM this is the first argument and passed in R0.
*
* (Note that the JIT64 source code contains a comment that says, "The current CLR doesn't always
* pass the correct establisher frame to the funclet. Funclet may receive establisher frame of
* funclet when expecting that of original routine." It indicates this is the reason that a PSPSym
* is required in all funclets as well as the main function, whereas if the establisher frame was
* correctly reported, the PSPSym could be omitted in some cases.)
***********************************
*/
void genSetPSPSym(regNumber initReg, bool* pInitRegZeroed);
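    // A minimal illustrative sketch (not actual emitted code; the offsets and frame-info names
    // used here are examples only) of how an AMD64 funclet prolog consumes the PSPSym:
    //
    //     ; rcx = Establisher Frame (Initial-SP of the "parent" frame), passed by the VM
    //     mov  rax, [rcx + PSPSym_offset]          ; read the parent's PSPSym value
    //     mov  [rsp + PSPSym_offset], rax          ; copy it into this funclet's own PSPSym slot
    //     lea  rbp, [rax + InitialSP_to_FP_delta]  ; re-establish the parent's frame pointer
    //
    // genSetPSPSym() above stores the value during the main function's prolog; the funclet side
    // is generated by genFuncletProlog() using the offsets captured in genFuncletInfo.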
void genUpdateCurrentFunclet(BasicBlock* block);
#if defined(TARGET_ARM)
void genInsertNopForUnwinder(BasicBlock* block);
#endif
#else // !FEATURE_EH_FUNCLETS
// This is a no-op when there are no funclets!
void genUpdateCurrentFunclet(BasicBlock* block)
{
return;
}
#endif // !FEATURE_EH_FUNCLETS
void genGeneratePrologsAndEpilogs();
#if defined(DEBUG) && defined(TARGET_ARM64)
void genArm64EmitterUnitTests();
#endif
#if defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
void genAmd64EmitterUnitTests();
#endif
#ifdef TARGET_ARM64
virtual void SetSaveFpLrWithAllCalleeSavedRegisters(bool value);
virtual bool IsSaveFpLrWithAllCalleeSavedRegisters() const;
bool genSaveFpLrWithAllCalleeSavedRegisters;
#endif // TARGET_ARM64
//-------------------------------------------------------------------------
//
// End prolog/epilog generation
//
//-------------------------------------------------------------------------
void genSinglePush();
void genSinglePop();
regMaskTP genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP* noRefRegs);
void genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Debugging Support XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#ifdef DEBUG
void genIPmappingDisp(unsigned mappingNum, IPmappingDsc* ipMapping);
void genIPmappingListDisp();
#endif // DEBUG
void genIPmappingAdd(IPmappingDscKind kind, const DebugInfo& di, bool isLabel);
void genIPmappingAddToFront(IPmappingDscKind kind, const DebugInfo& di, bool isLabel);
void genIPmappingGen();
#ifdef DEBUG
void genDumpPreciseDebugInfo();
void genDumpPreciseDebugInfoInlineTree(FILE* file, InlineContext* context, bool* first);
void genAddPreciseIPMappingHere(const DebugInfo& di);
#endif
void genEnsureCodeEmitted(const DebugInfo& di);
//-------------------------------------------------------------------------
// scope info for the variables
void genSetScopeInfo(unsigned which,
UNATIVE_OFFSET startOffs,
UNATIVE_OFFSET length,
unsigned varNum,
unsigned LVnum,
bool avail,
siVarLoc* varLoc);
void genSetScopeInfo();
#ifdef USING_VARIABLE_LIVE_RANGE
// Send VariableLiveRanges as debug info to the debugger
void genSetScopeInfoUsingVariableRanges();
#endif // USING_VARIABLE_LIVE_RANGE
#ifdef USING_SCOPE_INFO
void genSetScopeInfoUsingsiScope();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX ScopeInfo XX
XX XX
XX Keeps track of the scopes during code-generation. XX
XX This is used to translate the local-variable debugging information XX
XX from IL offsets to native code offsets. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
/*****************************************************************************
* ScopeInfo
*
* This class is called during code gen at block-boundaries, and when the
* set of live variables changes. It keeps track of the scope of the variables
* in terms of the native code PC.
*/
#endif // USING_SCOPE_INFO
public:
void siInit();
void checkICodeDebugInfo();
    // The logic used to report debug info for debug code is the same for ScopeInfo and
// VariableLiveRange
void siBeginBlock(BasicBlock* block);
void siEndBlock(BasicBlock* block);
    // VariableLiveRange and siScope need this method to report variables for debug code
void siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned int lastBlockILEndOffset);
protected:
#if defined(FEATURE_EH_FUNCLETS)
bool siInFuncletRegion; // Have we seen the start of the funclet region?
#endif // FEATURE_EH_FUNCLETS
IL_OFFSET siLastEndOffs; // IL offset of the (exclusive) end of the last block processed
#ifdef USING_SCOPE_INFO
public:
// Closes the "ScopeInfo" of the tracked variables that has become dead.
virtual void siUpdate();
void siCheckVarScope(unsigned varNum, IL_OFFSET offs);
void siCloseAllOpenScopes();
#ifdef DEBUG
void siDispOpenScopes();
#endif
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
struct siScope
{
emitLocation scStartLoc; // emitter location of start of scope
emitLocation scEndLoc; // emitter location of end of scope
unsigned scVarNum; // index into lvaTable
unsigned scLVnum; // 'which' in eeGetLVinfo()
unsigned scStackLevel; // Only for stk-vars
siScope* scPrev;
siScope* scNext;
};
// Returns a "siVarLoc" instance representing the place where the variable lives base on
// varDsc and scope description.
CodeGenInterface::siVarLoc getSiVarLoc(const LclVarDsc* varDsc, const siScope* scope) const;
siScope siOpenScopeList, siScopeList, *siOpenScopeLast, *siScopeLast;
unsigned siScopeCnt;
VARSET_TP siLastLife; // Life at last call to siUpdate()
// Tracks the last entry for each tracked register variable
siScope** siLatestTrackedScopes;
// Functions
siScope* siNewScope(unsigned LVnum, unsigned varNum);
void siRemoveFromOpenScopeList(siScope* scope);
void siEndTrackedScope(unsigned varIndex);
void siEndScope(unsigned varNum);
void siEndScope(siScope* scope);
#ifdef DEBUG
bool siVerifyLocalVarTab();
#endif
#ifdef LATE_DISASM
public:
/* virtual */
const char* siRegVarName(size_t offs, size_t size, unsigned reg);
/* virtual */
const char* siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs);
#endif // LATE_DISASM
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX PrologScopeInfo XX
XX XX
XX We need special handling in the prolog block, as the parameter variables XX
XX may not be in the same position described by genLclVarTable - they all XX
XX start out on the stack XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#endif // USING_SCOPE_INFO
public:
void psiBegProlog();
void psiEndProlog();
#ifdef USING_SCOPE_INFO
void psiAdjustStackLevel(unsigned size);
// For EBP-frames, the parameters are accessed via ESP on entry to the function,
// but via EBP right after a "mov ebp,esp" instruction.
void psiMoveESPtoEBP();
// Close previous psiScope and open a new one on the location described by the registers.
void psiMoveToReg(unsigned varNum, regNumber reg = REG_NA, regNumber otherReg = REG_NA);
// Search the open "psiScope" of the "varNum" parameter, close it and open
// a new one using "LclVarDsc" fields.
void psiMoveToStack(unsigned varNum);
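    // Illustrative only (a hypothetical x86 prolog; the real call sites are in the prolog
    // generator, not here): the psi* notifications roughly shadow a sequence such as
    //
    //     push ebp             ->  psiAdjustStackLevel(REGSIZE_BYTES)
    //     mov  ebp, esp        ->  psiMoveESPtoEBP()
    //     mov  esi, [ebp + 8]  ->  psiMoveToReg(varNum)   ; parameter homed into a register
    //
    // so each parameter's reported debug location stays correct while it migrates from its
    // incoming stack slot to its final home.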
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
struct psiScope
{
emitLocation scStartLoc; // emitter location of start of scope
emitLocation scEndLoc; // emitter location of end of scope
unsigned scSlotNum; // index into lclVarTab
unsigned scLVnum; // 'which' in eeGetLVinfo()
bool scRegister;
union {
struct
{
regNumberSmall scRegNum;
// Used for:
// - "other half" of long var on architectures with 32 bit size registers - x86.
// - for System V structs it stores the second register
// used to pass a register passed struct.
regNumberSmall scOtherReg;
} u1;
struct
{
regNumberSmall scBaseReg;
NATIVE_OFFSET scOffset;
} u2;
};
psiScope* scPrev;
psiScope* scNext;
// Returns a "siVarLoc" instance representing the place where the variable lives base on
// psiScope properties.
CodeGenInterface::siVarLoc getSiVarLoc() const;
};
psiScope psiOpenScopeList, psiScopeList, *psiOpenScopeLast, *psiScopeLast;
unsigned psiScopeCnt;
// Implementation Functions
psiScope* psiNewPrologScope(unsigned LVnum, unsigned slotNum);
void psiEndPrologScope(psiScope* scope);
void psiSetScopeOffset(psiScope* newScope, const LclVarDsc* lclVarDsc) const;
#endif // USING_SCOPE_INFO
NATIVE_OFFSET psiGetVarStackOffset(const LclVarDsc* lclVarDsc) const;
/*****************************************************************************
* TrnslLocalVarInfo
*
* This struct holds the LocalVarInfo in terms of the generated native code
* after a call to genSetScopeInfo()
*/
protected:
#ifdef DEBUG
struct TrnslLocalVarInfo
{
unsigned tlviVarNum;
unsigned tlviLVnum;
VarName tlviName;
UNATIVE_OFFSET tlviStartPC;
size_t tlviLength;
bool tlviAvailable;
siVarLoc tlviVarLoc;
};
// Array of scopes of LocalVars in terms of native code
TrnslLocalVarInfo* genTrnslLocalVarInfo;
unsigned genTrnslLocalVarCount;
#endif
void genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree);
void genCodeForTreeNode(GenTree* treeNode);
void genCodeForBinary(GenTreeOp* treeNode);
#if defined(TARGET_X86)
void genCodeForLongUMod(GenTreeOp* node);
#endif // TARGET_X86
void genCodeForDivMod(GenTreeOp* treeNode);
void genCodeForMul(GenTreeOp* treeNode);
void genCodeForIncSaturate(GenTree* treeNode);
void genCodeForMulHi(GenTreeOp* treeNode);
void genLeaInstruction(GenTreeAddrMode* lea);
void genSetRegToCond(regNumber dstReg, GenTree* tree);
#if defined(TARGET_ARMARCH)
void genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale);
void genCodeForMulLong(GenTreeOp* mul);
#endif // TARGET_ARMARCH
#if !defined(TARGET_64BIT)
void genLongToIntCast(GenTree* treeNode);
#endif
// Generate code for a GT_BITCAST that is not contained.
void genCodeForBitCast(GenTreeOp* treeNode);
// Generate the instruction to move a value between register files
void genBitCast(var_types targetType, regNumber targetReg, var_types srcType, regNumber srcReg);
struct GenIntCastDesc
{
enum CheckKind
{
CHECK_NONE,
CHECK_SMALL_INT_RANGE,
CHECK_POSITIVE,
#ifdef TARGET_64BIT
CHECK_UINT_RANGE,
CHECK_POSITIVE_INT_RANGE,
CHECK_INT_RANGE,
#endif
};
enum ExtendKind
{
COPY,
ZERO_EXTEND_SMALL_INT,
SIGN_EXTEND_SMALL_INT,
#ifdef TARGET_64BIT
ZERO_EXTEND_INT,
SIGN_EXTEND_INT,
#endif
};
private:
CheckKind m_checkKind;
unsigned m_checkSrcSize;
int m_checkSmallIntMin;
int m_checkSmallIntMax;
ExtendKind m_extendKind;
unsigned m_extendSrcSize;
public:
GenIntCastDesc(GenTreeCast* cast);
CheckKind CheckKind() const
{
return m_checkKind;
}
unsigned CheckSrcSize() const
{
assert(m_checkKind != CHECK_NONE);
return m_checkSrcSize;
}
int CheckSmallIntMin() const
{
assert(m_checkKind == CHECK_SMALL_INT_RANGE);
return m_checkSmallIntMin;
}
int CheckSmallIntMax() const
{
assert(m_checkKind == CHECK_SMALL_INT_RANGE);
return m_checkSmallIntMax;
}
ExtendKind ExtendKind() const
{
return m_extendKind;
}
unsigned ExtendSrcSize() const
{
return m_extendSrcSize;
}
};
void genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg);
void genIntToIntCast(GenTreeCast* cast);
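    // Rough sketch (simplified, not the actual implementation) of how the descriptor above is
    // meant to drive the cast expansion:
    //
    //     GenIntCastDesc desc(cast);
    //     if (desc.CheckKind() != GenIntCastDesc::CHECK_NONE)
    //     {
    //         genIntCastOverflowCheck(cast, desc, srcReg); // range/overflow check first
    //     }
    //     switch (desc.ExtendKind()) // then the copy/extend itself
    //     {
    //         case GenIntCastDesc::ZERO_EXTEND_SMALL_INT: /* e.g. movzx */ break;
    //         case GenIntCastDesc::SIGN_EXTEND_SMALL_INT: /* e.g. movsx */ break;
    //         default: /* plain copy */ break;
    //     }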
void genFloatToFloatCast(GenTree* treeNode);
void genFloatToIntCast(GenTree* treeNode);
void genIntToFloatCast(GenTree* treeNode);
void genCkfinite(GenTree* treeNode);
void genCodeForCompare(GenTreeOp* tree);
void genIntrinsic(GenTree* treeNode);
void genPutArgStk(GenTreePutArgStk* treeNode);
void genPutArgReg(GenTreeOp* tree);
#if FEATURE_ARG_SPLIT
void genPutArgSplit(GenTreePutArgSplit* treeNode);
#endif // FEATURE_ARG_SPLIT
#if defined(TARGET_XARCH)
unsigned getBaseVarForPutArgStk(GenTree* treeNode);
#endif // TARGET_XARCH
unsigned getFirstArgWithStackSlot();
void genCompareFloat(GenTree* treeNode);
void genCompareInt(GenTree* treeNode);
#ifdef FEATURE_SIMD
enum SIMDScalarMoveType{
        SMT_ZeroInitUpper,                  // zero initialize target upper bits
SMT_ZeroInitUpper_SrcHasUpperZeros, // zero initialize target upper bits; source upper bits are known to be zero
SMT_PreserveUpper // preserve target upper bits
};
#ifdef TARGET_ARM64
insOpts genGetSimdInsOpt(emitAttr size, var_types elementType);
#endif
instruction getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned* ival = nullptr);
void genSIMDScalarMove(
var_types targetType, var_types type, regNumber target, regNumber src, SIMDScalarMoveType moveType);
void genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg);
void genSIMDIntrinsicInit(GenTreeSIMD* simdNode);
void genSIMDIntrinsicInitN(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode);
void genSIMDLo64BitConvert(SIMDIntrinsicID intrinsicID,
var_types simdType,
var_types baseType,
regNumber tmpReg,
regNumber tmpIntReg,
regNumber targetReg);
void genSIMDIntrinsic32BitConvert(GenTreeSIMD* simdNode);
void genSIMDIntrinsic64BitConvert(GenTreeSIMD* simdNode);
void genSIMDExtractUpperHalf(GenTreeSIMD* simdNode, regNumber srcReg, regNumber tgtReg);
void genSIMDIntrinsic(GenTreeSIMD* simdNode);
    // TYP_SIMD12 (i.e. Vector3 of size 12 bytes) is not a hardware supported size and requires
// two reads/writes on 64-bit targets. These routines abstract reading/writing of Vector3
// values through an indirection. Note that Vector3 locals allocated on stack would have
// their size rounded to TARGET_POINTER_SIZE (which is 8 bytes on 64-bit targets) and hence
// Vector3 locals could be treated as TYP_SIMD16 while reading/writing.
void genStoreIndTypeSIMD12(GenTree* treeNode);
void genLoadIndTypeSIMD12(GenTree* treeNode);
void genStoreLclTypeSIMD12(GenTree* treeNode);
void genLoadLclTypeSIMD12(GenTree* treeNode);
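    // For example (illustrative only; register choices are arbitrary), a TYP_SIMD12 load on
    // xarch is split into an 8-byte and a 4-byte access along the lines of:
    //
    //     movsd  xmm0, qword ptr [addr]     ; lower 8 bytes (X, Y)
    //     movss  xmm1, dword ptr [addr + 8] ; upper 4 bytes (Z)
    //     shufps xmm0, xmm1, ...            ; merge Z into the upper element of the result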
#ifdef TARGET_X86
void genStoreSIMD12ToStack(regNumber operandReg, regNumber tmpReg);
void genPutArgStkSIMD12(GenTree* treeNode);
#endif // TARGET_X86
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
void genHWIntrinsic(GenTreeHWIntrinsic* node);
#if defined(TARGET_XARCH)
void genHWIntrinsic_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber reg, GenTree* rmOp);
void genHWIntrinsic_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, int8_t ival);
void genHWIntrinsic_R_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr);
void genHWIntrinsic_R_R_RM(
GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, GenTree* op2);
void genHWIntrinsic_R_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, int8_t ival);
void genHWIntrinsic_R_R_RM_R(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr);
void genHWIntrinsic_R_R_R_RM(
instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, GenTree* op3);
void genBaseIntrinsic(GenTreeHWIntrinsic* node);
void genX86BaseIntrinsic(GenTreeHWIntrinsic* node);
void genSSEIntrinsic(GenTreeHWIntrinsic* node);
void genSSE2Intrinsic(GenTreeHWIntrinsic* node);
void genSSE41Intrinsic(GenTreeHWIntrinsic* node);
void genSSE42Intrinsic(GenTreeHWIntrinsic* node);
void genAvxOrAvx2Intrinsic(GenTreeHWIntrinsic* node);
void genAESIntrinsic(GenTreeHWIntrinsic* node);
void genBMI1OrBMI2Intrinsic(GenTreeHWIntrinsic* node);
void genFMAIntrinsic(GenTreeHWIntrinsic* node);
void genLZCNTIntrinsic(GenTreeHWIntrinsic* node);
void genPCLMULQDQIntrinsic(GenTreeHWIntrinsic* node);
void genPOPCNTIntrinsic(GenTreeHWIntrinsic* node);
void genXCNTIntrinsic(GenTreeHWIntrinsic* node, instruction ins);
template <typename HWIntrinsicSwitchCaseBody>
void genHWIntrinsicJumpTableFallback(NamedIntrinsic intrinsic,
regNumber nonConstImmReg,
regNumber baseReg,
regNumber offsReg,
HWIntrinsicSwitchCaseBody emitSwCase);
#endif // defined(TARGET_XARCH)
#ifdef TARGET_ARM64
class HWIntrinsicImmOpHelper final
{
public:
HWIntrinsicImmOpHelper(CodeGen* codeGen, GenTree* immOp, GenTreeHWIntrinsic* intrin);
void EmitBegin();
void EmitCaseEnd();
// Returns true after the last call to EmitCaseEnd() (i.e. this signals that code generation is done).
bool Done() const
{
return (immValue > immUpperBound);
}
// Returns a value of the immediate operand that should be used for a case.
int ImmValue() const
{
return immValue;
}
private:
        // Returns true if immOp is a non-contained immediate (i.e. the value of the immediate operand is enregistered in
// nonConstImmReg).
bool NonConstImmOp() const
{
return nonConstImmReg != REG_NA;
}
        // Returns true if a non-constant immediate operand can be either 0 or 1.
bool TestImmOpZeroOrOne() const
{
assert(NonConstImmOp());
return (immLowerBound == 0) && (immUpperBound == 1);
}
emitter* GetEmitter() const
{
return codeGen->GetEmitter();
}
CodeGen* const codeGen;
BasicBlock* endLabel;
BasicBlock* nonZeroLabel;
int immValue;
int immLowerBound;
int immUpperBound;
regNumber nonConstImmReg;
regNumber branchTargetReg;
};
#endif // TARGET_ARM64
#endif // FEATURE_HW_INTRINSICS
#if !defined(TARGET_64BIT)
// CodeGen for Long Ints
void genStoreLongLclVar(GenTree* treeNode);
#endif // !defined(TARGET_64BIT)
// Do liveness update for register produced by the current node in codegen after
// code has been emitted for it.
void genProduceReg(GenTree* tree);
void genSpillLocal(unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum);
void genUnspillLocal(
unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum, bool reSpill, bool isLastUse);
void genUnspillRegIfNeeded(GenTree* tree);
void genUnspillRegIfNeeded(GenTree* tree, unsigned multiRegIndex);
regNumber genConsumeReg(GenTree* tree);
regNumber genConsumeReg(GenTree* tree, unsigned multiRegIndex);
void genCopyRegIfNeeded(GenTree* tree, regNumber needReg);
void genConsumeRegAndCopy(GenTree* tree, regNumber needReg);
void genConsumeIfReg(GenTree* tree)
{
if (!tree->isContained())
{
(void)genConsumeReg(tree);
}
}
void genRegCopy(GenTree* tree);
regNumber genRegCopy(GenTree* tree, unsigned multiRegIndex);
void genTransferRegGCState(regNumber dst, regNumber src);
void genConsumeAddress(GenTree* addr);
void genConsumeAddrMode(GenTreeAddrMode* mode);
void genSetBlockSize(GenTreeBlk* blkNode, regNumber sizeReg);
void genConsumeBlockSrc(GenTreeBlk* blkNode);
void genSetBlockSrc(GenTreeBlk* blkNode, regNumber srcReg);
void genConsumeBlockOp(GenTreeBlk* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg);
#ifdef FEATURE_PUT_STRUCT_ARG_STK
void genConsumePutStructArgStk(GenTreePutArgStk* putArgStkNode,
regNumber dstReg,
regNumber srcReg,
regNumber sizeReg);
#endif // FEATURE_PUT_STRUCT_ARG_STK
#if FEATURE_ARG_SPLIT
void genConsumeArgSplitStruct(GenTreePutArgSplit* putArgNode);
#endif // FEATURE_ARG_SPLIT
void genConsumeRegs(GenTree* tree);
void genConsumeOperands(GenTreeOp* tree);
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void genConsumeMultiOpOperands(GenTreeMultiOp* tree);
#endif
void genEmitGSCookieCheck(bool pushReg);
void genCodeForShift(GenTree* tree);
#if defined(TARGET_X86) || defined(TARGET_ARM)
void genCodeForShiftLong(GenTree* tree);
#endif
#ifdef TARGET_XARCH
void genCodeForShiftRMW(GenTreeStoreInd* storeInd);
void genCodeForBT(GenTreeOp* bt);
#endif // TARGET_XARCH
void genCodeForCast(GenTreeOp* tree);
void genCodeForLclAddr(GenTree* tree);
void genCodeForIndexAddr(GenTreeIndexAddr* tree);
void genCodeForIndir(GenTreeIndir* tree);
void genCodeForNegNot(GenTree* tree);
void genCodeForBswap(GenTree* tree);
void genCodeForLclVar(GenTreeLclVar* tree);
void genCodeForLclFld(GenTreeLclFld* tree);
void genCodeForStoreLclFld(GenTreeLclFld* tree);
void genCodeForStoreLclVar(GenTreeLclVar* tree);
void genCodeForReturnTrap(GenTreeOp* tree);
void genCodeForJcc(GenTreeCC* tree);
void genCodeForSetcc(GenTreeCC* setcc);
void genCodeForStoreInd(GenTreeStoreInd* tree);
void genCodeForSwap(GenTreeOp* tree);
void genCodeForCpObj(GenTreeObj* cpObjNode);
void genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode);
void genCodeForCpBlkUnroll(GenTreeBlk* cpBlkNode);
#ifndef TARGET_X86
void genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode);
#endif
void genCodeForPhysReg(GenTreePhysReg* tree);
void genCodeForNullCheck(GenTreeIndir* tree);
void genCodeForCmpXchg(GenTreeCmpXchg* tree);
void genAlignStackBeforeCall(GenTreePutArgStk* putArgStk);
void genAlignStackBeforeCall(GenTreeCall* call);
void genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias = 0);
#if defined(UNIX_X86_ABI)
unsigned curNestedAlignment; // Keep track of alignment adjustment required during codegen.
unsigned maxNestedAlignment; // The maximum amount of alignment adjustment required.
void SubtractNestedAlignment(unsigned adjustment)
{
assert(curNestedAlignment >= adjustment);
unsigned newNestedAlignment = curNestedAlignment - adjustment;
if (curNestedAlignment != newNestedAlignment)
{
JITDUMP("Adjusting stack nested alignment from %d to %d\n", curNestedAlignment, newNestedAlignment);
}
curNestedAlignment = newNestedAlignment;
}
void AddNestedAlignment(unsigned adjustment)
{
unsigned newNestedAlignment = curNestedAlignment + adjustment;
if (curNestedAlignment != newNestedAlignment)
{
JITDUMP("Adjusting stack nested alignment from %d to %d\n", curNestedAlignment, newNestedAlignment);
}
curNestedAlignment = newNestedAlignment;
if (curNestedAlignment > maxNestedAlignment)
{
JITDUMP("Max stack nested alignment changed from %d to %d\n", maxNestedAlignment, curNestedAlignment);
maxNestedAlignment = curNestedAlignment;
}
}
#endif
#ifndef TARGET_X86
void genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArgVarNum);
#endif // !TARGET_X86
#ifdef FEATURE_PUT_STRUCT_ARG_STK
#ifdef TARGET_X86
bool genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk);
void genPushReg(var_types type, regNumber srcReg);
void genPutArgStkFieldList(GenTreePutArgStk* putArgStk);
#endif // TARGET_X86
void genPutStructArgStk(GenTreePutArgStk* treeNode);
unsigned genMove8IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove4IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove2IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove1IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
void genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset);
void genStoreRegToStackArg(var_types type, regNumber reg, int offset);
void genStructPutArgRepMovs(GenTreePutArgStk* putArgStkNode);
void genStructPutArgUnroll(GenTreePutArgStk* putArgStkNode);
#ifdef TARGET_X86
void genStructPutArgPush(GenTreePutArgStk* putArgStkNode);
#else
void genStructPutArgPartialRepMovs(GenTreePutArgStk* putArgStkNode);
#endif
#endif // FEATURE_PUT_STRUCT_ARG_STK
void genCodeForStoreBlk(GenTreeBlk* storeBlkNode);
#ifndef TARGET_X86
void genCodeForInitBlkHelper(GenTreeBlk* initBlkNode);
#endif
void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode);
void genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode);
void genJumpTable(GenTree* tree);
void genTableBasedSwitch(GenTree* tree);
void genCodeForArrIndex(GenTreeArrIndex* treeNode);
void genCodeForArrOffset(GenTreeArrOffs* treeNode);
instruction genGetInsForOper(genTreeOps oper, var_types type);
bool genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data);
GenTree* getCallTarget(const GenTreeCall* call, CORINFO_METHOD_HANDLE* methHnd);
regNumber getCallIndirectionCellReg(const GenTreeCall* call);
void genCall(GenTreeCall* call);
void genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes));
void genJmpMethod(GenTree* jmp);
BasicBlock* genCallFinally(BasicBlock* block);
void genCodeForJumpTrue(GenTreeOp* jtrue);
#ifdef TARGET_ARM64
void genCodeForJumpCompare(GenTreeOp* tree);
void genCodeForMadd(GenTreeOp* tree);
void genCodeForBfiz(GenTreeOp* tree);
void genCodeForAddEx(GenTreeOp* tree);
#endif // TARGET_ARM64
#if defined(FEATURE_EH_FUNCLETS)
void genEHCatchRet(BasicBlock* block);
#else // !FEATURE_EH_FUNCLETS
void genEHFinallyOrFilterRet(BasicBlock* block);
#endif // !FEATURE_EH_FUNCLETS
void genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode);
void genMultiRegStoreToLocal(GenTreeLclVar* lclNode);
// Codegen for multi-register struct returns.
bool isStructReturn(GenTree* treeNode);
#ifdef FEATURE_SIMD
void genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc);
#endif
void genStructReturn(GenTree* treeNode);
#if defined(TARGET_X86) || defined(TARGET_ARM)
void genLongReturn(GenTree* treeNode);
#endif // TARGET_X86 || TARGET_ARM
#if defined(TARGET_X86)
void genFloatReturn(GenTree* treeNode);
#endif // TARGET_X86
#if defined(TARGET_ARM64)
void genSimpleReturn(GenTree* treeNode);
#endif // TARGET_ARM64
void genReturn(GenTree* treeNode);
void genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp);
void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp);
target_ssize_t genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp);
#if defined(TARGET_XARCH)
void genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta, regNumber regTmp);
#endif // defined(TARGET_XARCH)
void genLclHeap(GenTree* tree);
bool genIsRegCandidateLocal(GenTree* tree)
{
if (!tree->IsLocal())
{
return false;
}
return compiler->lvaGetDesc(tree->AsLclVarCommon())->lvIsRegCandidate();
}
#ifdef FEATURE_PUT_STRUCT_ARG_STK
#ifdef TARGET_X86
bool m_pushStkArg;
#else // !TARGET_X86
unsigned m_stkArgVarNum;
unsigned m_stkArgOffset;
#endif // !TARGET_X86
#endif // !FEATURE_PUT_STRUCT_ARG_STK
#if defined(DEBUG) && defined(TARGET_XARCH)
void genStackPointerCheck(bool doStackPointerCheck, unsigned lvaStackPointerVar);
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#ifdef DEBUG
GenTree* lastConsumedNode;
void genNumberOperandUse(GenTree* const operand, int& useNum) const;
void genCheckConsumeNode(GenTree* const node);
#else // !DEBUG
inline void genCheckConsumeNode(GenTree* treeNode)
{
}
#endif // DEBUG
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Instruction XX
XX XX
XX The interface to generate a machine-instruction. XX
XX Currently specific to x86 XX
XX TODO-Cleanup: Consider factoring this out of CodeGen XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
void instGen(instruction ins);
void inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock);
void inst_SET(emitJumpKind condition, regNumber reg);
void inst_RV(instruction ins, regNumber reg, var_types type, emitAttr size = EA_UNKNOWN);
void inst_Mov(var_types dstType,
regNumber dstReg,
regNumber srcReg,
bool canSkip,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_Mov_Extend(var_types srcType,
bool srcInReg,
regNumber dstReg,
regNumber srcReg,
bool canSkip,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_RV_RV(instruction ins,
regNumber reg1,
regNumber reg2,
var_types type = TYP_I_IMPL,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_RV_RV_RV(instruction ins,
regNumber reg1,
regNumber reg2,
regNumber reg3,
emitAttr size,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_IV(instruction ins, cnsval_ssize_t val);
void inst_IV_handle(instruction ins, cnsval_ssize_t val);
void inst_RV_IV(
instruction ins, regNumber reg, target_ssize_t val, emitAttr size, insFlags flags = INS_FLAGS_DONT_CARE);
void inst_ST_RV(instruction ins, TempDsc* tmp, unsigned ofs, regNumber reg, var_types type);
void inst_FS_ST(instruction ins, emitAttr size, TempDsc* tmp, unsigned ofs);
void inst_TT(instruction ins, GenTree* tree, unsigned offs = 0, int shfv = 0, emitAttr size = EA_UNKNOWN);
void inst_TT_RV(instruction ins, emitAttr size, GenTree* tree, regNumber reg);
void inst_RV_TT(instruction ins,
regNumber reg,
GenTree* tree,
unsigned offs = 0,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_RV_SH(instruction ins, emitAttr size, regNumber reg, unsigned val, insFlags flags = INS_FLAGS_DONT_CARE);
#if defined(TARGET_XARCH)
enum class OperandKind{
ClsVar, // [CLS_VAR_ADDR] - "C" in the emitter.
Local, // [Local or spill temp + offset] - "S" in the emitter.
Indir, // [base+index*scale+disp] - "A" in the emitter.
Imm, // immediate - "I" in the emitter.
Reg // reg - "R" in the emitter.
};
class OperandDesc
{
OperandKind m_kind;
union {
struct
{
CORINFO_FIELD_HANDLE m_fieldHnd;
};
struct
{
int m_varNum;
uint16_t m_offset;
};
struct
{
GenTree* m_addr;
GenTreeIndir* m_indir;
var_types m_indirType;
};
struct
{
ssize_t m_immediate;
bool m_immediateNeedsReloc;
};
struct
{
regNumber m_reg;
};
};
public:
OperandDesc(CORINFO_FIELD_HANDLE fieldHnd) : m_kind(OperandKind::ClsVar), m_fieldHnd(fieldHnd)
{
}
OperandDesc(int varNum, uint16_t offset) : m_kind(OperandKind::Local), m_varNum(varNum), m_offset(offset)
{
}
OperandDesc(GenTreeIndir* indir)
: m_kind(OperandKind::Indir), m_addr(indir->Addr()), m_indir(indir), m_indirType(indir->TypeGet())
{
}
OperandDesc(var_types indirType, GenTree* addr)
: m_kind(OperandKind::Indir), m_addr(addr), m_indir(nullptr), m_indirType(indirType)
{
}
OperandDesc(ssize_t immediate, bool immediateNeedsReloc)
: m_kind(OperandKind::Imm), m_immediate(immediate), m_immediateNeedsReloc(immediateNeedsReloc)
{
}
OperandDesc(regNumber reg) : m_kind(OperandKind::Reg), m_reg(reg)
{
}
OperandKind GetKind() const
{
return m_kind;
}
CORINFO_FIELD_HANDLE GetFieldHnd() const
{
assert(m_kind == OperandKind::ClsVar);
return m_fieldHnd;
}
int GetVarNum() const
{
assert(m_kind == OperandKind::Local);
return m_varNum;
}
int GetLclOffset() const
{
assert(m_kind == OperandKind::Local);
return m_offset;
}
// TODO-Cleanup: instead of this rather unsightly workaround with
// "indirForm", create a new abstraction for address modes to pass
// to the emitter (or at least just use "addr"...).
GenTreeIndir* GetIndirForm(GenTreeIndir* pIndirForm)
{
if (m_indir == nullptr)
{
GenTreeIndir indirForm = CodeGen::indirForm(m_indirType, m_addr);
memcpy(pIndirForm, &indirForm, sizeof(GenTreeIndir));
}
else
{
pIndirForm = m_indir;
}
return pIndirForm;
}
ssize_t GetImmediate() const
{
assert(m_kind == OperandKind::Imm);
return m_immediate;
}
bool ImmediateNeedsReloc() const
{
assert(m_kind == OperandKind::Imm);
return m_immediateNeedsReloc;
}
regNumber GetReg() const
{
return m_reg;
}
bool IsContained() const
{
return m_kind != OperandKind::Reg;
}
};
OperandDesc genOperandDesc(GenTree* op);
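    // A hypothetical consumer (sketch only; the emitter calls shown are representative, not a
    // fixed recipe) dispatches on the operand kind so a single helper covers all operand forms:
    //
    //     OperandDesc desc = genOperandDesc(op);
    //     switch (desc.GetKind())
    //     {
    //         case OperandKind::Reg:   emit->emitIns_R_R(ins, attr, reg, desc.GetReg()); break;
    //         case OperandKind::Imm:   emit->emitIns_R_I(ins, attr, reg, desc.GetImmediate()); break;
    //         case OperandKind::Local: emit->emitIns_R_S(ins, attr, reg, desc.GetVarNum(), desc.GetLclOffset()); break;
    //         // Indir and ClsVar map to the "A" and "C" emitter entry points in the same way.
    //     }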
void inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival);
void inst_RV_TT_IV(instruction ins, emitAttr attr, regNumber reg1, GenTree* rmOp, int ival);
void inst_RV_RV_TT(instruction ins, emitAttr size, regNumber targetReg, regNumber op1Reg, GenTree* op2, bool isRMW);
#endif
void inst_set_SV_var(GenTree* tree);
#ifdef TARGET_ARM
bool arm_Valid_Imm_For_Instr(instruction ins, target_ssize_t imm, insFlags flags);
bool arm_Valid_Imm_For_Add(target_ssize_t imm, insFlags flag);
bool arm_Valid_Imm_For_Add_SP(target_ssize_t imm);
#endif
instruction ins_Move_Extend(var_types srcType, bool srcInReg);
instruction ins_Copy(var_types dstType);
instruction ins_Copy(regNumber srcReg, var_types dstType);
instruction ins_FloatConv(var_types to, var_types from);
instruction ins_MathOp(genTreeOps oper, var_types type);
void instGen_Return(unsigned stkArgSize);
enum BarrierKind
{
BARRIER_FULL, // full barrier
        BARRIER_LOAD_ONLY, // load barrier
};
void instGen_MemoryBarrier(BarrierKind barrierKind = BARRIER_FULL);
void instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags flags = INS_FLAGS_DONT_CARE);
void instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags = INS_FLAGS_DONT_CARE DEBUGARG(size_t targetHandle = 0)
DEBUGARG(GenTreeFlags gtFlags = GTF_EMPTY));
#ifdef TARGET_XARCH
instruction genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue);
#endif // TARGET_XARCH
// Maps a GenCondition code to a sequence of conditional jumps or other conditional instructions
// such as X86's SETcc. A sequence of instructions rather than just a single one is required for
// certain floating point conditions.
// For example, X86's UCOMISS sets ZF to indicate equality but it also sets it, together with PF,
// to indicate an unordered result. So for GenCondition::FEQ we first need to check if PF is 0
// and then jump if ZF is 1:
// JP fallThroughBlock
// JE jumpDestBlock
// fallThroughBlock:
// ...
// jumpDestBlock:
//
// This is very similar to the way shortcircuit evaluation of bool AND and OR operators works so
// in order to make the GenConditionDesc mapping tables easier to read, a bool expression-like
// pattern is used to encode the above:
// { EJ_jnp, GT_AND, EJ_je }
// { EJ_jp, GT_OR, EJ_jne }
//
// For more details check inst_JCC and inst_SETCC functions.
//
struct GenConditionDesc
{
emitJumpKind jumpKind1;
genTreeOps oper;
emitJumpKind jumpKind2;
char padTo4Bytes;
static const GenConditionDesc& Get(GenCondition condition)
{
assert(condition.GetCode() < ArrLen(map));
const GenConditionDesc& desc = map[condition.GetCode()];
assert(desc.jumpKind1 != EJ_NONE);
assert((desc.oper == GT_NONE) || (desc.oper == GT_AND) || (desc.oper == GT_OR));
assert((desc.oper == GT_NONE) == (desc.jumpKind2 == EJ_NONE));
return desc;
}
private:
static const GenConditionDesc map[32];
};
void inst_JCC(GenCondition condition, BasicBlock* target);
void inst_SETCC(GenCondition condition, var_types type, regNumber dstReg);
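    // Simplified sketch (jcc/reverse are shorthand here, not real helpers) of how a consumer
    // expands the descriptor into jumps:
    //
    //     const GenConditionDesc& desc = GenConditionDesc::Get(condition);
    //     if (desc.oper == GT_NONE)    // a single jump suffices
    //         jcc(desc.jumpKind1, target);
    //     else if (desc.oper == GT_OR) // either jump takes the branch
    //     {
    //         jcc(desc.jumpKind1, target);
    //         jcc(desc.jumpKind2, target);
    //     }
    //     else // GT_AND: the (reversed) first jump skips over the second
    //     {
    //         jcc(reverse(desc.jumpKind1), skipLabel);
    //         jcc(desc.jumpKind2, target);
    //     }
    //
    // which reproduces the JP/JE pattern shown above for GenCondition::FEQ.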
};
// A simple phase that just invokes a method on the codegen instance
//
class CodeGenPhase final : public Phase
{
public:
CodeGenPhase(CodeGen* _codeGen, Phases _phase, void (CodeGen::*_action)())
: Phase(_codeGen->GetCompiler(), _phase), codeGen(_codeGen), action(_action)
{
}
protected:
virtual PhaseStatus DoPhase() override
{
(codeGen->*action)();
return PhaseStatus::MODIFIED_EVERYTHING;
}
private:
CodeGen* codeGen;
void (CodeGen::*action)();
};
// Wrapper for using CodeGenPhase
//
inline void DoPhase(CodeGen* _codeGen, Phases _phase, void (CodeGen::*_action)())
{
CodeGenPhase phase(_codeGen, _phase, _action);
phase.Run();
}
#endif // _CODEGEN_H_
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
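For illustration only (a simplified sketch of the idea, not the actual diff; the standalone helper name `HasAnyReg` is hypothetical and `GetFieldCount`/`GetRegNumByIdx` follow my reading of the GenTree APIs, so details may differ), the shape of the `gtHasReg()` change is roughly:
```cpp
// Sketch: a multi-reg local "has a register" if *any* of its slots got one,
// not just slot 0 (which may stay on the stack while later slots are enregistered).
bool HasAnyReg(Compiler* comp, GenTree* node)
{
    if (node->IsMultiRegLclVar())
    {
        GenTreeLclVar* lclVar   = node->AsLclVar();
        unsigned       regCount = lclVar->GetFieldCount(comp);
        for (unsigned i = 0; i < regCount; i++)
        {
            if (lclVar->GetRegNumByIdx(i) != REG_NA)
            {
                return true; // at least one slot is enregistered
            }
        }
        return false;
    }
    return node->GetRegNum() != REG_NA; // single-reg case unchanged
}
```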
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/inc/holderinst.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __HOLDERINST_H_
#define __HOLDERINST_H_
// This file contains holder instantiations which we can't put in holder.h because
// the instantiations require _ASSERTE to be defined, which is not always the case
// for placed that include holder.h.
FORCEINLINE void SafeArrayRelease(SAFEARRAY* p)
{
SafeArrayDestroy(p);
}
class SafeArrayHolder : public Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayRelease, NULL>
{
public:
SafeArrayHolder(SAFEARRAY* p = NULL)
: Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayRelease, NULL>(p)
{
}
FORCEINLINE void operator=(SAFEARRAY* p)
{
Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayRelease, NULL>::operator=(p);
}
};
#endif // __HOLDERINST_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __HOLDERINST_H_
#define __HOLDERINST_H_
// This file contains holder instantiations which we can't put in holder.h because
// the instantiations require _ASSERTE to be defined, which is not always the case
// for placed that include holder.h.
FORCEINLINE void SafeArrayRelease(SAFEARRAY* p)
{
SafeArrayDestroy(p);
}
class SafeArrayHolder : public Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayRelease, NULL>
{
public:
SafeArrayHolder(SAFEARRAY* p = NULL)
: Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayRelease, NULL>(p)
{
}
FORCEINLINE void operator=(SAFEARRAY* p)
{
Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayRelease, NULL>::operator=(p);
}
};
#endif // __HOLDERINST_H_
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/debug_api/OutputDebugStringA/test1/helper.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================
**
** Source: helper.c
**
** Purpose: Intended to be the child process of a debugger. Calls
** OutputDebugStringA once with a normal string, once with an empty
** string
**
**
**============================================================*/
#include <palsuite.h>
PALTEST(debug_api_OutputDebugStringA_test1_paltest_outputdebugstringa_test1_helper, "debug_api/OutputDebugStringA/test1/paltest_outputdebugstringa_test1_helper")
{
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
OutputDebugStringA("Foo!\n");
OutputDebugStringA("");
/* give a chance to the debugger process to read the debug string before
exiting */
Sleep(1000);
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================
**
** Source: helper.c
**
** Purpose: Intended to be the child process of a debugger. Calls
** OutputDebugStringA once with a normal string, once with an empty
** string
**
**
**============================================================*/
#include <palsuite.h>
PALTEST(debug_api_OutputDebugStringA_test1_paltest_outputdebugstringa_test1_helper, "debug_api/OutputDebugStringA/test1/paltest_outputdebugstringa_test1_helper")
{
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
OutputDebugStringA("Foo!\n");
OutputDebugStringA("");
/* give a chance to the debugger process to read the debug string before
exiting */
Sleep(1000);
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/inc/corexcep.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*********************************************************************
** **
** CorExcep.h - lists the exception codes used by the CLR. **
** **
*********************************************************************/
#ifndef __COREXCEP_H__
#define __COREXCEP_H__
// All COM+ exceptions are expressed as a RaiseException with this exception
// code. If you change this value, you must also change
// Exception.cs's _COMPlusExceptionCode value.
#define EXCEPTION_MSVC 0xe06d7363 // 0xe0000000 | 'msc'
#define EXCEPTION_COMPLUS 0xe0434352 // 0xe0000000 | 'CCR'
#define EXCEPTION_HIJACK 0xe0434f4e // 0xe0000000 | 'COM'+1
#if defined(_DEBUG)
#define EXCEPTION_INTERNAL_ASSERT 0xe0584d4e // 0xe0000000 | 'XMN'
// An internal Assert will raise this exception when the config
// value "RaiseExceptionOnAssert" si specified. This is used in
// stress to facilitate failure triaging.
#endif
// This is the exception code to report SetupThread failure to caller of reverse pinvoke
// It is misleading to use our COM+ exception code, since this is not a managed exception.
// In the end, we picked e0455858 (EXX).
#define EXCEPTION_EXX 0xe0455858 // 0xe0000000 | 'EXX'
#endif // __COREXCEP_H__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*********************************************************************
** **
** CorExcep.h - lists the exception codes used by the CLR. **
** **
*********************************************************************/
#ifndef __COREXCEP_H__
#define __COREXCEP_H__
// All COM+ exceptions are expressed as a RaiseException with this exception
// code. If you change this value, you must also change
// Exception.cs's _COMPlusExceptionCode value.
#define EXCEPTION_MSVC 0xe06d7363 // 0xe0000000 | 'msc'
#define EXCEPTION_COMPLUS 0xe0434352 // 0xe0000000 | 'CCR'
#define EXCEPTION_HIJACK 0xe0434f4e // 0xe0000000 | 'COM'+1
#if defined(_DEBUG)
#define EXCEPTION_INTERNAL_ASSERT 0xe0584d4e // 0xe0000000 | 'XMN'
// An internal Assert will raise this exception when the config
// value "RaiseExceptionOnAssert" si specified. This is used in
// stress to facilitate failure triaging.
#endif
// This is the exception code to report SetupThread failure to caller of reverse pinvoke
// It is misleading to use our COM+ exception code, since this is not a managed exception.
// In the end, we picked e0455858 (EXX).
#define EXCEPTION_EXX 0xe0455858 // 0xe0000000 | 'EXX'
#endif // __COREXCEP_H__
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/debug/inc/common.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef DEBUGGER_COMMON_H
#define DEBUGGER_COMMON_H
//
// Conversions between pointers and CORDB_ADDRESS
// These are 3gb safe - we use zero-extension for CORDB_ADDRESS.
// Note that this is a different semantics from CLRDATA_ADDRESS which is sign-extended.
//
// @dbgtodo : This confuses the host and target address spaces. Ideally we'd have
// conversions between PTR types (eg. DPTR) and CORDB_ADDRESS, and not need conversions
// from host pointer types to CORDB_ADDRESS.
//
#if defined(TARGET_X86) || defined(TARGET_ARM)
inline CORDB_ADDRESS PTR_TO_CORDB_ADDRESS(const void* ptr)
{
SUPPORTS_DAC;
// Casting a void* to a ULONG is not 64-bit safe and triggers compiler warning C3411.
// But this is x86 only, so we know it's ok. Use PtrToUlong to do the conversion
// without invoking the error.
return (CORDB_ADDRESS)(PtrToUlong(ptr));
}
inline CORDB_ADDRESS PTR_TO_CORDB_ADDRESS(UINT_PTR ptr)
{
SUPPORTS_DAC;
// PtrToUlong
return (CORDB_ADDRESS)(ULONG)(ptr);
}
#else
#define PTR_TO_CORDB_ADDRESS(_ptr) (CORDB_ADDRESS)(ULONG_PTR)(_ptr)
#endif //TARGET_X86 || TARGET_ARM
#define CORDB_ADDRESS_TO_PTR(_cordb_addr) ((LPVOID)(SIZE_T)(_cordb_addr))
// Determine if an exception record is for a CLR debug event, and get the payload.
CORDB_ADDRESS IsEventDebuggerNotification(const EXCEPTION_RECORD * pRecord, CORDB_ADDRESS pClrBaseAddress);
#if defined(FEATURE_DBGIPC_TRANSPORT_DI) || defined(FEATURE_DBGIPC_TRANSPORT_VM)
struct DebuggerIPCEvent;
void InitEventForDebuggerNotification(DEBUG_EVENT * pDebugEvent,
CORDB_ADDRESS pClrBaseAddress,
DebuggerIPCEvent * pIPCEvent);
#endif // (FEATURE_DBGIPC_TRANSPORT_DI || FEATURE_DBGIPC_TRANSPORT_VM)
void GetPidDecoratedName(_Out_writes_z_(cBufSizeInChars) WCHAR * pBuf,
int cBufSizeInChars,
const WCHAR * pPrefix,
DWORD pid);
//
// This macro is used in CORDbgCopyThreadContext().
//
// CORDbgCopyThreadContext() does an intelligent copy
// from pSrc to pDst, respecting the ContextFlags of both contexts.
//
#define CopyContextChunk(_t, _f, _end, _flag) \
{ \
LOG((LF_CORDB, LL_INFO1000000, \
"CP::CTC: copying " #_flag ":" FMT_ADDR "<---" FMT_ADDR "(%d)\n", \
DBG_ADDR(_t), DBG_ADDR(_f), ((UINT_PTR)(_end) - (UINT_PTR)_t))); \
memcpy((_t), (_f), ((UINT_PTR)(_end) - (UINT_PTR)(_t))); \
}
//
// CORDbgCopyThreadContext() does an intelligent copy from pSrc to pDst,
// respecting the ContextFlags of both contexts.
//
struct DebuggerREGDISPLAY;
extern void CORDbgCopyThreadContext(DT_CONTEXT* pDst, const DT_CONTEXT* pSrc);
extern void CORDbgSetDebuggerREGDISPLAYFromContext(DebuggerREGDISPLAY *pDRD,
DT_CONTEXT* pContext);
//---------------------------------------------------------------------------------------
//
// Return the size of the CONTEXT required for the specified context flags.
//
// Arguments:
// flags - this is the equivalent of the ContextFlags field of a CONTEXT
//
// Return Value:
// size of the CONTEXT required
//
// Notes:
// On WIN64 platforms this function will always return sizeof(CONTEXT).
//
inline
ULONG32 ContextSizeForFlags(ULONG32 flags)
{
#if defined(CONTEXT_EXTENDED_REGISTERS) && defined(TARGET_X86)
// Older platforms didn't have extended registers in
// the context definition so only enforce that size
// if the extended register flag is set.
if ((flags & CONTEXT_EXTENDED_REGISTERS) != CONTEXT_EXTENDED_REGISTERS)
{
return offsetof(T_CONTEXT, ExtendedRegisters);
}
else
#endif // TARGET_X86
{
return sizeof(T_CONTEXT);
}
}
//---------------------------------------------------------------------------------------
//
// Given the size of a buffer and the context flags, check whether the buffer is sufficiently large
// to hold the CONTEXT.
//
// Arguments:
// size - size of a buffer
// flags - this is the equivalent of the ContextFlags field of a CONTEXT
//
// Return Value:
// TRUE if the buffer is large enough to hold the CONTEXT
//
inline
BOOL CheckContextSizeForFlags(ULONG32 size, ULONG32 flags)
{
return (size >= ContextSizeForFlags(flags));
}
//---------------------------------------------------------------------------------------
//
// Given the size of a buffer and the BYTE array representation of a CONTEXT,
// check whether the buffer is sufficiently large to hold the CONTEXT.
//
// Arguments:
//    size     - size of a buffer
//    pbBuffer - the BYTE array representation of a CONTEXT, used to read its ContextFlags
//
// Return Value:
// TRUE if the buffer is large enough to hold the CONTEXT
//
inline
BOOL CheckContextSizeForBuffer(ULONG32 size, const BYTE * pbBuffer)
{
return ( ( size >= (offsetof(T_CONTEXT, ContextFlags) + sizeof(ULONG32)) ) &&
CheckContextSizeForFlags(size, (reinterpret_cast<const T_CONTEXT *>(pbBuffer))->ContextFlags) );
}
/* ------------------------------------------------------------------------- *
* Constant declarations
* ------------------------------------------------------------------------- */
enum
{
NULL_THREAD_ID = -1,
NULL_PROCESS_ID = -1
};
/* ------------------------------------------------------------------------- *
* Macros
* ------------------------------------------------------------------------- */
//
// CANNOT USE IsBad*Ptr() methods here. They are *banned* APIs because of various
// reasons (see http://winweb/wincet/bannedapis.htm).
//
#define VALIDATE_POINTER_TO_OBJECT(ptr, type) \
if ((ptr) == NULL) \
{ \
return E_INVALIDARG; \
}
#define VALIDATE_POINTER_TO_OBJECT_OR_NULL(ptr, type)
//
// CANNOT USE IsBad*Ptr() methods here. They are *banned* APIs because of various
// reasons (see http://winweb/wincet/bannedapis.htm).
//
#define VALIDATE_POINTER_TO_OBJECT_ARRAY(ptr, type, cElt, fRead, fWrite) \
if ((ptr) == NULL) \
{ \
return E_INVALIDARG; \
}
#define VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(ptr, type,cElt,fRead,fWrite)
/* ------------------------------------------------------------------------- *
* Function Prototypes
* ------------------------------------------------------------------------- */
// Linear search through an array of NativeVarInfos, to find
// the variable of index dwIndex, valid at the given ip.
//
// returns CORDBG_E_IL_VAR_NOT_AVAILABLE if the variable isn't
// valid at the given ip.
//
// This should be inlined
HRESULT FindNativeInfoInILVariableArray(DWORD dwIndex,
SIZE_T ip,
ICorDebugInfo::NativeVarInfo **ppNativeInfo,
unsigned int nativeInfoCount,
ICorDebugInfo::NativeVarInfo *nativeInfo);
// struct DebuggerILToNativeMap: Holds the IL to Native offset map
// Great pains are taken to ensure that each entry corresponds to the
// first IL instruction in a source line. It isn't actually a mapping
// of _every_ IL instruction in a method, just those for source lines.
// SIZE_T ilOffset: IL offset of a source line.
// SIZE_T nativeStartOffset: Offset within the method where the native
// instructions corresponding to the IL offset begin.
// SIZE_T nativeEndOffset: Offset within the method where the native
// instructions corresponding to the IL offset end.
//
// Note: any changes to this struct need to be reflected in
// COR_DEBUG_IL_TO_NATIVE_MAP in CorDebug.idl. These structs must
// match exactly.
//
struct DebuggerILToNativeMap
{
ULONG ilOffset;
ULONG nativeStartOffset;
ULONG nativeEndOffset;
ICorDebugInfo::SourceTypes source;
};
void ExportILToNativeMap(ULONG32 cMap,
COR_DEBUG_IL_TO_NATIVE_MAP mapExt[],
struct DebuggerILToNativeMap mapInt[],
SIZE_T sizeOfCode);
#include <primitives.h>
// ----------------------------------------------------------------------------
// IsPatchInRequestedRange
//
// Description:
// This function checks if a patch falls (fully or partially) in the requested range of memory.
//
// Arguments:
// * requestedAddr - the address of the memory range
// * requestedSize - the size of the memory range
// * patchAddr - the address of the patch
// * pPRD - the opcode of the patch
//
// Return Value:
// Return TRUE if the patch is fully or partially in the requested memory range.
//
// Notes:
// Currently this function is called both from the RS (via code:CordbProcess.ReadMemory and
// code:CordbProcess.WriteMemory) and from DAC. When we DACize the two functions mentioned above,
// this function should be called from DAC only, and we should use a MemoryRange here.
//
inline bool IsPatchInRequestedRange(CORDB_ADDRESS requestedAddr,
SIZE_T requestedSize,
CORDB_ADDRESS patchAddr)
{
SUPPORTS_DAC;
if (requestedAddr == 0)
return false;
// Note that patchEnd points to the byte immediately AFTER the patch, so patchEnd is NOT
// part of the patch.
CORDB_ADDRESS patchEnd = GetPatchEndAddr(patchAddr);
// We have three cases:
// 1) the entire patch is in the requested range
// 2) the beginning of the requested range is covered by the patch
// 3) the end of the requested range is covered by the patch
//
// Note that on x86, since the break instruction only takes up one byte, the following condition
// degenerates to case 1 only.
return (((requestedAddr <= patchAddr) && (patchEnd <= (requestedAddr + requestedSize))) ||
((patchAddr <= requestedAddr) && (requestedAddr < patchEnd)) ||
((patchAddr <= (requestedAddr + requestedSize - 1)) && ((requestedAddr + requestedSize - 1) < patchEnd)));
}
inline CORDB_ADDRESS ALIGN_ADDRESS( CORDB_ADDRESS val, CORDB_ADDRESS alignment )
{
LIMITED_METHOD_DAC_CONTRACT;
// alignment must be a power of 2 for this implementation to work (need modulo otherwise)
_ASSERTE( 0 == (alignment & (alignment - 1)) );
CORDB_ADDRESS result = (val + (alignment - 1)) & ~(alignment - 1);
_ASSERTE( result >= val ); // check for overflow
return result;
}
#include "dacprivate.h" // for MSLAYOUT
#include "dumpcommon.h"
#endif //DEBUGGER_COMMON_H
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef DEBUGGER_COMMON_H
#define DEBUGGER_COMMON_H
//
// Conversions between pointers and CORDB_ADDRESS
// These are 3gb safe - we use zero-extension for CORDB_ADDRESS.
// Note that this is a different semantics from CLRDATA_ADDRESS which is sign-extended.
//
// @dbgtodo : This confuses the host and target address spaces. Ideally we'd have
// conversions between PTR types (eg. DPTR) and CORDB_ADDRESS, and not need conversions
// from host pointer types to CORDB_ADDRESS.
//
#if defined(TARGET_X86) || defined(TARGET_ARM)
inline CORDB_ADDRESS PTR_TO_CORDB_ADDRESS(const void* ptr)
{
SUPPORTS_DAC;
// Casting a void* to a ULONG is not 64-bit safe and triggers compiler warning C3411.
// But this is x86 only, so we know it's ok. Use PtrToUlong to do the conversion
// without invoking the error.
return (CORDB_ADDRESS)(PtrToUlong(ptr));
}
inline CORDB_ADDRESS PTR_TO_CORDB_ADDRESS(UINT_PTR ptr)
{
SUPPORTS_DAC;
// PtrToUlong
return (CORDB_ADDRESS)(ULONG)(ptr);
}
#else
#define PTR_TO_CORDB_ADDRESS(_ptr) (CORDB_ADDRESS)(ULONG_PTR)(_ptr)
#endif //TARGET_X86 || TARGET_ARM
#define CORDB_ADDRESS_TO_PTR(_cordb_addr) ((LPVOID)(SIZE_T)(_cordb_addr))
// Determine if an exception record is for a CLR debug event, and get the payload.
CORDB_ADDRESS IsEventDebuggerNotification(const EXCEPTION_RECORD * pRecord, CORDB_ADDRESS pClrBaseAddress);
#if defined(FEATURE_DBGIPC_TRANSPORT_DI) || defined(FEATURE_DBGIPC_TRANSPORT_VM)
struct DebuggerIPCEvent;
void InitEventForDebuggerNotification(DEBUG_EVENT * pDebugEvent,
CORDB_ADDRESS pClrBaseAddress,
DebuggerIPCEvent * pIPCEvent);
#endif // (FEATURE_DBGIPC_TRANSPORT_DI || FEATURE_DBGIPC_TRANSPORT_VM)
void GetPidDecoratedName(_Out_writes_z_(cBufSizeInChars) WCHAR * pBuf,
int cBufSizeInChars,
const WCHAR * pPrefix,
DWORD pid);
//
// This macro is used in CORDbgCopyThreadContext().
//
// CORDbgCopyThreadContext() does an intelligent copy
// from pSrc to pDst, respecting the ContextFlags of both contexts.
//
#define CopyContextChunk(_t, _f, _end, _flag) \
{ \
LOG((LF_CORDB, LL_INFO1000000, \
"CP::CTC: copying " #_flag ":" FMT_ADDR "<---" FMT_ADDR "(%d)\n", \
DBG_ADDR(_t), DBG_ADDR(_f), ((UINT_PTR)(_end) - (UINT_PTR)_t))); \
memcpy((_t), (_f), ((UINT_PTR)(_end) - (UINT_PTR)(_t))); \
}
//
// CORDbgCopyThreadContext() does an intelligent copy from pSrc to pDst,
// respecting the ContextFlags of both contexts.
//
struct DebuggerREGDISPLAY;
extern void CORDbgCopyThreadContext(DT_CONTEXT* pDst, const DT_CONTEXT* pSrc);
extern void CORDbgSetDebuggerREGDISPLAYFromContext(DebuggerREGDISPLAY *pDRD,
DT_CONTEXT* pContext);
//---------------------------------------------------------------------------------------
//
// Return the size of the CONTEXT required for the specified context flags.
//
// Arguments:
// flags - this is the equivalent of the ContextFlags field of a CONTEXT
//
// Return Value:
// size of the CONTEXT required
//
// Notes:
// On WIN64 platforms this function will always return sizeof(CONTEXT).
//
inline
ULONG32 ContextSizeForFlags(ULONG32 flags)
{
#if defined(CONTEXT_EXTENDED_REGISTERS) && defined(TARGET_X86)
// Older platforms didn't have extended registers in
// the context definition so only enforce that size
// if the extended register flag is set.
if ((flags & CONTEXT_EXTENDED_REGISTERS) != CONTEXT_EXTENDED_REGISTERS)
{
return offsetof(T_CONTEXT, ExtendedRegisters);
}
else
#endif // TARGET_X86
{
return sizeof(T_CONTEXT);
}
}
//---------------------------------------------------------------------------------------
//
// Given the size of a buffer and the context flags, check whether the buffer is sufficiently large
// to hold the CONTEXT.
//
// Arguments:
// size - size of a buffer
// flags - this is the equivalent of the ContextFlags field of a CONTEXT
//
// Return Value:
// TRUE if the buffer is large enough to hold the CONTEXT
//
inline
BOOL CheckContextSizeForFlags(ULONG32 size, ULONG32 flags)
{
return (size >= ContextSizeForFlags(flags));
}
//---------------------------------------------------------------------------------------
//
// Given the size of a buffer and the BYTE array representation of a CONTEXT,
// check whether the buffer is sufficiently large to hold the CONTEXT.
//
// Arguments:
//    size     - size of a buffer
//    pbBuffer - the BYTE array representation of a CONTEXT, used to read its ContextFlags
//
// Return Value:
// TRUE if the buffer is large enough to hold the CONTEXT
//
inline
BOOL CheckContextSizeForBuffer(ULONG32 size, const BYTE * pbBuffer)
{
return ( ( size >= (offsetof(T_CONTEXT, ContextFlags) + sizeof(ULONG32)) ) &&
CheckContextSizeForFlags(size, (reinterpret_cast<const T_CONTEXT *>(pbBuffer))->ContextFlags) );
}
/* ------------------------------------------------------------------------- *
* Constant declarations
* ------------------------------------------------------------------------- */
enum
{
NULL_THREAD_ID = -1,
NULL_PROCESS_ID = -1
};
/* ------------------------------------------------------------------------- *
* Macros
* ------------------------------------------------------------------------- */
//
// CANNOT USE IsBad*Ptr() methods here. They are *banned* APIs because of various
// reasons (see http://winweb/wincet/bannedapis.htm).
//
#define VALIDATE_POINTER_TO_OBJECT(ptr, type) \
if ((ptr) == NULL) \
{ \
return E_INVALIDARG; \
}
#define VALIDATE_POINTER_TO_OBJECT_OR_NULL(ptr, type)
//
// CANNOT USE IsBad*Ptr() methods here. They are *banned* APIs because of various
// reasons (see http://winweb/wincet/bannedapis.htm).
//
#define VALIDATE_POINTER_TO_OBJECT_ARRAY(ptr, type, cElt, fRead, fWrite) \
if ((ptr) == NULL) \
{ \
return E_INVALIDARG; \
}
#define VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(ptr, type,cElt,fRead,fWrite)
/* ------------------------------------------------------------------------- *
* Function Prototypes
* ------------------------------------------------------------------------- */
// Linear search through an array of NativeVarInfos, to find
// the variable of index dwIndex, valid at the given ip.
//
// returns CORDBG_E_IL_VAR_NOT_AVAILABLE if the variable isn't
// valid at the given ip.
//
// This should be inlined
HRESULT FindNativeInfoInILVariableArray(DWORD dwIndex,
SIZE_T ip,
ICorDebugInfo::NativeVarInfo **ppNativeInfo,
unsigned int nativeInfoCount,
ICorDebugInfo::NativeVarInfo *nativeInfo);
// struct DebuggerILToNativeMap: Holds the IL to Native offset map
// Great pains are taken to ensure that each entry corresponds to the
// first IL instruction in a source line. It isn't actually a mapping
// of _every_ IL instruction in a method, just those for source lines.
// SIZE_T ilOffset: IL offset of a source line.
// SIZE_T nativeStartOffset: Offset within the method where the native
// instructions corresponding to the IL offset begin.
// SIZE_T nativeEndOffset: Offset within the method where the native
// instructions corresponding to the IL offset end.
//
// Note: any changes to this struct need to be reflected in
// COR_DEBUG_IL_TO_NATIVE_MAP in CorDebug.idl. These structs must
// match exactly.
//
struct DebuggerILToNativeMap
{
ULONG ilOffset;
ULONG nativeStartOffset;
ULONG nativeEndOffset;
ICorDebugInfo::SourceTypes source;
};
void ExportILToNativeMap(ULONG32 cMap,
COR_DEBUG_IL_TO_NATIVE_MAP mapExt[],
struct DebuggerILToNativeMap mapInt[],
SIZE_T sizeOfCode);
#include <primitives.h>
// ----------------------------------------------------------------------------
// IsPatchInRequestedRange
//
// Description:
// This function checks if a patch falls (fully or partially) in the requested range of memory.
//
// Arguments:
// * requestedAddr - the address of the memory range
// * requestedSize - the size of the memory range
// * patchAddr - the address of the patch
// * pPRD - the opcode of the patch
//
// Return Value:
// Return TRUE if the patch is fully or partially in the requested memory range.
//
// Notes:
// Currently this function is called both from the RS (via code:CordbProcess.ReadMemory and
// code:CordbProcess.WriteMemory) and from DAC. When we DACize the two functions mentioned above,
// this function should be called from DAC only, and we should use a MemoryRange here.
//
inline bool IsPatchInRequestedRange(CORDB_ADDRESS requestedAddr,
SIZE_T requestedSize,
CORDB_ADDRESS patchAddr)
{
SUPPORTS_DAC;
if (requestedAddr == 0)
return false;
// Note that patchEnd points to the byte immediately AFTER the patch, so patchEnd is NOT
// part of the patch.
CORDB_ADDRESS patchEnd = GetPatchEndAddr(patchAddr);
// We have three cases:
// 1) the entire patch is in the requested range
// 2) the beginning of the requested range is covered by the patch
// 3) the end of the requested range is covered by the patch
//
// Note that on x86, since the break instruction only takes up one byte, the following condition
// degenerates to case 1 only.
return (((requestedAddr <= patchAddr) && (patchEnd <= (requestedAddr + requestedSize))) ||
((patchAddr <= requestedAddr) && (requestedAddr < patchEnd)) ||
((patchAddr <= (requestedAddr + requestedSize - 1)) && ((requestedAddr + requestedSize - 1) < patchEnd)));
}
inline CORDB_ADDRESS ALIGN_ADDRESS( CORDB_ADDRESS val, CORDB_ADDRESS alignment )
{
LIMITED_METHOD_DAC_CONTRACT;
// alignment must be a power of 2 for this implementation to work (need modulo otherwise)
_ASSERTE( 0 == (alignment & (alignment - 1)) );
CORDB_ADDRESS result = (val + (alignment - 1)) & ~(alignment - 1);
_ASSERTE( result >= val ); // check for overflow
return result;
}
#include "dacprivate.h" // for MSLAYOUT
#include "dumpcommon.h"
#endif //DEBUGGER_COMMON_H
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/mono/mono/utils/mono-mmap-internals.h | /**
* \file
* Internal virtual memory stuff.
*
* Copyright (C) 2014 Xamarin Inc
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_UTILS_MMAP_INTERNAL_H__
#define __MONO_UTILS_MMAP_INTERNAL_H__
#include "mono-compiler.h"
void *
mono_malloc_shared_area (int pid);
char*
mono_aligned_address (char *mem, size_t size, size_t alignment);
void
mono_account_mem (MonoMemAccountType type, ssize_t size);
gboolean
mono_valloc_can_alloc (size_t size);
void
mono_valloc_set_limit (size_t size);
#endif /* __MONO_UTILS_MMAP_INTERNAL_H__ */
| /**
* \file
* Internal virtual memory stuff.
*
* Copyright (C) 2014 Xamarin Inc
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_UTILS_MMAP_INTERNAL_H__
#define __MONO_UTILS_MMAP_INTERNAL_H__
#include "mono-compiler.h"
void *
mono_malloc_shared_area (int pid);
char*
mono_aligned_address (char *mem, size_t size, size_t alignment);
void
mono_account_mem (MonoMemAccountType type, ssize_t size);
gboolean
mono_valloc_can_alloc (size_t size);
void
mono_valloc_set_limit (size_t size);
#endif /* __MONO_UTILS_MMAP_INTERNAL_H__ */
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/filemapping_memmgt/GetModuleFileNameA/test2/GetModuleFileNameA.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================
**
** Source: getmodulefilenamea.c
**
** Purpose: Positive test the GetModuleFileNameA API.
** Call GetModuleFileName to retrieve current process
** full path and file name by passing a NULL module handle
**
**
**============================================================*/
#include <palsuite.h>
#define MODULENAMEBUFFERSIZE 1024
PALTEST(filemapping_memmgt_GetModuleFileNameA_test2_paltest_getmodulefilenamea_test2, "filemapping_memmgt/GetModuleFileNameA/test2/paltest_getmodulefilenamea_test2")
{
DWORD ModuleNameLength;
char ModuleFileNameBuf[MODULENAMEBUFFERSIZE]="";
int err;
//Initialize the PAL environment
err = PAL_Initialize(argc, argv);
if(0 != err)
{
ExitProcess(FAIL);
}
//retrieve the current process full path and file name
//by passing a NULL module handle
ModuleNameLength = GetModuleFileName(
NULL, //a NULL handle
ModuleFileNameBuf,//buffer for module file name
MODULENAMEBUFFERSIZE);
if(0 == ModuleNameLength)
{
Fail("\nFailed to all GetModuleFileName API!\n");
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================
**
** Source: getmodulefilenamea.c
**
** Purpose: Positive test the GetModuleFileNameA API.
** Call GetModuleFileName to retrieve current process
** full path and file name by passing a NULL module handle
**
**
**============================================================*/
#include <palsuite.h>
#define MODULENAMEBUFFERSIZE 1024
PALTEST(filemapping_memmgt_GetModuleFileNameA_test2_paltest_getmodulefilenamea_test2, "filemapping_memmgt/GetModuleFileNameA/test2/paltest_getmodulefilenamea_test2")
{
DWORD ModuleNameLength;
char ModuleFileNameBuf[MODULENAMEBUFFERSIZE]="";
int err;
//Initialize the PAL environment
err = PAL_Initialize(argc, argv);
if(0 != err)
{
ExitProcess(FAIL);
}
//retrieve the current process full path and file name
//by passing a NULL module handle
ModuleNameLength = GetModuleFileName(
NULL, //a NULL handle
ModuleFileNameBuf,//buffer for module file name
MODULENAMEBUFFERSIZE);
if(0 == ModuleNameLength)
{
Fail("\nFailed to all GetModuleFileName API!\n");
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/tools/metainfo/mdinfo.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <windows.h>
#include <objbase.h>
#include <crtdbg.h>
#include <assert.h>
#include <corpriv.h>
#include <cor.h>
#include "assert.h"
#include "corerror.h"
#include <winwrap.h>
#include <prettyprintsig.h>
#include <cahlpr.h>
#include <limits.h>
#include "mdinfo.h"
#define ENUM_BUFFER_SIZE 10
#define TAB_SIZE 8
#define ISFLAG(p,x) if (Is##p##x(flags)) strcat_s(sFlags,STRING_BUFFER_LEN, "["#x "] ");
extern HRESULT _FillVariant(
BYTE bCPlusTypeFlag,
void const *pValue,
ULONG cbValue,
VARIANT *pvar);
// Validator declarations.
extern DWORD g_ValModuleType;
// Tables for mapping element type to text
const char *g_szMapElementType[] =
{
"End", // 0x0
"Void", // 0x1
"Boolean",
"Char",
"I1",
"UI1",
"I2", // 0x6
"UI2",
"I4",
"UI4",
"I8",
"UI8",
"R4",
"R8",
"String",
"Ptr", // 0xf
"ByRef", // 0x10
"ValueClass",
"Class",
"Var",
"MDArray", // 0x14
"GenericInst",
"TypedByRef",
"VALUEARRAY",
"I",
"U",
"R", // 0x1a
"FNPTR",
"Object",
"SZArray",
"MVar",
"CMOD_REQD",
"CMOD_OPT",
"INTERNAL",
};
const char *g_szMapUndecorateType[] =
{
"", // 0x0
"void",
"boolean",
"Char",
"byte",
"unsigned byte",
"short",
"unsigned short",
"int",
"unsigned int",
"long",
"unsigned long",
"float",
"double",
"String",
"*", // 0xf
"ByRef",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"Function Pointer",
"Object",
"",
"",
"CMOD_REQD",
"CMOD_OPT",
"INTERNAL",
};
// Provide enough entries for IMAGE_CEE_CS_CALLCONV_MASK (defined in CorHdr.h)
const char *g_strCalling[] =
{
"[DEFAULT]",
"[C]",
"[STDCALL]",
"[THISCALL]",
"[FASTCALL]",
"[VARARG]",
"[FIELD]",
"[LOCALSIG]",
"[PROPERTY]",
"[UNMANAGED]",
"[GENERICINST]",
"[NATIVEVARARG]",
"[INVALID]",
"[INVALID]",
"[INVALID]",
"[INVALID]"
};
const char *g_szNativeType[] =
{
"NATIVE_TYPE_END(DEPRECATED!)", // = 0x0, //DEPRECATED
"NATIVE_TYPE_VOID(DEPRECATED!)", // = 0x1, //DEPRECATED
"NATIVE_TYPE_BOOLEAN", // = 0x2, // (4 byte boolean value: TRUE = non-zero, FALSE = 0)
"NATIVE_TYPE_I1", // = 0x3,
"NATIVE_TYPE_U1", // = 0x4,
"NATIVE_TYPE_I2", // = 0x5,
"NATIVE_TYPE_U2", // = 0x6,
"NATIVE_TYPE_I4", // = 0x7,
"NATIVE_TYPE_U4", // = 0x8,
"NATIVE_TYPE_I8", // = 0x9,
"NATIVE_TYPE_U8", // = 0xa,
"NATIVE_TYPE_R4", // = 0xb,
"NATIVE_TYPE_R8", // = 0xc,
"NATIVE_TYPE_SYSCHAR(DEPRECATED!)", // = 0xd, //DEPRECATED
"NATIVE_TYPE_VARIANT(DEPRECATED!)", // = 0xe, //DEPRECATED
"NATIVE_TYPE_CURRENCY", // = 0xf,
"NATIVE_TYPE_PTR(DEPRECATED!)", // = 0x10, //DEPRECATED
"NATIVE_TYPE_DECIMAL(DEPRECATED!)", // = 0x11, //DEPRECATED
"NATIVE_TYPE_DATE(DEPRECATED!)", // = 0x12, //DEPRECATED
"NATIVE_TYPE_BSTR", // = 0x13,
"NATIVE_TYPE_LPSTR", // = 0x14,
"NATIVE_TYPE_LPWSTR", // = 0x15,
"NATIVE_TYPE_LPTSTR", // = 0x16,
"NATIVE_TYPE_FIXEDSYSSTRING", // = 0x17,
"NATIVE_TYPE_OBJECTREF(DEPRECATED!)", // = 0x18, //DEPRECATED
"NATIVE_TYPE_IUNKNOWN", // = 0x19,
"NATIVE_TYPE_IDISPATCH", // = 0x1a,
"NATIVE_TYPE_STRUCT", // = 0x1b,
"NATIVE_TYPE_INTF", // = 0x1c,
"NATIVE_TYPE_SAFEARRAY", // = 0x1d,
"NATIVE_TYPE_FIXEDARRAY", // = 0x1e,
"NATIVE_TYPE_INT", // = 0x1f,
"NATIVE_TYPE_UINT", // = 0x20,
"NATIVE_TYPE_NESTEDSTRUCT(DEPRECATED!)", // = 0x21, //DEPRECATED (use "NATIVE_TYPE_STRUCT)
"NATIVE_TYPE_BYVALSTR", // = 0x22,
"NATIVE_TYPE_ANSIBSTR", // = 0x23,
"NATIVE_TYPE_TBSTR", // = 0x24, // select BSTR or ANSIBSTR depending on platform
"NATIVE_TYPE_VARIANTBOOL", // = 0x25, // (2-byte boolean value: TRUE = -1, FALSE = 0)
"NATIVE_TYPE_FUNC", // = 0x26,
"NATIVE_TYPE_LPVOID", // = 0x27, // blind pointer (no deep marshaling)
"NATIVE_TYPE_ASANY", // = 0x28,
"<UNDEFINED NATIVE TYPE 0x29>",
"NATIVE_TYPE_ARRAY", // = 0x2a,
"NATIVE_TYPE_LPSTRUCT", // = 0x2b,
"NATIVE_TYPE_CUSTOMMARSHALER", // = 0x2c, // Custom marshaler.
"NATIVE_TYPE_ERROR", // = 0x2d, // VT_HRESULT when exporting to a typelib.
};
size_t g_cbCoffNames = 0;
mdMethodDef g_tkEntryPoint = 0; // integration with ILDASM
// helper to init signature buffer
void MDInfo::InitSigBuffer()
{
strcpy_s((LPSTR)m_sigBuf.Ptr(), 1, "");
} // void MDInfo::InitSigBuffer()
// helper to append a string into the signature buffer. If size of signature buffer is not big enough,
// we will grow it.
HRESULT MDInfo::AddToSigBuffer(_In_z_ const char *string)
{
HRESULT hr;
size_t LL = strlen((LPSTR)m_sigBuf.Ptr()) + strlen(string) + 1;
IfFailRet( m_sigBuf.ReSizeNoThrow(LL) );
strcat_s((LPSTR)m_sigBuf.Ptr(), LL, string);
return NOERROR;
} // HRESULT MDInfo::AddToSigBuffer()
MDInfo::MDInfo(IMetaDataImport2 *pImport, IMetaDataAssemblyImport *pAssemblyImport, LPCWSTR szScope, strPassBackFn inPBFn, ULONG DumpFilter)
{ // This constructor is specific to ILDASM/MetaInfo integration
_ASSERTE(pImport != NULL);
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType));
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX);
Init(inPBFn, (DUMP_FILTER)DumpFilter);
m_pImport = pImport;
m_pImport->AddRef();
if ((m_pAssemblyImport = pAssemblyImport))
m_pAssemblyImport->AddRef();
else
{
HRESULT hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport);
if (FAILED(hr))
Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr);
}
} // MDInfo::MDInfo()
MDInfo::MDInfo(IMetaDataDispenserEx *pDispenser, LPCWSTR szScope, strPassBackFn inPBFn, ULONG DumpFilter)
{
HRESULT hr = S_OK;
VARIANT value;
_ASSERTE(pDispenser != NULL && inPBFn != NULL);
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType));
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX);
Init(inPBFn, (DUMP_FILTER)DumpFilter);
// Attempt to open scope on given file
V_VT(&value) = VT_UI4;
V_UI4(&value) = MDImportOptionAll;
if (FAILED(hr = pDispenser->SetOption(MetaDataImportOption, &value)))
Error("SetOption failed.", hr);
hr = pDispenser->OpenScope(szScope, ofNoTransform, IID_IMetaDataImport2, (IUnknown**)&m_pImport);
if (hr == CLDB_E_BADUPDATEMODE)
{
V_VT(&value) = VT_UI4;
V_UI4(&value) = MDUpdateIncremental;
if (FAILED(hr = pDispenser->SetOption(MetaDataSetUpdate, &value)))
Error("SetOption failed.", hr);
hr = pDispenser->OpenScope(szScope, ofNoTransform, IID_IMetaDataImport2, (IUnknown**)&m_pImport);
}
if (FAILED(hr))
Error("OpenScope failed", hr);
// Query for the IMetaDataAssemblyImport interface.
hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport);
if (FAILED(hr))
Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr);
} // MDInfo::MDInfo()
MDInfo::MDInfo(IMetaDataDispenserEx *pDispenser, PBYTE pbMetaData, DWORD dwSize, strPassBackFn inPBFn, ULONG DumpFilter)
{
_ASSERTE(pDispenser != NULL && inPBFn != NULL);
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType));
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX);
Init(inPBFn, (DUMP_FILTER)DumpFilter);
// Attempt to open scope on manifest. It's valid for this to fail, because
// the blob we open may just be the assembly resources (the space is
// overloaded until we remove LM -a assemblies, at which point this
// constructor should probably be removed too).
HRESULT hr;
VARIANT value;
V_VT(&value) = VT_UI4;
V_UI4(&value) = MDImportOptionAll;
if (FAILED(hr = pDispenser->SetOption(MetaDataImportOption, &value)))
Error("SetOption failed.", hr);
if (SUCCEEDED(hr = pDispenser->OpenScopeOnMemory(pbMetaData, dwSize, ofNoTransform,
IID_IMetaDataImport2, (IUnknown**)&m_pImport)))
{
// Query for the IMetaDataAssemblyImport interface.
hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport);
if (FAILED(hr))
Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr);
}
} // MDInfo::MDInfo()
void MDInfo::Init(
strPassBackFn inPBFn, // Callback to write text.
DUMP_FILTER DumpFilter) // Flags to control the dump.
{
m_pbFn = inPBFn;
m_DumpFilter = DumpFilter;
m_pTables = NULL;
m_pTables2 = NULL;
m_pImport = NULL;
m_pAssemblyImport = NULL;
} // void MDInfo::Init()
// Destructor
MDInfo::~MDInfo()
{
if (m_pImport)
m_pImport->Release();
if (m_pAssemblyImport)
m_pAssemblyImport->Release();
if (m_pTables)
m_pTables->Release();
if (m_pTables2)
m_pTables2->Release();
} // MDInfo::~MDInfo()
//=====================================================================================================================
// DisplayMD() function
//
// Displays the meta data content of a file
void MDInfo::DisplayMD()
{
if ((m_DumpFilter & dumpAssem) && m_pAssemblyImport)
DisplayAssemblyInfo();
WriteLine("===========================================================");
// Metadata itself: Raw or normal view
if (m_DumpFilter & (dumpSchema | dumpHeader | dumpCSV | dumpRaw | dumpStats | dumpRawHeaps))
DisplayRaw();
else
{
DisplayVersionInfo();
DisplayScopeInfo();
WriteLine("===========================================================");
DisplayGlobalFunctions();
DisplayGlobalFields();
DisplayGlobalMemberRefs();
DisplayTypeDefs();
DisplayTypeRefs();
DisplayTypeSpecs();
DisplayMethodSpecs();
DisplayModuleRefs();
DisplaySignatures();
DisplayAssembly();
DisplayUserStrings();
// WriteLine("============================================================");
// WriteLine("Unresolved MemberRefs");
// DisplayMemberRefs(0x00000001, "\t");
VWrite("\n\nCoff symbol name overhead: %d\n", g_cbCoffNames);
}
WriteLine("===========================================================");
if (m_DumpFilter & dumpUnsat)
DisplayUnsatInfo();
WriteLine("===========================================================");
} // MDVEHandlerClass()
int MDInfo::WriteLine(_In_z_ const char *str)
{
ULONG32 count = (ULONG32) strlen(str);
m_pbFn(str);
m_pbFn("\n");
return count;
} // int MDInfo::WriteLine()
int MDInfo::Write(_In_z_ const char *str)
{
ULONG32 count = (ULONG32) strlen(str);
m_pbFn(str);
return count;
} // int MDInfo::Write()
int MDInfo::VWriteLine(_In_z_ const char *str, ...)
{
va_list marker;
int count;
va_start(marker, str);
count = VWriteMarker(str, marker);
m_pbFn("\n");
va_end(marker);
return count;
} // int MDInfo::VWriteLine()
int MDInfo::VWrite(_In_z_ const char *str, ...)
{
va_list marker;
int count;
va_start(marker, str);
count = VWriteMarker(str, marker);
va_end(marker);
return count;
} // int MDInfo::VWrite()
int MDInfo::VWriteMarker(_In_z_ const char *str, va_list marker)
{
HRESULT hr;
int count = -1;
// Used to allocate 1K, then if not enough, 2K, then 4K.
// Faster to allocate 32K right away and be done with it,
// we're not running on Commodore 64
if (FAILED(hr = m_output.ReSizeNoThrow(STRING_BUFFER_LEN * 8)))
Error("ReSize failed.", hr);
else
{
count = vsprintf_s((char *)m_output.Ptr(), STRING_BUFFER_LEN * 8, str, marker);
m_pbFn((char *)m_output.Ptr());
}
return count;
} // int MDInfo::VWriteToBuffer()
// Error() function -- prints an error and returns
void MDInfo::Error(const char* szError, HRESULT hr)
{
printf("\n%s\n",szError);
if (hr != S_OK)
{
printf("Failed return code: 0x%08x\n", hr);
IErrorInfo *pIErr = NULL; // Error interface.
BSTR bstrDesc = NULL; // Description text.
#ifdef FEATURE_COMINTEROP
// Try to get an error info object and display the message.
if (GetErrorInfo(0, &pIErr) == S_OK &&
pIErr->GetDescription(&bstrDesc) == S_OK)
{
printf("%ls ", bstrDesc);
SysFreeString(bstrDesc);
}
#endif
// Free the error interface.
if (pIErr)
pIErr->Release();
}
exit(hr);
} // void MDInfo::Error()
// Print out the optional version info included in the MetaData.
void MDInfo::DisplayVersionInfo()
{
if (!(m_DumpFilter & MDInfo::dumpNoLogo))
{
LPCUTF8 pVersionStr;
HRESULT hr = S_OK;
if (m_pTables == 0)
{
if (m_pImport)
hr = m_pImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables);
else if (m_pAssemblyImport)
hr = m_pAssemblyImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables);
else
return;
if (FAILED(hr))
Error("QueryInterface failed for IID_IMetaDataTables.", hr);
}
hr = m_pTables->GetString(1, &pVersionStr);
if (FAILED(hr))
Error("GetString() failed.", hr);
if (strstr(pVersionStr, "Version of runtime against which the binary is built : ")
== pVersionStr)
{
WriteLine(const_cast<char *>(pVersionStr));
}
}
} // void MDInfo::DisplayVersionInfo()
// Prints out information about the scope
void MDInfo::DisplayScopeInfo()
{
HRESULT hr;
mdModule mdm;
GUID mvid;
WCHAR scopeName[STRING_BUFFER_LEN];
WCHAR guidString[STRING_BUFFER_LEN];
hr = m_pImport->GetScopeProps( scopeName, STRING_BUFFER_LEN, 0, &mvid);
if (FAILED(hr)) Error("GetScopeProps failed.", hr);
VWriteLine("ScopeName : %ls",scopeName);
if (!(m_DumpFilter & MDInfo::dumpNoLogo))
VWriteLine("MVID : %ls",GUIDAsString(mvid, guidString, STRING_BUFFER_LEN));
hr = m_pImport->GetModuleFromScope(&mdm);
if (FAILED(hr)) Error("GetModuleFromScope failed.", hr);
DisplayPermissions(mdm, "");
DisplayCustomAttributes(mdm, "\t");
} // void MDInfo::DisplayScopeInfo()
void MDInfo::DisplayRaw()
{
int iDump; // Level of info to dump.
if (m_pTables == 0)
m_pImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables);
if (m_pTables == 0)
Error("Can't get table info.");
if (m_pTables2 == 0)
m_pImport->QueryInterface(IID_IMetaDataTables2, (void**)&m_pTables2);
if (m_DumpFilter & dumpCSV)
DumpRawCSV();
if (m_DumpFilter & (dumpSchema | dumpHeader | dumpRaw | dumpStats))
{
if (m_DumpFilter & dumpRaw)
iDump = 3;
else
if (m_DumpFilter & dumpSchema)
iDump = 2;
else
iDump = 1;
DumpRaw(iDump, (m_DumpFilter & dumpStats) != 0);
}
if (m_DumpFilter & dumpRawHeaps)
DumpRawHeaps();
} // void MDInfo::DisplayRaw()
// return the name of the type of token passed in
const char *MDInfo::TokenTypeName(mdToken inToken)
{
switch(TypeFromToken(inToken))
{
case mdtTypeDef: return "TypeDef";
case mdtInterfaceImpl: return "InterfaceImpl";
case mdtMethodDef: return "MethodDef";
case mdtFieldDef: return "FieldDef";
case mdtTypeRef: return "TypeRef";
case mdtMemberRef: return "MemberRef";
case mdtCustomAttribute:return "CustomAttribute";
case mdtParamDef: return "ParamDef";
case mdtProperty: return "Property";
case mdtEvent: return "Event";
case mdtTypeSpec: return "TypeSpec";
default: return "[UnknownTokenType]";
}
} // char *MDInfo::TokenTypeName()
// Prints out name of the given memberref
//
LPCWSTR MDInfo::MemberRefName(mdMemberRef inMemRef, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
HRESULT hr;
hr = m_pImport->GetMemberRefProps( inMemRef, NULL, buffer, bufLen,
NULL, NULL, NULL);
if (FAILED(hr)) Error("GetMemberRefProps failed.", hr);
return buffer;
} // LPCWSTR MDInfo::MemberRefName()
// Prints out information about the given memberref
//
void MDInfo::DisplayMemberRefInfo(mdMemberRef inMemRef, const char *preFix)
{
HRESULT hr;
WCHAR memRefName[STRING_BUFFER_LEN];
ULONG nameLen;
mdToken token;
PCCOR_SIGNATURE pbSigBlob;
ULONG ulSigBlob;
char newPreFix[STRING_BUFFER_LEN];
hr = m_pImport->GetMemberRefProps( inMemRef, &token, memRefName, STRING_BUFFER_LEN,
&nameLen, &pbSigBlob, &ulSigBlob);
if (FAILED(hr)) Error("GetMemberRefProps failed.", hr);
VWriteLine("%s\t\tMember: (%8.8x) %ls: ", preFix, inMemRef, memRefName);
if (ulSigBlob)
DisplaySignature(pbSigBlob, ulSigBlob, preFix);
else
VWriteLine("%s\t\tERROR: no valid signature ", preFix);
sprintf_s (newPreFix, STRING_BUFFER_LEN, "\t\t%s", preFix);
DisplayCustomAttributes(inMemRef, newPreFix);
} // void MDInfo::DisplayMemberRefInfo()
// Prints out information about all memberrefs of the given typeref
//
void MDInfo::DisplayMemberRefs(mdToken tkParent, const char *preFix)
{
HCORENUM memRefEnum = NULL;
HRESULT hr;
mdMemberRef memRefs[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
while (SUCCEEDED(hr = m_pImport->EnumMemberRefs( &memRefEnum, tkParent,
memRefs, ARRAY_SIZE(memRefs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("%s\tMemberRef #%d (%08x)", preFix, totalCount, memRefs[i]);
VWriteLine("%s\t-------------------------------------------------------", preFix);
DisplayMemberRefInfo(memRefs[i], preFix);
}
}
m_pImport->CloseEnum( memRefEnum);
} // void MDInfo::DisplayMemberRefs()
// Prints out information about all typerefs in the scope
//
// Iterates through each typeref and prints out the information of each
//
void MDInfo::DisplayTypeRefs()
{
HCORENUM typeRefEnum = NULL;
mdTypeRef typeRefs[ENUM_BUFFER_SIZE];
ULONG count, totalCount=1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumTypeRefs( &typeRefEnum,
typeRefs, ARRAY_SIZE(typeRefs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("TypeRef #%d (%08x)", totalCount, typeRefs[i]);
WriteLine("-------------------------------------------------------");
DisplayTypeRefInfo(typeRefs[i]);
DisplayMemberRefs(typeRefs[i], "");
WriteLine("");
}
}
m_pImport->CloseEnum( typeRefEnum);
} // void MDInfo::DisplayTypeRefs()
void MDInfo::DisplayTypeSpecs()
{
HCORENUM typespecEnum = NULL;
mdTypeSpec typespecs[ENUM_BUFFER_SIZE];
ULONG count, totalCount=1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumTypeSpecs( &typespecEnum,
typespecs, ARRAY_SIZE(typespecs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("TypeSpec #%d (%08x)", totalCount, typespecs[i]);
WriteLine("-------------------------------------------------------");
DisplayTypeSpecInfo(typespecs[i], "");
DisplayMemberRefs(typespecs[i], "");
WriteLine("");
}
}
m_pImport->CloseEnum( typespecEnum);
} // void MDInfo::DisplayTypeSpecs()
void MDInfo::DisplayMethodSpecs()
{
HCORENUM MethodSpecEnum = NULL;
mdMethodSpec MethodSpecs[ENUM_BUFFER_SIZE];
ULONG count, totalCount=1;
///// HRESULT hr;
///// HACK until I implement EnumMethodSpecs!
///// while (SUCCEEDED(hr = m_pImport->EnumMethodSpecs( &MethodSpecEnum,
///// MethodSpecs, ARRAY_SIZE(MethodSpecs), &count)) &&
///// count > 0)
for (ULONG rid=1; m_pImport->IsValidToken(TokenFromRid(rid, mdtMethodSpec)); ++rid)
{
// More hackery
count = 1;
MethodSpecs[0] = TokenFromRid(rid, mdtMethodSpec);
// More hackery
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("MethodSpec #%d (%08x)", totalCount, MethodSpecs[i]);
DisplayMethodSpecInfo(MethodSpecs[i], "");
WriteLine("");
}
}
m_pImport->CloseEnum( MethodSpecEnum);
} // void MDInfo::DisplayMethodSpecs()
// Called to display the information about all typedefs in the object.
//
void MDInfo::DisplayTypeDefs()
{
HCORENUM typeDefEnum = NULL;
mdTypeDef typeDefs[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumTypeDefs( &typeDefEnum,
typeDefs, ARRAY_SIZE(typeDefs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("TypeDef #%d (%08x)", totalCount, typeDefs[i]);
WriteLine("-------------------------------------------------------");
DisplayTypeDefInfo(typeDefs[i]);
WriteLine("");
}
}
m_pImport->CloseEnum( typeDefEnum);
} // void MDInfo::DisplayTypeDefs()
// Called to display the information about all modulerefs in the object.
//
void MDInfo::DisplayModuleRefs()
{
HCORENUM moduleRefEnum = NULL;
mdModuleRef moduleRefs[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumModuleRefs( &moduleRefEnum,
moduleRefs, ARRAY_SIZE(moduleRefs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("ModuleRef #%d (%08x)", totalCount, moduleRefs[i]);
WriteLine("-------------------------------------------------------");
DisplayModuleRefInfo(moduleRefs[i]);
DisplayMemberRefs(moduleRefs[i], "");
WriteLine("");
}
}
m_pImport->CloseEnum( moduleRefEnum);
} // void MDInfo::DisplayModuleRefs()
// Prints out information about the given moduleref
//
void MDInfo::DisplayModuleRefInfo(mdModuleRef inModuleRef)
{
HRESULT hr;
WCHAR moduleRefName[STRING_BUFFER_LEN];
ULONG nameLen;
hr = m_pImport->GetModuleRefProps( inModuleRef, moduleRefName, STRING_BUFFER_LEN,
&nameLen);
if (FAILED(hr)) Error("GetModuleRefProps failed.", hr);
VWriteLine("\t\tModuleRef: (%8.8x) %ls: ", inModuleRef, moduleRefName);
DisplayCustomAttributes(inModuleRef, "\t\t");
} // void MDInfo::DisplayModuleRefInfo()
// Called to display the information about all signatures in the object.
//
void MDInfo::DisplaySignatures()
{
HCORENUM signatureEnum = NULL;
mdSignature signatures[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumSignatures( &signatureEnum,
signatures, ARRAY_SIZE(signatures), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("Signature #%d (%#08x)", totalCount, signatures[i]);
WriteLine("-------------------------------------------------------");
DisplaySignatureInfo(signatures[i]);
WriteLine("");
}
}
m_pImport->CloseEnum( signatureEnum);
} // void MDInfo::DisplaySignatures()
// Prints out information about the given signature
//
void MDInfo::DisplaySignatureInfo(mdSignature inSignature)
{
HRESULT hr;
PCCOR_SIGNATURE pbSigBlob;
ULONG ulSigBlob;
hr = m_pImport->GetSigFromToken( inSignature, &pbSigBlob, &ulSigBlob );
if (FAILED(hr)) Error("GetSigFromToken failed.", hr);
if(ulSigBlob)
DisplaySignature(pbSigBlob, ulSigBlob, "");
else
VWriteLine("\t\tERROR: no valid signature ");
} // void MDInfo::DisplaySignatureInfo()
// returns the passed-in buffer which is filled with the name of the given
// member in wide characters
//
LPCWSTR MDInfo::MemberName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
HRESULT hr;
hr = m_pImport->GetMemberProps( inToken, NULL, buffer, bufLen,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
if (FAILED(hr)) Error("GetMemberProps failed.", hr);
return (buffer);
} // LPCWSTR MDInfo::MemberName()
// displays information for the given method
//
void MDInfo::DisplayMethodInfo(mdMethodDef inMethod, DWORD *pflags)
{
HRESULT hr;
mdTypeDef memTypeDef;
WCHAR memberName[STRING_BUFFER_LEN];
ULONG nameLen;
DWORD flags;
PCCOR_SIGNATURE pbSigBlob;
ULONG ulSigBlob;
ULONG ulCodeRVA;
ULONG ulImplFlags;
hr = m_pImport->GetMethodProps( inMethod, &memTypeDef, memberName, STRING_BUFFER_LEN,
&nameLen, &flags, &pbSigBlob, &ulSigBlob, &ulCodeRVA, &ulImplFlags);
if (FAILED(hr)) Error("GetMethodProps failed.", hr);
if (pflags)
*pflags = flags;
VWriteLine("\t\tMethodName: %ls (%8.8X)", memberName, inMethod);
char sFlags[STRING_BUFFER_LEN];
sFlags[0] = 0;
ISFLAG(Md, Public);
ISFLAG(Md, Private);
ISFLAG(Md, Family);
ISFLAG(Md, Assem);
ISFLAG(Md, FamANDAssem);
ISFLAG(Md, FamORAssem);
ISFLAG(Md, PrivateScope);
ISFLAG(Md, Static);
ISFLAG(Md, Final);
ISFLAG(Md, Virtual);
ISFLAG(Md, HideBySig);
ISFLAG(Md, ReuseSlot);
ISFLAG(Md, NewSlot);
ISFLAG(Md, Abstract);
ISFLAG(Md, SpecialName);
ISFLAG(Md, RTSpecialName);
ISFLAG(Md, PinvokeImpl);
ISFLAG(Md, UnmanagedExport);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
bool result = (((flags) & mdRTSpecialName) && !wcscmp((memberName), W(".ctor")));
if (result) strcat_s(sFlags, STRING_BUFFER_LEN, "[.ctor] ");
result = (((flags) & mdRTSpecialName) && !wcscmp((memberName), W(".cctor")));
if (result) strcat_s(sFlags,STRING_BUFFER_LEN, "[.cctor] ");
// "Reserved" flags
ISFLAG(Md, HasSecurity);
ISFLAG(Md, RequireSecObject);
VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags);
VWriteLine("\t\tRVA : 0x%08x", ulCodeRVA);
flags = ulImplFlags;
sFlags[0] = 0;
ISFLAG(Mi, Native);
ISFLAG(Mi, IL);
ISFLAG(Mi, OPTIL);
ISFLAG(Mi, Runtime);
ISFLAG(Mi, Unmanaged);
ISFLAG(Mi, Managed);
ISFLAG(Mi, ForwardRef);
ISFLAG(Mi, PreserveSig);
ISFLAG(Mi, InternalCall);
ISFLAG(Mi, Synchronized);
ISFLAG(Mi, NoInlining);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\t\tImplFlags : %s (%08x)", sFlags, flags);
if (ulSigBlob)
DisplaySignature(pbSigBlob, ulSigBlob, "");
else
VWriteLine("\t\tERROR: no valid signature ");
DisplayGenericParams(inMethod, "\t\t");
} // void MDInfo::DisplayMethodInfo()
// displays the member information for the given field
//
void MDInfo::DisplayFieldInfo(mdFieldDef inField, DWORD *pdwFlags)
{
HRESULT hr;
mdTypeDef memTypeDef;
WCHAR memberName[STRING_BUFFER_LEN];
ULONG nameLen;
DWORD flags;
PCCOR_SIGNATURE pbSigBlob;
ULONG ulSigBlob;
DWORD dwCPlusTypeFlag;
void const *pValue;
ULONG cbValue;
#ifdef FEATURE_COMINTEROP
VARIANT defaultValue;
::VariantInit(&defaultValue);
#endif
hr = m_pImport->GetFieldProps( inField, &memTypeDef, memberName, STRING_BUFFER_LEN,
&nameLen, &flags, &pbSigBlob, &ulSigBlob, &dwCPlusTypeFlag,
&pValue, &cbValue);
if (FAILED(hr)) Error("GetFieldProps failed.", hr);
if (pdwFlags)
*pdwFlags = flags;
#ifdef FEATURE_COMINTEROP
_FillVariant((BYTE)dwCPlusTypeFlag, pValue, cbValue, &defaultValue);
#endif
char sFlags[STRING_BUFFER_LEN];
sFlags[0] = 0;
ISFLAG(Fd, Public);
ISFLAG(Fd, Private);
ISFLAG(Fd, Family);
ISFLAG(Fd, Assembly);
ISFLAG(Fd, FamANDAssem);
ISFLAG(Fd, FamORAssem);
ISFLAG(Fd, PrivateScope);
ISFLAG(Fd, Static);
ISFLAG(Fd, InitOnly);
ISFLAG(Fd, Literal);
ISFLAG(Fd, NotSerialized);
ISFLAG(Fd, SpecialName);
ISFLAG(Fd, RTSpecialName);
ISFLAG(Fd, PinvokeImpl);
// "Reserved" flags
ISFLAG(Fd, HasDefault);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\t\tField Name: %ls (%8.8X)", memberName, inField);
VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags);
#ifdef FEATURE_COMINTEROP
if (IsFdHasDefault(flags))
VWriteLine("\tDefltValue: (%s) %ls", g_szMapElementType[dwCPlusTypeFlag], VariantAsString(&defaultValue));
#endif
if (!ulSigBlob) // Signature size should be non-zero for fields
VWriteLine("\t\tERROR: no valid signature ");
else
DisplaySignature(pbSigBlob, ulSigBlob, "");
#ifdef FEATURE_COMINTEROP
::VariantClear(&defaultValue);
#endif
} // void MDInfo::DisplayFieldInfo()
// displays the RVA for the given global field.
void MDInfo::DisplayFieldRVA(mdFieldDef inFieldDef)
{
HRESULT hr;
ULONG ulRVA;
hr = m_pImport->GetRVA(inFieldDef, &ulRVA, 0);
if (FAILED(hr) && hr != CLDB_E_RECORD_NOTFOUND) Error("GetRVA failed.", hr);
VWriteLine("\t\tRVA : 0x%08x", ulRVA);
} // void MDInfo::DisplayFieldRVA()
// displays information about every global function.
void MDInfo::DisplayGlobalFunctions()
{
WriteLine("Global functions");
WriteLine("-------------------------------------------------------");
DisplayMethods(mdTokenNil);
WriteLine("");
} // void MDInfo::DisplayGlobalFunctions()
// displays information about every global field.
void MDInfo::DisplayGlobalFields()
{
WriteLine("Global fields");
WriteLine("-------------------------------------------------------");
DisplayFields(mdTokenNil, NULL, 0);
WriteLine("");
} // void MDInfo::DisplayGlobalFields()
// displays information about every global memberref.
void MDInfo::DisplayGlobalMemberRefs()
{
WriteLine("Global MemberRefs");
WriteLine("-------------------------------------------------------");
DisplayMemberRefs(mdTokenNil, "");
WriteLine("");
} // void MDInfo::DisplayGlobalMemberRefs()
// displays information about every method in a given typedef
//
void MDInfo::DisplayMethods(mdTypeDef inTypeDef)
{
HCORENUM methodEnum = NULL;
mdToken methods[ENUM_BUFFER_SIZE];
DWORD flags;
ULONG count, totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumMethods( &methodEnum, inTypeDef,
methods, ARRAY_SIZE(methods), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\tMethod #%d (%08x) %s", totalCount, methods[i], (methods[i] == g_tkEntryPoint) ? "[ENTRYPOINT]" : "");
WriteLine("\t-------------------------------------------------------");
DisplayMethodInfo(methods[i], &flags);
DisplayParams(methods[i]);
DisplayCustomAttributes(methods[i], "\t\t");
DisplayPermissions(methods[i], "\t");
DisplayMemberRefs(methods[i], "\t");
// P-invoke data if present.
if (IsMdPinvokeImpl(flags))
DisplayPinvokeInfo(methods[i]);
WriteLine("");
}
}
m_pImport->CloseEnum( methodEnum);
} // void MDInfo::DisplayMethods()
// displays information about every field in a given typedef
//
void MDInfo::DisplayFields(mdTypeDef inTypeDef, COR_FIELD_OFFSET *rFieldOffset, ULONG cFieldOffset)
{
HCORENUM fieldEnum = NULL;
mdToken fields[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
DWORD flags;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumFields( &fieldEnum, inTypeDef,
fields, ARRAY_SIZE(fields), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\tField #%d (%08x)",totalCount, fields[i]);
WriteLine("\t-------------------------------------------------------");
DisplayFieldInfo(fields[i], &flags);
DisplayCustomAttributes(fields[i], "\t\t");
DisplayPermissions(fields[i], "\t");
DisplayFieldMarshal(fields[i]);
// RVA if its a global field.
if (inTypeDef == mdTokenNil)
DisplayFieldRVA(fields[i]);
// P-invoke data if present.
if (IsFdPinvokeImpl(flags))
DisplayPinvokeInfo(fields[i]);
// Display offset if present.
if (cFieldOffset)
{
bool found = false;
for (ULONG iLayout = 0; iLayout < cFieldOffset; ++iLayout)
{
if (RidFromToken(rFieldOffset[iLayout].ridOfField) == RidFromToken(fields[i]))
{
found = true;
VWriteLine("\t\tOffset : 0x%08x", rFieldOffset[iLayout].ulOffset);
break;
}
}
_ASSERTE(found);
}
WriteLine("");
}
}
m_pImport->CloseEnum( fieldEnum);
} // void MDInfo::DisplayFields()
// displays information about every methodImpl in a given typedef
//
void MDInfo::DisplayMethodImpls(mdTypeDef inTypeDef)
{
HCORENUM methodImplEnum = NULL;
mdMethodDef rtkMethodBody[ENUM_BUFFER_SIZE];
mdMethodDef rtkMethodDecl[ENUM_BUFFER_SIZE];
ULONG count, totalCount=1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumMethodImpls( &methodImplEnum, inTypeDef,
rtkMethodBody, rtkMethodDecl, ARRAY_SIZE(rtkMethodBody), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\n\tMethodImpl #%d (%08x)", totalCount, totalCount);
WriteLine("\t-------------------------------------------------------");
VWriteLine("\t\tMethod Body Token : 0x%08x", rtkMethodBody[i]);
VWriteLine("\t\tMethod Declaration Token : 0x%08x", rtkMethodDecl[i]);
WriteLine("");
}
}
m_pImport->CloseEnum( methodImplEnum);
} // void MDInfo::DisplayMethodImpls()
// displays information about the given parameter
//
void MDInfo::DisplayParamInfo(mdParamDef inParamDef)
{
mdMethodDef md;
ULONG num;
WCHAR paramName[STRING_BUFFER_LEN];
ULONG nameLen;
DWORD flags;
VARIANT defValue;
DWORD dwCPlusFlags;
void const *pValue;
ULONG cbValue;
#ifdef FEATURE_COMINTEROP
::VariantInit(&defValue);
#endif
HRESULT hr = m_pImport->GetParamProps( inParamDef, &md, &num, paramName, ARRAY_SIZE(paramName),
&nameLen, &flags, &dwCPlusFlags, &pValue, &cbValue);
if (FAILED(hr)) Error("GetParamProps failed.", hr);
_FillVariant((BYTE)dwCPlusFlags, pValue, cbValue, &defValue);
char sFlags[STRING_BUFFER_LEN];
sFlags[0] = 0;
ISFLAG(Pd, In);
ISFLAG(Pd, Out);
ISFLAG(Pd, Optional);
// "Reserved" flags.
ISFLAG(Pd, HasDefault);
ISFLAG(Pd, HasFieldMarshal);
if (!*sFlags)
strcpy_s(sFlags,STRING_BUFFER_LEN, "[none]");
VWrite("\t\t\t(%ld) ParamToken : (%08x) Name : %ls flags: %s (%08x)", num, inParamDef, paramName, sFlags, flags);
#ifdef FEATURE_COMINTEROP
if (IsPdHasDefault(flags))
VWriteLine(" Default: (%s) %ls", g_szMapElementType[dwCPlusFlags], VariantAsString(&defValue));
else
#endif
VWriteLine("");
DisplayCustomAttributes(inParamDef, "\t\t\t");
#ifdef FEATURE_COMINTEROP
::VariantClear(&defValue);
#endif
} // void MDInfo::DisplayParamInfo()
// displays all parameters for a given memberdef
//
void MDInfo::DisplayParams(mdMethodDef inMethodDef)
{
HCORENUM paramEnum = NULL;
mdParamDef params[ENUM_BUFFER_SIZE];
ULONG count, paramCount;
bool first = true;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumParams( ¶mEnum, inMethodDef,
params, ARRAY_SIZE(params), &count)) &&
count > 0)
{
if (first)
{
m_pImport->CountEnum( paramEnum, ¶mCount);
VWriteLine("\t\t%d Parameters", paramCount);
}
for (ULONG i = 0; i < count; i++)
{
DisplayParamInfo(params[i]);
DisplayFieldMarshal(params[i]);
}
first = false;
}
m_pImport->CloseEnum( paramEnum);
} // void MDInfo::DisplayParams()
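// displays all generic parameters for the given token (typedef or methoddef)
//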
void MDInfo::DisplayGenericParams(mdToken tk, const char *prefix)
{
HCORENUM paramEnum = NULL;
mdGenericParam params[ENUM_BUFFER_SIZE];
ULONG count, paramCount;
bool first = true;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumGenericParams( ¶mEnum, tk,
params, ARRAY_SIZE(params), &count)) &&
count > 0)
{
if (first)
{
m_pImport->CountEnum( paramEnum, ¶mCount);
VWriteLine("%s%d Generic Parameters", prefix, paramCount);
}
for (ULONG i = 0; i < count; i++)
{
DisplayGenericParamInfo(params[i], prefix);
}
first = false;
}
m_pImport->CloseEnum( paramEnum);
}
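// displays the sequence number, name, flags, owner, constraints, and custom
// attributes of the given generic parameter
//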
void MDInfo::DisplayGenericParamInfo(mdGenericParam tkParam, const char *prefix)
{
ULONG ulSeq;
WCHAR paramName[STRING_BUFFER_LEN];
ULONG nameLen;
DWORD flags;
mdToken tkOwner;
char newprefix[30];
HCORENUM constraintEnum = NULL;
mdGenericParamConstraint constraints[4];
ULONG count, constraintCount;
mdToken constraint;
mdToken owner;
bool first = true;
HRESULT hr = m_pImport->GetGenericParamProps(tkParam, &ulSeq, &flags, &tkOwner, NULL, paramName, ARRAY_SIZE(paramName), &nameLen);
if (FAILED(hr)) Error("GetGenericParamProps failed.", hr);
VWriteLine("%s\t(%ld) GenericParamToken : (%08x) Name : %ls flags: %08x Owner: %08x", prefix, ulSeq, tkParam, paramName, flags, tkOwner);
// Any constraints for the GenericParam
while (SUCCEEDED(hr = m_pImport->EnumGenericParamConstraints(&constraintEnum, tkParam,
constraints, ARRAY_SIZE(constraints), &count)) &&
count > 0)
{
if (first)
{
m_pImport->CountEnum( constraintEnum, &constraintCount);
VWriteLine("%s\t\t%d Constraint(s)", prefix, constraintCount);
}
VWrite("%s\t\t", prefix);
for (ULONG i=0; i< count; ++i)
{
hr = m_pImport->GetGenericParamConstraintProps(constraints[i], &owner, &constraint);
if (FAILED(hr)) Error("GetGenericParamConstraintProps failed.", hr);
if (owner != tkParam)
VWrite("%08x (owner: %08x) ", constraint, owner);
else
VWrite("%08x ", constraint);
}
VWriteLine("");
}
m_pImport->CloseEnum(constraintEnum);
sprintf_s(newprefix, 30, "%s\t", prefix);
DisplayCustomAttributes(tkParam, newprefix);
}
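// returns the passed-in buffer filled with the name associated with the given
// token, or an empty string for a nil token
//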
LPCWSTR MDInfo::TokenName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
LPCUTF8 pName; // Token name in UTF8.
if (IsNilToken(inToken))
return W("");
m_pImport->GetNameFromToken(inToken, &pName);
WszMultiByteToWideChar(CP_UTF8,0, pName,-1, buffer,bufLen);
return buffer;
} // LPCWSTR MDInfo::TokenName()
// prints out name of typeref or typedef
//
LPCWSTR MDInfo::TypeDeforRefName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
if (RidFromToken(inToken))
{
if (TypeFromToken(inToken) == mdtTypeDef)
return (TypeDefName((mdTypeDef) inToken, buffer, bufLen));
else if (TypeFromToken(inToken) == mdtTypeRef)
return (TypeRefName((mdTypeRef) inToken, buffer, bufLen));
else if (TypeFromToken(inToken) == mdtTypeSpec)
return W("[TypeSpec]");
else
return W("[InvalidReference]");
}
else
return W("");
} // LPCWSTR MDInfo::TypeDeforRefName()
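// returns the passed-in buffer filled with the name of the given methoddef,
// fielddef, or memberref
//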
LPCWSTR MDInfo::MemberDeforRefName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
if (RidFromToken(inToken))
{
if (TypeFromToken(inToken) == mdtMethodDef || TypeFromToken(inToken) == mdtFieldDef)
return (MemberName(inToken, buffer, bufLen));
else if (TypeFromToken(inToken) == mdtMemberRef)
return (MemberRefName((mdMemberRef) inToken, buffer, bufLen));
else
return W("[InvalidReference]");
}
else
return W("");
} // LPCWSTR MDInfo::MemberDeforRefName()
// prints out only the name of the given typedef
//
//
LPCWSTR MDInfo::TypeDefName(mdTypeDef inTypeDef, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
HRESULT hr;
hr = m_pImport->GetTypeDefProps(
// [IN] The import scope.
inTypeDef, // [IN] TypeDef token for inquiry.
buffer, // [OUT] Put name here.
bufLen, // [IN] size of name buffer in wide chars.
NULL, // [OUT] put size of name (wide chars) here.
NULL, // [OUT] Put flags here.
NULL); // [OUT] Put base class TypeDef/TypeRef here.
if (FAILED(hr))
{
swprintf_s(buffer, bufLen, W("[Invalid TypeDef]"));
}
return buffer;
} // LPCWSTR MDInfo::TypeDefName()
// prints out all the properties of a given typedef
//
void MDInfo::DisplayTypeDefProps(mdTypeDef inTypeDef)
{
HRESULT hr;
WCHAR typeDefName[STRING_BUFFER_LEN];
ULONG nameLen;
DWORD flags;
mdToken extends;
ULONG dwPacking; // Packing size of class, if specified.
ULONG dwSize; // Total size of class, if specified.
hr = m_pImport->GetTypeDefProps(
inTypeDef, // [IN] TypeDef token for inquiry.
typeDefName, // [OUT] Put name here.
STRING_BUFFER_LEN, // [IN] size of name buffer in wide chars.
&nameLen, // [OUT] put size of name (wide chars) here.
&flags, // [OUT] Put flags here.
&extends); // [OUT] Put base class TypeDef/TypeRef here.
if (FAILED(hr)) Error("GetTypeDefProps failed.", hr);
char sFlags[STRING_BUFFER_LEN];
WCHAR szTempBuf[STRING_BUFFER_LEN];
VWriteLine("\tTypDefName: %ls (%8.8X)",typeDefName,inTypeDef);
VWriteLine("\tFlags : %s (%08x)",ClassFlags(flags, sFlags), flags);
VWriteLine("\tExtends : %8.8X [%s] %ls",extends,TokenTypeName(extends),
TypeDeforRefName(extends, szTempBuf, ARRAY_SIZE(szTempBuf)));
hr = m_pImport->GetClassLayout(inTypeDef, &dwPacking, 0,0,0, &dwSize);
if (hr == S_OK)
VWriteLine("\tLayout : Packing:%d, Size:%d", dwPacking, dwSize);
if (IsTdNested(flags))
{
mdTypeDef tkEnclosingClass;
hr = m_pImport->GetNestedClassProps(inTypeDef, &tkEnclosingClass);
if (hr == S_OK)
{
VWriteLine("\tEnclosingClass : %ls (%8.8X)", TypeDeforRefName(tkEnclosingClass,
szTempBuf, ARRAY_SIZE(szTempBuf)), tkEnclosingClass);
}
else if (hr == CLDB_E_RECORD_NOTFOUND)
WriteLine("ERROR: EnclosingClass not found for NestedClass");
else
Error("GetNestedClassProps failed.", hr);
}
} // void MDInfo::DisplayTypeDefProps()
// Prints out the name of the given TypeRef
//
LPCWSTR MDInfo::TypeRefName(mdTypeRef tr, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
HRESULT hr;
hr = m_pImport->GetTypeRefProps(
tr, // The class ref token.
NULL, // Resolution scope.
buffer, // Put the name here.
bufLen, // Size of the name buffer, wide chars.
NULL); // Put actual size of name here.
if (FAILED(hr))
{
swprintf_s(buffer, bufLen, W("[Invalid TypeRef]"));
}
return (buffer);
} // LPCWSTR MDInfo::TypeRefName()
// Prints out all the info of the given TypeRef
//
void MDInfo::DisplayTypeRefInfo(mdTypeRef tr)
{
HRESULT hr;
mdToken tkResolutionScope;
WCHAR typeRefName[STRING_BUFFER_LEN];
ULONG nameLen;
hr = m_pImport->GetTypeRefProps(
tr, // The class ref token.
&tkResolutionScope, // ResolutionScope.
typeRefName, // Put the name here.
STRING_BUFFER_LEN, // Size of the name buffer, wide chars.
&nameLen); // Put actual size of name here.
if (FAILED(hr)) Error("GetTypeRefProps failed.", hr);
VWriteLine("Token: 0x%08x", tr);
VWriteLine("ResolutionScope: 0x%08x", tkResolutionScope);
VWriteLine("TypeRefName: %ls",typeRefName);
DisplayCustomAttributes(tr, "\t");
} // void MDInfo::DisplayTypeRefInfo()
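// Prints out the signature blob of the given TypeSpec
//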
void MDInfo::DisplayTypeSpecInfo(mdTypeSpec ts, const char *preFix)
{
HRESULT hr;
PCCOR_SIGNATURE pvSig;
ULONG cbSig;
ULONG cb;
InitSigBuffer();
hr = m_pImport->GetTypeSpecFromToken(
ts, // The class ref token.
&pvSig,
&cbSig);
if (FAILED(hr)) Error("GetTypeSpecFromToken failed.", hr);
// DisplaySignature(pvSig, cbSig, preFix);
if (FAILED(hr = GetOneElementType(pvSig, cbSig, &cb)))
goto ErrExit;
VWriteLine("%s\tTypeSpec :%s", preFix, (LPSTR)m_sigBuf.Ptr());
// Hex, too?
if (m_DumpFilter & dumpMoreHex)
{
char rcNewPrefix[80];
sprintf_s(rcNewPrefix, 80, "%s\tSignature", preFix);
DumpHex(rcNewPrefix, pvSig, cbSig, false, 24);
}
ErrExit:
return;
} // void MDInfo::DisplayTypeSpecInfo()
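// Prints out the parent token and signature of the given MethodSpec
//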
void MDInfo::DisplayMethodSpecInfo(mdMethodSpec ms, const char *preFix)
{
HRESULT hr;
PCCOR_SIGNATURE pvSig;
ULONG cbSig;
mdToken tk;
InitSigBuffer();
hr = m_pImport->GetMethodSpecProps(
ms, // The MethodSpec token
&tk, // The MethodDef or MemberRef
&pvSig, // Signature.
&cbSig); // Size of signature.
VWriteLine("%s\tParent : 0x%08x", preFix, tk);
DisplaySignature(pvSig, cbSig, preFix);
//ErrExit:
return;
} // void MDInfo::DisplayMethodSpecInfo()
// Return the passed-in buffer filled with a string detailing the class flags
// associated with the class.
//
char *MDInfo::ClassFlags(DWORD flags, _Out_writes_(STRING_BUFFER_LEN) char *sFlags)
{
sFlags[0] = 0;
ISFLAG(Td, NotPublic);
ISFLAG(Td, Public);
ISFLAG(Td, NestedPublic);
ISFLAG(Td, NestedPrivate);
ISFLAG(Td, NestedFamily);
ISFLAG(Td, NestedAssembly);
ISFLAG(Td, NestedFamANDAssem);
ISFLAG(Td, NestedFamORAssem);
ISFLAG(Td, AutoLayout);
ISFLAG(Td, SequentialLayout);
ISFLAG(Td, ExplicitLayout);
ISFLAG(Td, Class);
ISFLAG(Td, Interface);
ISFLAG(Td, Abstract);
ISFLAG(Td, Sealed);
ISFLAG(Td, SpecialName);
ISFLAG(Td, Import);
ISFLAG(Td, Serializable);
ISFLAG(Td, AnsiClass);
ISFLAG(Td, UnicodeClass);
ISFLAG(Td, AutoClass);
ISFLAG(Td, BeforeFieldInit);
ISFLAG(Td, Forwarder);
// "Reserved" flags
ISFLAG(Td, RTSpecialName);
ISFLAG(Td, HasSecurity);
ISFLAG(Td, WindowsRuntime);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
return sFlags;
} // char *MDInfo::ClassFlags()
// prints out all info on the given typeDef, including all information that
// is specific to a given typedef
//
void MDInfo::DisplayTypeDefInfo(mdTypeDef inTypeDef)
{
DisplayTypeDefProps(inTypeDef);
// Get field layout information.
HRESULT hr = NOERROR;
COR_FIELD_OFFSET *rFieldOffset = NULL;
ULONG cFieldOffset = 0;
hr = m_pImport->GetClassLayout(inTypeDef, NULL, rFieldOffset, 0, &cFieldOffset, NULL);
if (SUCCEEDED(hr) && cFieldOffset)
{
rFieldOffset = new COR_FIELD_OFFSET[cFieldOffset];
if (rFieldOffset == NULL)
Error("_calloc failed.", E_OUTOFMEMORY);
hr = m_pImport->GetClassLayout(inTypeDef, NULL, rFieldOffset, cFieldOffset, &cFieldOffset, NULL);
if (FAILED(hr)) { delete [] rFieldOffset; Error("GetClassLayout() failed.", hr); }
}
// No reason to display members if we're displaying fields and methods separately
DisplayGenericParams(inTypeDef, "\t");
DisplayFields(inTypeDef, rFieldOffset, cFieldOffset);
delete [] rFieldOffset;
DisplayMethods(inTypeDef);
DisplayProperties(inTypeDef);
DisplayEvents(inTypeDef);
DisplayMethodImpls(inTypeDef);
DisplayPermissions(inTypeDef, "");
DisplayInterfaceImpls(inTypeDef);
DisplayCustomAttributes(inTypeDef, "\t");
} // void MDInfo::DisplayTypeDefInfo()
// print out information about all of the given typeDef's interfaceImpls
//
void MDInfo::DisplayInterfaceImpls(mdTypeDef inTypeDef)
{
HCORENUM interfaceImplEnum = NULL;
mdInterfaceImpl interfaceImpls[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while(SUCCEEDED(hr = m_pImport->EnumInterfaceImpls( &interfaceImplEnum,
inTypeDef,interfaceImpls,ARRAY_SIZE(interfaceImpls), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\tInterfaceImpl #%d (%08x)", totalCount, interfaceImpls[i]);
WriteLine("\t-------------------------------------------------------");
DisplayInterfaceImplInfo(interfaceImpls[i]);
DisplayPermissions(interfaceImpls[i], "\t");
WriteLine("");
}
}
m_pImport->CloseEnum( interfaceImplEnum);
} // void MDInfo::DisplayInterfaceImpls()
// print the information for the given interface implementation
//
void MDInfo::DisplayInterfaceImplInfo(mdInterfaceImpl inImpl)
{
mdTypeDef typeDef;
mdToken token;
HRESULT hr;
WCHAR szTempBuf[STRING_BUFFER_LEN];
hr = m_pImport->GetInterfaceImplProps( inImpl, &typeDef, &token);
if (FAILED(hr)) Error("GetInterfaceImplProps failed.", hr);
VWriteLine("\t\tClass : %ls",TypeDeforRefName(typeDef, szTempBuf, ARRAY_SIZE(szTempBuf)));
VWriteLine("\t\tToken : %8.8X [%s] %ls",token,TokenTypeName(token), TypeDeforRefName(token, szTempBuf, ARRAY_SIZE(szTempBuf)));
DisplayCustomAttributes(inImpl, "\t\t");
} // void MDInfo::DisplayInterfaceImplInfo()
// displays the information for a particular property
//
void MDInfo::DisplayPropertyInfo(mdProperty inProp)
{
HRESULT hr;
mdTypeDef typeDef;
WCHAR propName[STRING_BUFFER_LEN];
DWORD flags;
#ifdef FEATURE_COMINTEROP
VARIANT defaultValue;
#endif
void const *pValue;
ULONG cbValue;
DWORD dwCPlusTypeFlag;
mdMethodDef setter, getter, otherMethod[ENUM_BUFFER_SIZE];
ULONG others;
PCCOR_SIGNATURE pbSigBlob;
ULONG ulSigBlob;
#ifdef FEATURE_COMINTEROP
::VariantInit(&defaultValue);
#endif
hr = m_pImport->GetPropertyProps(
inProp, // [IN] property token
&typeDef, // [OUT] typedef containing the property declaration.
propName, // [OUT] Property name
STRING_BUFFER_LEN, // [IN] the count of wchar of szProperty
NULL, // [OUT] actual count of wchar for property name
&flags, // [OUT] property flags.
&pbSigBlob, // [OUT] Signature Blob.
&ulSigBlob, // [OUT] Number of bytes in the signature blob.
&dwCPlusTypeFlag, // [OUT] default value
&pValue,
&cbValue,
&setter, // [OUT] setter method of the property
&getter, // [OUT] getter method of the property
otherMethod, // [OUT] other methods of the property
ENUM_BUFFER_SIZE, // [IN] size of rmdOtherMethod
&others); // [OUT] total number of other method of this property
if (FAILED(hr)) Error("GetPropertyProps failed.", hr);
VWriteLine("\t\tProp.Name : %ls (%8.8X)",propName,inProp);
char sFlags[STRING_BUFFER_LEN];
sFlags[0] = 0;
ISFLAG(Pr, SpecialName);
ISFLAG(Pr, RTSpecialName);
ISFLAG(Pr, HasDefault);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags);
if (ulSigBlob)
DisplaySignature(pbSigBlob, ulSigBlob, "");
else
VWriteLine("\t\tERROR: no valid signature ");
WCHAR szTempBuf[STRING_BUFFER_LEN];
#ifdef FEATURE_COMINTEROP
_FillVariant((BYTE)dwCPlusTypeFlag, pValue, cbValue, &defaultValue);
VWriteLine("\t\tDefltValue: %ls",VariantAsString(&defaultValue));
#endif
VWriteLine("\t\tSetter : (%08x) %ls",setter,MemberDeforRefName(setter, szTempBuf, ARRAY_SIZE(szTempBuf)));
VWriteLine("\t\tGetter : (%08x) %ls",getter,MemberDeforRefName(getter, szTempBuf, ARRAY_SIZE(szTempBuf)));
// do something with others?
VWriteLine("\t\t%ld Others",others);
DisplayCustomAttributes(inProp, "\t\t");
#ifdef FEATURE_COMINTEROP
::VariantClear(&defaultValue);
#endif
} // void MDInfo::DisplayPropertyInfo()
// displays info for each property
//
void MDInfo::DisplayProperties(mdTypeDef inTypeDef)
{
HCORENUM propEnum = NULL;
mdProperty props[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while(SUCCEEDED(hr = m_pImport->EnumProperties( &propEnum,
inTypeDef,props,ARRAY_SIZE(props), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\tProperty #%d (%08x)", totalCount, props[i]);
WriteLine("\t-------------------------------------------------------");
DisplayPropertyInfo(props[i]);
DisplayPermissions(props[i], "\t");
WriteLine("");
}
}
m_pImport->CloseEnum( propEnum);
} // void MDInfo::DisplayProperties()
// Display all information about a particular event
//
void MDInfo::DisplayEventInfo(mdEvent inEvent)
{
HRESULT hr;
mdTypeDef typeDef;
WCHAR eventName[STRING_BUFFER_LEN];
DWORD flags;
mdToken eventType;
mdMethodDef addOn, removeOn, fire, otherMethod[ENUM_BUFFER_SIZE];
ULONG totalOther;
hr = m_pImport->GetEventProps(
// [IN] The scope.
inEvent, // [IN] event token
&typeDef, // [OUT] typedef containing the event declaration.
eventName, // [OUT] Event name
STRING_BUFFER_LEN, // [IN] the count of wchar of szEvent
NULL, // [OUT] actual count of wchar for event's name
&flags, // [OUT] Event flags.
&eventType, // [OUT] EventType class
&addOn, // [OUT] AddOn method of the event
&removeOn, // [OUT] RemoveOn method of the event
&fire, // [OUT] Fire method of the event
otherMethod, // [OUT] other method of the event
ARRAY_SIZE(otherMethod), // [IN] size of rmdOtherMethod
&totalOther); // [OUT] total number of other method of this event
if (FAILED(hr)) Error("GetEventProps failed.", hr);
VWriteLine("\t\tName : %ls (%8.8X)",eventName,inEvent);
char sFlags[STRING_BUFFER_LEN];
sFlags[0] = 0;
ISFLAG(Ev, SpecialName);
ISFLAG(Ev, RTSpecialName);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags);
WCHAR szTempBuf[STRING_BUFFER_LEN];
VWriteLine("\t\tEventType : %8.8X [%s]",eventType,TokenTypeName(eventType));
VWriteLine("\t\tAddOnMethd: (%08x) %ls",addOn,MemberDeforRefName(addOn, szTempBuf, ARRAY_SIZE(szTempBuf)));
VWriteLine("\t\tRmvOnMethd: (%08x) %ls",removeOn,MemberDeforRefName(removeOn, szTempBuf, ARRAY_SIZE(szTempBuf)));
VWriteLine("\t\tFireMethod: (%08x) %ls",fire,MemberDeforRefName(fire, szTempBuf, ARRAY_SIZE(szTempBuf)));
VWriteLine("\t\t%ld OtherMethods",totalOther);
DisplayCustomAttributes(inEvent, "\t\t");
} // void MDInfo::DisplayEventInfo()
// Display information about all events in a typedef
//
void MDInfo::DisplayEvents(mdTypeDef inTypeDef)
{
HCORENUM eventEnum = NULL;
mdEvent events[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while(SUCCEEDED(hr = m_pImport->EnumEvents( &eventEnum,
inTypeDef,events,ARRAY_SIZE(events), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\tEvent #%d (%08x)", totalCount, events[i]);
WriteLine("\t-------------------------------------------------------");
DisplayEventInfo(events[i]);
DisplayPermissions(events[i], "\t");
WriteLine("");
}
}
m_pImport->CloseEnum( eventEnum);
} // void MDInfo::DisplayEvents()
// print info for the passed-in custom attribute
// This function is used to print the custom attribute information for both TypeDefs and
// MethodDefs which need slightly different formatting. preFix helps fix it up.
//
void MDInfo::DisplayCustomAttributeInfo(mdCustomAttribute inValue, const char *preFix)
{
const BYTE *pValue; // The custom value.
ULONG cbValue; // Length of the custom value.
HRESULT hr; // A result.
mdToken tkObj; // Attributed object.
mdToken tkType; // Type of the custom attribute.
mdToken tk; // For name lookup.
LPCUTF8 pMethName=0; // Name of custom attribute ctor, if any.
CQuickBytes qSigName; // Buffer to pretty-print signature.
PCCOR_SIGNATURE pSig=0; // Signature of ctor.
ULONG cbSig; // Size of the signature.
BOOL bCoffSymbol = false; // true for coff symbol CA's.
WCHAR rcName[MAX_CLASS_NAME]; // Name of the type.
hr = m_pImport->GetCustomAttributeProps( // S_OK or error.
inValue, // The attribute.
&tkObj, // The attributed object
&tkType, // The attributes type.
(const void**)&pValue, // Put pointer to data here.
&cbValue); // Put size here.
if (FAILED(hr)) Error("GetCustomAttributeProps failed.", hr);
VWriteLine("%s\tCustomAttribute Type: %08x", preFix, tkType);
// Get the name of the memberref or methoddef.
tk = tkType;
rcName[0] = L'\0';
// Get the member name, and the parent token.
switch (TypeFromToken(tk))
{
case mdtMemberRef:
hr = m_pImport->GetNameFromToken(tk, &pMethName);
if (FAILED(hr)) Error("GetNameFromToken failed.", hr);
hr = m_pImport->GetMemberRefProps( tk, &tk, 0, 0, 0, &pSig, &cbSig);
if (FAILED(hr)) Error("GetMemberRefProps failed.", hr);
break;
case mdtMethodDef:
hr = m_pImport->GetNameFromToken(tk, &pMethName);
if (FAILED(hr)) Error("GetNameFromToken failed.", hr);
hr = m_pImport->GetMethodProps(tk, &tk, 0, 0, 0, 0, &pSig, &cbSig, 0, 0);
if (FAILED(hr)) Error("GetMethodProps failed.", hr);
break;
} // switch
// Get the type name.
switch (TypeFromToken(tk))
{
case mdtTypeDef:
hr = m_pImport->GetTypeDefProps(tk, rcName,MAX_CLASS_NAME,0, 0,0);
if (FAILED(hr)) Error("GetTypeDefProps failed.", hr);
break;
case mdtTypeRef:
hr = m_pImport->GetTypeRefProps(tk, 0, rcName,MAX_CLASS_NAME,0);
if (FAILED(hr)) Error("GetTypeRefProps failed.", hr);
break;
} // switch
if (pSig && pMethName)
{
int iLen;
LPWSTR pwzName = (LPWSTR)(new WCHAR[iLen= 1+(ULONG32)strlen(pMethName)]);
if(pwzName)
{
WszMultiByteToWideChar(CP_UTF8,0, pMethName,-1, pwzName,iLen);
PrettyPrintSigLegacy(pSig, cbSig, pwzName, &qSigName, m_pImport);
delete [] pwzName;
}
}
VWrite("%s\tCustomAttributeName: %ls", preFix, rcName);
if (pSig && pMethName)
VWrite(" :: %S", qSigName.Ptr());
// Keep track of coff overhead.
if (!wcscmp(W("__DecoratedName"), rcName))
{
bCoffSymbol = true;
g_cbCoffNames += cbValue + 6;
}
WriteLine("");
VWriteLine("%s\tLength: %ld", preFix, cbValue);
char newPreFix[40];
sprintf_s(newPreFix, 40, "%s\tValue ", preFix);
DumpHex(newPreFix, pValue, cbValue);
if (bCoffSymbol)
VWriteLine("%s\t %s", preFix, pValue);
// Try to decode the constructor blob. This is incomplete, but covers the most popular cases.
if (pSig)
{ // Interpret the signature.
PCCOR_SIGNATURE ps = pSig;
ULONG cb;
ULONG ulData;
ULONG cParams;
ULONG ulVal;
UINT8 u1 = 0;
UINT16 u2 = 0;
UINT32 u4 = 0;
UINT64 u8 = 0;
unsigned __int64 uI64;
double dblVal;
ULONG cbVal;
LPCUTF8 pStr;
CustomAttributeParser CA(pValue, cbValue);
CA.ValidateProlog();
// Get the calling convention.
cb = CorSigUncompressData(ps, &ulData);
ps += cb;
// Get the count of params.
cb = CorSigUncompressData(ps, &cParams);
ps += cb;
// Get the return value.
cb = CorSigUncompressData(ps, &ulData);
ps += cb;
if (ulData == ELEMENT_TYPE_VOID)
{
VWrite("%s\tctor args: (", preFix);
// For each param...
for (ULONG i=0; i<cParams; ++i)
{ // Get the next param type.
cb = CorSigUncompressData(ps, &ulData);
ps += cb;
if (i) Write(", ");
DoObject:
switch (ulData)
{
// For ET_OBJECT, the next byte in the blob is the ET of the actual data.
case ELEMENT_TYPE_OBJECT:
CA.GetU1(&u1);
ulData = u1;
goto DoObject;
case ELEMENT_TYPE_I1:
case ELEMENT_TYPE_U1:
CA.GetU1(&u1);
ulVal = u1;
goto PrintVal;
case ELEMENT_TYPE_I2:
case ELEMENT_TYPE_U2:
CA.GetU2(&u2);
ulVal = u2;
goto PrintVal;
case ELEMENT_TYPE_I4:
case ELEMENT_TYPE_U4:
CA.GetU4(&u4);
ulVal = u4;
PrintVal:
VWrite("%d", ulVal);
break;
case ELEMENT_TYPE_STRING:
CA.GetString(&pStr, &cbVal);
VWrite("\"%s\"", pStr);
break;
// The only class type that we accept is Type, which is stored as a string.
case ELEMENT_TYPE_CLASS:
// Eat the class type.
cb = CorSigUncompressData(ps, &ulData);
ps += cb;
// Get the name of the type.
CA.GetString(&pStr, &cbVal);
VWrite("typeof(%s)", pStr);
break;
case SERIALIZATION_TYPE_TYPE:
CA.GetString(&pStr, &cbVal);
VWrite("typeof(%s)", pStr);
break;
case ELEMENT_TYPE_I8:
case ELEMENT_TYPE_U8:
CA.GetU8(&u8);
uI64 = u8;
VWrite("%#lx", uI64);
break;
case ELEMENT_TYPE_R4:
dblVal = CA.GetR4();
VWrite("%f", dblVal);
break;
case ELEMENT_TYPE_R8:
dblVal = CA.GetR8();
VWrite("%f", dblVal);
break;
default:
// bail...
i = cParams;
Write(" <can not decode> ");
break;
}
}
WriteLine(")");
}
}
WriteLine("");
} // void MDInfo::DisplayCustomAttributeInfo()
// Print all custom values for the given token
// This function is used to print the custom value information for all tokens,
// which need slightly different formatting. preFix helps fix it up.
//
void MDInfo::DisplayCustomAttributes(mdToken inToken, const char *preFix)
{
HCORENUM customAttributeEnum = NULL;
mdCustomAttribute customAttributes[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while(SUCCEEDED(hr = m_pImport->EnumCustomAttributes( &customAttributeEnum, inToken, 0,
customAttributes, ARRAY_SIZE(customAttributes), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("%sCustomAttribute #%d (%08x)", preFix, totalCount, customAttributes[i]);
VWriteLine("%s-------------------------------------------------------", preFix);
DisplayCustomAttributeInfo(customAttributes[i], preFix);
}
}
m_pImport->CloseEnum( customAttributeEnum);
} // void MDInfo::DisplayCustomAttributes()
// Show the passed-in token's permissions
//
//
void MDInfo::DisplayPermissions(mdToken tk, const char *preFix)
{
HCORENUM permissionEnum = NULL;
mdPermission permissions[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumPermissionSets( &permissionEnum,
tk, 0, permissions, ARRAY_SIZE(permissions), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("%s\tPermission #%d (%08x)", preFix, totalCount, permissions[i]);
VWriteLine("%s\t-------------------------------------------------------", preFix);
DisplayPermissionInfo(permissions[i], preFix);
WriteLine("");
}
}
m_pImport->CloseEnum( permissionEnum);
} // void MDInfo::DisplayPermissions()
// print properties of given rolecheck
//
//
void MDInfo::DisplayPermissionInfo(mdPermission inPermission, const char *preFix)
{
DWORD dwAction;
const BYTE *pvPermission;
ULONG cbPermission;
const char *flagDesc = NULL;
char newPreFix[STRING_BUFFER_LEN];
HRESULT hr;
hr = m_pImport->GetPermissionSetProps( inPermission, &dwAction,
(const void**)&pvPermission, &cbPermission);
if (FAILED(hr)) Error("GetPermissionSetProps failed.", hr);
switch(dwAction)
{
case dclActionNil: flagDesc = "ActionNil"; break;
case dclRequest: flagDesc = "Request"; break;
case dclDemand: flagDesc = "Demand"; break;
case dclAssert: flagDesc = "Assert"; break;
case dclDeny: flagDesc = "Deny"; break;
case dclPermitOnly: flagDesc = "PermitOnly"; break;
case dclLinktimeCheck: flagDesc = "LinktimeCheck"; break;
case dclInheritanceCheck: flagDesc = "InheritanceCheck"; break;
case dclRequestMinimum: flagDesc = "RequestMinimum"; break;
case dclRequestOptional: flagDesc = "RequestOptional"; break;
case dclRequestRefuse: flagDesc = "RequestRefuse"; break;
case dclPrejitGrant: flagDesc = "PrejitGrant"; break;
case dclPrejitDenied: flagDesc = "PrejitDenied"; break;
case dclNonCasDemand: flagDesc = "NonCasDemand"; break;
case dclNonCasLinkDemand: flagDesc = "NonCasLinkDemand"; break;
case dclNonCasInheritance: flagDesc = "NonCasInheritance"; break;
}
VWriteLine("%s\t\tAction : %s", preFix, flagDesc);
VWriteLine("%s\t\tBlobLen : %d", preFix, cbPermission);
if (cbPermission)
{
sprintf_s(newPreFix, STRING_BUFFER_LEN, "%s\tBlob", preFix);
DumpHex(newPreFix, pvPermission, cbPermission, false, 24);
}
sprintf_s (newPreFix, STRING_BUFFER_LEN, "\t\t%s", preFix);
DisplayCustomAttributes(inPermission, newPreFix);
} // void MDInfo::DisplayPermissionInfo()
// formats the given GUID into the passed-in buffer in standard string form
LPWSTR MDInfo::GUIDAsString(GUID inGuid, _Out_writes_(bufLen) LPWSTR guidString, ULONG bufLen)
{
StringFromGUID2(inGuid, guidString, bufLen);
return guidString;
} // LPWSTR MDInfo::GUIDAsString()
#ifdef FEATURE_COMINTEROP
LPCWSTR MDInfo::VariantAsString(VARIANT *pVariant)
{
HRESULT hr = S_OK;
if (V_VT(pVariant) == VT_UNKNOWN)
{
_ASSERTE(V_UNKNOWN(pVariant) == NULL);
return W("<NULL>");
}
else if (SUCCEEDED(hr = ::VariantChangeType(pVariant, pVariant, 0, VT_BSTR)))
return V_BSTR(pVariant);
else if (hr == DISP_E_BADVARTYPE && V_VT(pVariant) == VT_I8)
{
// allocate the bstr.
char szStr[32];
WCHAR wszStr[32];
// Set variant type to bstr.
V_VT(pVariant) = VT_BSTR;
// Create the ansi string.
sprintf_s(szStr, 32, "%I64d", V_CY(pVariant).int64);
// Convert to unicode.
WszMultiByteToWideChar(CP_ACP, 0, szStr, -1, wszStr, 32);
// convert to bstr and set variant value.
V_BSTR(pVariant) = ::SysAllocString(wszStr);
if (V_BSTR(pVariant) == NULL)
Error("SysAllocString() failed.", E_OUTOFMEMORY);
return V_BSTR(pVariant);
}
else
return W("ERROR");
} // LPWSTR MDInfo::VariantAsString()
#endif
bool TrySigUncompress(PCCOR_SIGNATURE pData, // [IN] compressed data
ULONG *pDataOut, // [OUT] the expanded *pData
ULONG *cbCur)
{
ULONG ulSize = CorSigUncompressData(pData, pDataOut);
if (ulSize == (ULONG)-1)
{
*cbCur = ulSize;
return false;
} else
{
*cbCur += ulSize;
return true;
}
}
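// displays the native marshaling (FieldMarshal) information for the given
// field or parameter token, if any is present
//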
void MDInfo::DisplayFieldMarshal(mdToken inToken)
{
PCCOR_SIGNATURE pvNativeType; // [OUT] native type of this field
ULONG cbNativeType; // [OUT] the count of bytes of *ppvNativeType
HRESULT hr;
hr = m_pImport->GetFieldMarshal( inToken, &pvNativeType, &cbNativeType);
if (FAILED(hr) && hr != CLDB_E_RECORD_NOTFOUND) Error("GetFieldMarshal failed.", hr);
if (hr != CLDB_E_RECORD_NOTFOUND)
{
ULONG cbCur = 0;
ULONG ulData;
ULONG ulStrLoc;
char szNTDesc[STRING_BUFFER_LEN];
while (cbCur < cbNativeType)
{
ulStrLoc = 0;
ulData = NATIVE_TYPE_MAX;
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
if (ulData >= sizeof(g_szNativeType)/sizeof(*g_szNativeType))
{
cbCur = (ULONG)-1;
continue;
}
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "%s ", g_szNativeType[ulData]);
switch (ulData)
{
case NATIVE_TYPE_FIXEDSYSSTRING:
{
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{StringElementCount: %d} ",ulData);
}
}
break;
case NATIVE_TYPE_FIXEDARRAY:
{
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{ArrayElementCount: %d",ulData);
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", ArrayElementType(NT): %d",ulData);
}
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc,"}");
}
}
break;
case NATIVE_TYPE_ARRAY:
{
if (cbCur < cbNativeType)
{
BOOL bElemTypeSpecified;
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
if (ulData != NATIVE_TYPE_MAX)
{
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{ArrayElementType(NT): %d", ulData);
bElemTypeSpecified = TRUE;
}
else
{
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{");
bElemTypeSpecified = FALSE;
}
if (cbCur < cbNativeType)
{
if (bElemTypeSpecified)
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", ");
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "SizeParamIndex: %d",ulData);
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", SizeParamMultiplier: %d",ulData);
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", SizeConst: %d",ulData);
}
}
}
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "}");
}
}
break;
case NATIVE_TYPE_SAFEARRAY:
{
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{SafeArraySubType(VT): %d, ",ulData);
// Extract the element type name if it is specified.
if (cbCur < cbNativeType)
{
LPUTF8 strTemp = NULL;
int strLen = 0;
int ByteCountLength = 0;
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength);
cbCur += ByteCountLength;
strTemp = (LPUTF8)(new char[strLen + 1]);
if(strTemp)
{
memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen);
strTemp[strLen] = 0;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "ElementTypeName: %s}", strTemp);
cbCur += strLen;
_ASSERTE(cbCur == cbNativeType);
delete [] strTemp;
}
}
else
{
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "ElementTypeName: }");
}
}
}
break;
case NATIVE_TYPE_CUSTOMMARSHALER:
{
LPUTF8 strTemp = NULL;
int strLen = 0;
int ByteCountLength = 0;
// Extract the typelib GUID.
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength);
cbCur += ByteCountLength;
strTemp = (LPUTF8)(new char[strLen + 1]);
if(strTemp)
{
memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen);
strTemp[strLen] = 0;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{Typelib: %s, ", strTemp);
cbCur += strLen;
_ASSERTE(cbCur < cbNativeType);
delete [] strTemp;
}
// Extract the name of the native type.
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength);
cbCur += ByteCountLength;
strTemp = (LPUTF8)(new char[strLen + 1]);
if(strTemp)
{
memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen);
strTemp[strLen] = 0;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Native: %s, ", strTemp);
cbCur += strLen;
_ASSERTE(cbCur < cbNativeType);
delete [] strTemp;
}
// Extract the name of the custom marshaler.
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength);
cbCur += ByteCountLength;
strTemp = (LPUTF8)(new char[strLen + 1]);
if(strTemp)
{
memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen);
strTemp[strLen] = 0;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Marshaler: %s, ", strTemp);
cbCur += strLen;
_ASSERTE(cbCur < cbNativeType);
delete [] strTemp;
}
// Extract the cookie string.
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength);
cbCur += ByteCountLength;
if (strLen > 0)
{
strTemp = (LPUTF8)(new char[strLen + 1]);
if(strTemp)
{
memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen);
strTemp[strLen] = 0;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Cookie: ");
// Copy the cookie string and transform the embedded nulls into \0's.
for (int i = 0; i < strLen - 1; i++, cbCur++)
{
if (strTemp[i] == 0)
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "\\0");
else
szNTDesc[ulStrLoc++] = strTemp[i];
}
szNTDesc[ulStrLoc++] = strTemp[strLen - 1];
cbCur++;
delete [] strTemp;
}
}
else
{
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Cookie: ");
}
// Finish the custom marshaler native type description.
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "}");
_ASSERTE(cbCur <= cbNativeType);
}
break;
default:
{
// normal nativetype element: do nothing
}
}
VWriteLine("\t\t\t\t%s",szNTDesc);
if (ulData >= NATIVE_TYPE_MAX)
break;
}
if (cbCur == (ULONG)-1)
{
// There was something that we didn't grok in the signature.
// Just dump out the blob as hex
VWrite("\t\t\t\t{", szNTDesc);
while (cbNativeType--)
VWrite(" %2.2X", *pvNativeType++);
VWriteLine(" }");
}
}
} // void MDInfo::DisplayFieldMarshal()
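// displays the P/Invoke map data (entry point, module ref, and mapping flags)
// for the given methoddef or fielddef token
//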
void MDInfo::DisplayPinvokeInfo(mdToken inToken)
{
HRESULT hr = NOERROR;
DWORD flags;
WCHAR rcImport[512];
mdModuleRef tkModuleRef;
char sFlags[STRING_BUFFER_LEN];
hr = m_pImport->GetPinvokeMap(inToken, &flags, rcImport,
ARRAY_SIZE(rcImport), 0, &tkModuleRef);
if (FAILED(hr))
{
if (hr != CLDB_E_RECORD_NOTFOUND)
VWriteLine("ERROR: GetPinvokeMap failed.", hr);
return;
}
WriteLine("\t\tPinvoke Map Data:");
VWriteLine("\t\tEntry point: %S", rcImport);
VWriteLine("\t\tModule ref: %08x", tkModuleRef);
sFlags[0] = 0;
ISFLAG(Pm, NoMangle);
ISFLAG(Pm, CharSetNotSpec);
ISFLAG(Pm, CharSetAnsi);
ISFLAG(Pm, CharSetUnicode);
ISFLAG(Pm, CharSetAuto);
ISFLAG(Pm, SupportsLastError);
ISFLAG(Pm, CallConvWinapi);
ISFLAG(Pm, CallConvCdecl);
ISFLAG(Pm, CallConvStdcall);
ISFLAG(Pm, CallConvThiscall);
ISFLAG(Pm, CallConvFastcall);
ISFLAG(Pm, BestFitEnabled);
ISFLAG(Pm, BestFitDisabled);
ISFLAG(Pm, BestFitUseAssem);
ISFLAG(Pm, ThrowOnUnmappableCharEnabled);
ISFLAG(Pm, ThrowOnUnmappableCharDisabled);
ISFLAG(Pm, ThrowOnUnmappableCharUseAssem);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\t\tMapping flags: %s (%08x)", sFlags, flags);
} // void MDInfo::DisplayPinvokeInfo()
/////////////////////////////////////////////////////////////////////////
// void DisplaySignature(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob, const char *preFix);
//
// Display COM+ signature -- taken from cordump.cpp's DumpSignature
/////////////////////////////////////////////////////////////////////////
void MDInfo::DisplaySignature(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob, const char *preFix)
{
ULONG cbCur = 0;
ULONG cb;
// 428793: Prefix complained correctly about uninitialized data.
ULONG ulData = (ULONG) IMAGE_CEE_CS_CALLCONV_MAX;
ULONG ulArgs;
HRESULT hr = NOERROR;
ULONG ulSigBlobStart = ulSigBlob;
// initialize sigBuf
InitSigBuffer();
cb = CorSigUncompressData(pbSigBlob, &ulData);
VWriteLine("%s\t\tCallCnvntn: %s", preFix, (g_strCalling[ulData & IMAGE_CEE_CS_CALLCONV_MASK]));
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
if (ulData & IMAGE_CEE_CS_CALLCONV_HASTHIS)
VWriteLine("%s\t\thasThis ", preFix);
if (ulData & IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS)
VWriteLine("%s\t\texplicit ", preFix);
if (ulData & IMAGE_CEE_CS_CALLCONV_GENERIC)
VWriteLine("%s\t\tgeneric ", preFix);
// initialize sigBuf
InitSigBuffer();
if ( isCallConv(ulData,IMAGE_CEE_CS_CALLCONV_FIELD) )
{
// display field type
if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb)))
goto ErrExit;
VWriteLine("%s\t\tField type: %s", preFix, (LPSTR)m_sigBuf.Ptr());
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
}
else
{
if (ulData & IMAGE_CEE_CS_CALLCONV_GENERIC)
{
ULONG ulTyArgs;
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulTyArgs);
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
VWriteLine("%s\t\tType Arity:%d ", preFix, ulTyArgs);
}
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulArgs);
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
if (ulData != IMAGE_CEE_CS_CALLCONV_LOCAL_SIG && ulData != IMAGE_CEE_CS_CALLCONV_GENERICINST)
{
// display return type when it is not a local varsig
if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb)))
goto ErrExit;
VWriteLine("%s\t\tReturnType:%s", preFix, (LPSTR)m_sigBuf.Ptr());
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
}
// display the count of arguments
// display arguments
if (ulSigBlob)
VWriteLine("%s\t\t%ld Arguments", preFix, ulArgs);
else
VWriteLine("%s\t\tNo arguments.", preFix);
ULONG i = 0;
while (i < ulArgs && ulSigBlob > 0)
{
ULONG ulDataTemp;
// Handle the sentinel for varargs because it isn't counted in the args.
CorSigUncompressData(&pbSigBlob[cbCur], &ulDataTemp);
++i;
// initialize sigBuf
InitSigBuffer();
if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb)))
goto ErrExit;
VWriteLine("%s\t\t\tArgument #%ld: %s",preFix, i, (LPSTR)m_sigBuf.Ptr());
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
}
}
// Nothing consumed but not yet counted.
cb = 0;
ErrExit:
// We should have consumed all signature blob. If not, dump the sig in hex.
// Also dump in hex if so requested.
if (m_DumpFilter & dumpMoreHex || ulSigBlob != 0)
{
// Did we not consume enough, or try to consume too much?
if (cb > ulSigBlob)
WriteLine("\tERROR IN SIGNATURE: Signature should be larger.");
else
if (cb < ulSigBlob)
{
VWrite("\tERROR IN SIGNATURE: Not all of signature blob was consumed. %d byte(s) remain", ulSigBlob);
// If it is short, just append it to the end.
if (ulSigBlob < 4)
{
Write(": ");
for (; ulSigBlob; ++cbCur, --ulSigBlob)
VWrite("%02x ", pbSigBlob[cbCur]);
WriteLine("");
goto ErrExit2;
}
WriteLine("");
}
// Any appropriate error message has been issued. Dump sig in hex, as determined
// by error or command line switch.
cbCur = 0;
ulSigBlob = ulSigBlobStart;
char rcNewPrefix[80];
sprintf_s(rcNewPrefix, 80, "%s\t\tSignature ", preFix);
DumpHex(rcNewPrefix, pbSigBlob, ulSigBlob, false, 24);
}
ErrExit2:
if (FAILED(hr))
Error("ERROR!! Bad signature blob value!");
return;
} // void MDInfo::DisplaySignature()
/////////////////////////////////////////////////////////////////////////
// HRESULT GetOneElementType(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob, ULONG *pcb)
//
// Adds description of element type to the end of buffer -- caller must ensure
// buffer is large enough.
/////////////////////////////////////////////////////////////////////////
HRESULT MDInfo::GetOneElementType(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob, ULONG *pcb)
{
HRESULT hr = S_OK; // A result.
ULONG cbCur = 0;
ULONG cb;
ULONG ulData = ELEMENT_TYPE_MAX;
ULONG ulTemp;
int iTemp = 0;
mdToken tk;
cb = CorSigUncompressData(pbSigBlob, &ulData);
cbCur += cb;
// Handle the modifiers.
if (ulData & ELEMENT_TYPE_MODIFIER)
{
if (ulData == ELEMENT_TYPE_SENTINEL)
IfFailGo(AddToSigBuffer("<ELEMENT_TYPE_SENTINEL>"));
else if (ulData == ELEMENT_TYPE_PINNED)
IfFailGo(AddToSigBuffer("PINNED"));
else
{
hr = E_FAIL;
goto ErrExit;
}
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
goto ErrExit;
}
// Handle the underlying element types.
if (ulData >= ELEMENT_TYPE_MAX)
{
hr = E_FAIL;
goto ErrExit;
}
while (ulData == ELEMENT_TYPE_PTR || ulData == ELEMENT_TYPE_BYREF)
{
IfFailGo(AddToSigBuffer(" "));
IfFailGo(AddToSigBuffer(g_szMapElementType[ulData]));
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData);
cbCur += cb;
}
IfFailGo(AddToSigBuffer(" "));
IfFailGo(AddToSigBuffer(g_szMapElementType[ulData]));
if (CorIsPrimitiveType((CorElementType)ulData) ||
ulData == ELEMENT_TYPE_TYPEDBYREF ||
ulData == ELEMENT_TYPE_OBJECT ||
ulData == ELEMENT_TYPE_I ||
ulData == ELEMENT_TYPE_U)
{
// If this is a primitive type, we are done
goto ErrExit;
}
if (ulData == ELEMENT_TYPE_VALUETYPE ||
ulData == ELEMENT_TYPE_CLASS ||
ulData == ELEMENT_TYPE_CMOD_REQD ||
ulData == ELEMENT_TYPE_CMOD_OPT)
{
cb = CorSigUncompressToken(&pbSigBlob[cbCur], &tk);
cbCur += cb;
// get the name of type ref. Don't care if truncated
if (TypeFromToken(tk) == mdtTypeDef || TypeFromToken(tk) == mdtTypeRef)
{
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %ls",TypeDeforRefName(tk, m_szTempBuf, ARRAY_SIZE(m_szTempBuf)));
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
}
else
{
_ASSERTE(TypeFromToken(tk) == mdtTypeSpec);
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %8x", tk);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
}
if (ulData == ELEMENT_TYPE_CMOD_REQD ||
ulData == ELEMENT_TYPE_CMOD_OPT)
{
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
}
goto ErrExit;
}
if (ulData == ELEMENT_TYPE_SZARRAY)
{
// display the base type of SZARRAY
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
goto ErrExit;
}
// instantiated type
if (ulData == ELEMENT_TYPE_GENERICINST)
{
// display the type constructor
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
ULONG numArgs;
cb = CorSigUncompressData(&pbSigBlob[cbCur], &numArgs);
cbCur += cb;
IfFailGo(AddToSigBuffer("<"));
while (numArgs > 0)
{
if (cbCur > ulSigBlob)
goto ErrExit;
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
--numArgs;
if (numArgs > 0)
IfFailGo(AddToSigBuffer(","));
}
IfFailGo(AddToSigBuffer(">"));
goto ErrExit;
}
if (ulData == ELEMENT_TYPE_VAR)
{
ULONG index;
cb = CorSigUncompressData(&pbSigBlob[cbCur], &index);
cbCur += cb;
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, "!%d", index);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
goto ErrExit;
}
if (ulData == ELEMENT_TYPE_MVAR)
{
ULONG index;
cb = CorSigUncompressData(&pbSigBlob[cbCur], &index);
cbCur += cb;
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, "!!%d", index);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
goto ErrExit;
}
if (ulData == ELEMENT_TYPE_FNPTR)
{
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData);
cbCur += cb;
if (ulData & IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS)
IfFailGo(AddToSigBuffer(" explicit"));
if (ulData & IMAGE_CEE_CS_CALLCONV_HASTHIS)
IfFailGo(AddToSigBuffer(" hasThis"));
IfFailGo(AddToSigBuffer(" "));
IfFailGo(AddToSigBuffer(g_strCalling[ulData & IMAGE_CEE_CS_CALLCONV_MASK]));
// Get number of args
ULONG numArgs;
cb = CorSigUncompressData(&pbSigBlob[cbCur], &numArgs);
cbCur += cb;
// do return type
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
IfFailGo(AddToSigBuffer("("));
while (numArgs > 0)
{
if (cbCur > ulSigBlob)
goto ErrExit;
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
--numArgs;
if (numArgs > 0)
IfFailGo(AddToSigBuffer(","));
}
IfFailGo(AddToSigBuffer(" )"));
goto ErrExit;
}
if(ulData != ELEMENT_TYPE_ARRAY) return E_FAIL;
// display the base type of SDARRAY
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
// display the rank of MDARRAY
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData);
cbCur += cb;
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
if (ulData == 0)
// we are done if no rank specified
goto ErrExit;
// how many dimensions have size specified?
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData);
cbCur += cb;
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
while (ulData)
{
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulTemp);
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulTemp);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
cbCur += cb;
ulData--;
}
// how many dimensions have lower bounds specified?
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData);
cbCur += cb;
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
while (ulData)
{
cb = CorSigUncompressSignedInt(&pbSigBlob[cbCur], &iTemp);
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", iTemp);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
cbCur += cb;
ulData--;
}
ErrExit:
if (cbCur > ulSigBlob)
hr = E_FAIL;
*pcb = cbCur;
return hr;
} // HRESULT MDInfo::GetOneElementType()
// Display the fields of the N/Direct custom value structure.
void MDInfo::DisplayCorNativeLink(COR_NATIVE_LINK *pCorNLnk, const char *preFix)
{
// Print the LinkType.
const char *curField = "\tLink Type : ";
switch(pCorNLnk->m_linkType)
{
case nltNone:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nltNone", pCorNLnk->m_linkType);
break;
case nltAnsi:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nltAnsi", pCorNLnk->m_linkType);
break;
case nltUnicode:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nltUnicode", pCorNLnk->m_linkType);
break;
case nltAuto:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nltAuto", pCorNLnk->m_linkType);
break;
default:
_ASSERTE(!"Invalid Native Link Type!");
}
// Print the link flags
curField = "\tLink Flags : ";
switch(pCorNLnk->m_flags)
{
case nlfNone:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nlfNone", pCorNLnk->m_flags);
break;
case nlfLastError:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nlfLastError", pCorNLnk->m_flags);
break;
default:
_ASSERTE(!"Invalid Native Link Flags!");
}
// Print the entry point.
WCHAR memRefName[STRING_BUFFER_LEN];
HRESULT hr;
hr = m_pImport->GetMemberRefProps( pCorNLnk->m_entryPoint, NULL, memRefName,
STRING_BUFFER_LEN, NULL, NULL, NULL);
if (FAILED(hr)) Error("GetMemberRefProps failed.", hr);
VWriteLine("%s\tEntry Point : %ls (0x%08x)", preFix, memRefName, pCorNLnk->m_entryPoint);
} // void MDInfo::DisplayCorNativeLink()
// Fills the given variant with the value pointed to by pValue, whose type is given by bCPlusTypeFlag
//
// Taken from MetaInternal.cpp
HRESULT _FillVariant(
BYTE bCPlusTypeFlag,
const void *pValue,
ULONG cbValue,
VARIANT *pvar)
{
HRESULT hr = NOERROR;
switch (bCPlusTypeFlag)
{
case ELEMENT_TYPE_BOOLEAN:
V_VT(pvar) = VT_BOOL;
V_BOOL(pvar) = *((BYTE*)pValue); //*((UNALIGNED VARIANT_BOOL *)pValue);
break;
case ELEMENT_TYPE_I1:
V_VT(pvar) = VT_I1;
V_I1(pvar) = *((CHAR*)pValue);
break;
case ELEMENT_TYPE_U1:
V_VT(pvar) = VT_UI1;
V_UI1(pvar) = *((BYTE*)pValue);
break;
case ELEMENT_TYPE_I2:
V_VT(pvar) = VT_I2;
V_I2(pvar) = GET_UNALIGNED_VAL16(pValue);
break;
case ELEMENT_TYPE_U2:
case ELEMENT_TYPE_CHAR:
V_VT(pvar) = VT_UI2;
V_UI2(pvar) = GET_UNALIGNED_VAL16(pValue);
break;
case ELEMENT_TYPE_I4:
V_VT(pvar) = VT_I4;
V_I4(pvar) = GET_UNALIGNED_VAL32(pValue);
break;
case ELEMENT_TYPE_U4:
V_VT(pvar) = VT_UI4;
V_UI4(pvar) = GET_UNALIGNED_VAL32(pValue);
break;
case ELEMENT_TYPE_R4:
{
V_VT(pvar) = VT_R4;
__int32 Value = GET_UNALIGNED_VAL32(pValue);
V_R4(pvar) = (float &)Value;
}
break;
case ELEMENT_TYPE_R8:
{
V_VT(pvar) = VT_R8;
__int64 Value = GET_UNALIGNED_VAL64(pValue);
V_R8(pvar) = (double &) Value;
}
break;
case ELEMENT_TYPE_STRING:
{
V_VT(pvar) = VT_BSTR;
WCHAR *TempString;
#if BIGENDIAN
TempString = (WCHAR *)alloca(cbValue);
memcpy(TempString, pValue, cbValue);
SwapStringLength(TempString, cbValue/sizeof(WCHAR));
#else
TempString = (WCHAR *)pValue;
#endif
// allocated bstr here
V_BSTR(pvar) = ::SysAllocStringLen((LPWSTR)TempString, cbValue/sizeof(WCHAR));
if (V_BSTR(pvar) == NULL)
hr = E_OUTOFMEMORY;
}
break;
case ELEMENT_TYPE_CLASS:
V_VT(pvar) = VT_UNKNOWN;
V_UNKNOWN(pvar) = NULL;
// _ASSERTE( GET_UNALIGNED_VAL32(pValue) == 0);
break;
case ELEMENT_TYPE_I8:
V_VT(pvar) = VT_I8;
V_CY(pvar).int64 = GET_UNALIGNED_VAL64(pValue);
break;
case ELEMENT_TYPE_U8:
V_VT(pvar) = VT_UI8;
V_CY(pvar).int64 = GET_UNALIGNED_VAL64(pValue);
break;
case ELEMENT_TYPE_VOID:
V_VT(pvar) = VT_EMPTY;
break;
default:
_ASSERTE(!"bad constant value type!");
}
return hr;
} // HRESULT _FillVariant()
void MDInfo::DisplayAssembly()
{
if (m_pAssemblyImport)
{
DisplayAssemblyInfo();
DisplayAssemblyRefs();
DisplayFiles();
DisplayExportedTypes();
DisplayManifestResources();
}
} // void MDInfo::DisplayAssembly()
void MDInfo::DisplayAssemblyInfo()
{
HRESULT hr;
mdAssembly mda;
const BYTE *pbPublicKey;
ULONG cbPublicKey;
ULONG ulHashAlgId;
WCHAR szName[STRING_BUFFER_LEN];
ASSEMBLYMETADATA MetaData;
DWORD dwFlags;
hr = m_pAssemblyImport->GetAssemblyFromScope(&mda);
if (hr == CLDB_E_RECORD_NOTFOUND)
return;
else if (FAILED(hr)) Error("GetAssemblyFromScope() failed.", hr);
// Get the required sizes for the arrays of locales, processors etc.
ZeroMemory(&MetaData, sizeof(ASSEMBLYMETADATA));
hr = m_pAssemblyImport->GetAssemblyProps(mda,
NULL, NULL, // Public Key.
NULL, // Hash Algorithm.
NULL, 0, NULL, // Name.
&MetaData,
NULL); // Flags.
if (FAILED(hr)) Error("GetAssemblyProps() failed.", hr);
// Allocate space for the arrays in the ASSEMBLYMETADATA structure.
if (MetaData.cbLocale)
MetaData.szLocale = new WCHAR[MetaData.cbLocale];
if (MetaData.ulProcessor)
MetaData.rProcessor = new DWORD[MetaData.ulProcessor];
if (MetaData.ulOS)
MetaData.rOS = new OSINFO[MetaData.ulOS];
hr = m_pAssemblyImport->GetAssemblyProps(mda,
(const void **)&pbPublicKey, &cbPublicKey,
&ulHashAlgId,
szName, STRING_BUFFER_LEN, NULL,
&MetaData,
&dwFlags);
if (FAILED(hr)) Error("GetAssemblyProps() failed.", hr);
WriteLine("Assembly");
WriteLine("-------------------------------------------------------");
VWriteLine("\tToken: 0x%08x", mda);
VWriteLine("\tName : %ls", szName);
DumpHex("\tPublic Key ", pbPublicKey, cbPublicKey, false, 24);
VWriteLine("\tHash Algorithm : 0x%08x", ulHashAlgId);
DisplayASSEMBLYMETADATA(&MetaData);
if(MetaData.szLocale) delete [] MetaData.szLocale;
if(MetaData.rProcessor) delete [] MetaData.rProcessor;
if(MetaData.rOS) delete [] MetaData.rOS;
char sFlags[STRING_BUFFER_LEN];
DWORD flags = dwFlags;
sFlags[0] = 0;
ISFLAG(Af, PublicKey);
ISFLAG(Af, Retargetable);
ISFLAG(AfContentType_, WindowsRuntime);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\tFlags : %s (%08x)", sFlags, dwFlags);
DisplayCustomAttributes(mda, "\t");
DisplayPermissions(mda, "\t");
WriteLine("");
} // void MDInfo::DisplayAssemblyInfo()
void MDInfo::DisplayAssemblyRefs()
{
HCORENUM assemblyRefEnum = NULL;
mdAssemblyRef AssemblyRefs[ENUM_BUFFER_SIZE];
ULONG count;
ULONG totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pAssemblyImport->EnumAssemblyRefs( &assemblyRefEnum,
AssemblyRefs, ARRAY_SIZE(AssemblyRefs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("AssemblyRef #%d (%08x)", totalCount, AssemblyRefs[i]);
WriteLine("-------------------------------------------------------");
DisplayAssemblyRefInfo(AssemblyRefs[i]);
WriteLine("");
}
}
m_pAssemblyImport->CloseEnum(assemblyRefEnum);
} // void MDInfo::DisplayAssemblyRefs()
void MDInfo::DisplayAssemblyRefInfo(mdAssemblyRef inAssemblyRef)
{
HRESULT hr;
const BYTE *pbPublicKeyOrToken;
ULONG cbPublicKeyOrToken;
WCHAR szName[STRING_BUFFER_LEN];
ASSEMBLYMETADATA MetaData;
const BYTE *pbHashValue;
ULONG cbHashValue;
DWORD dwFlags;
VWriteLine("\tToken: 0x%08x", inAssemblyRef);
// Get sizes for the arrays in the ASSEMBLYMETADATA structure.
ZeroMemory(&MetaData, sizeof(ASSEMBLYMETADATA));
hr = m_pAssemblyImport->GetAssemblyRefProps(inAssemblyRef,
NULL, NULL, // Public Key or Token.
NULL, 0, NULL, // Name.
&MetaData,
NULL, NULL, // HashValue.
NULL); // Flags.
if (FAILED(hr)) Error("GetAssemblyRefProps() failed.", hr);
// Allocate space for the arrays in the ASSEMBLYMETADATA structure.
if (MetaData.cbLocale)
MetaData.szLocale = new WCHAR[MetaData.cbLocale];
if (MetaData.ulProcessor)
MetaData.rProcessor = new DWORD[MetaData.ulProcessor];
if (MetaData.ulOS)
MetaData.rOS = new OSINFO[MetaData.ulOS];
hr = m_pAssemblyImport->GetAssemblyRefProps(inAssemblyRef,
(const void **)&pbPublicKeyOrToken, &cbPublicKeyOrToken,
szName, STRING_BUFFER_LEN, NULL,
&MetaData,
(const void **)&pbHashValue, &cbHashValue,
&dwFlags);
if (FAILED(hr)) Error("GetAssemblyRefProps() failed.", hr);
DumpHex("\tPublic Key or Token", pbPublicKeyOrToken, cbPublicKeyOrToken, false, 24);
VWriteLine("\tName: %ls", szName);
DisplayASSEMBLYMETADATA(&MetaData);
if(MetaData.szLocale) delete [] MetaData.szLocale;
if(MetaData.rProcessor) delete [] MetaData.rProcessor;
if(MetaData.rOS) delete [] MetaData.rOS;
DumpHex("\tHashValue Blob", pbHashValue, cbHashValue, false, 24);
char sFlags[STRING_BUFFER_LEN];
DWORD flags = dwFlags;
sFlags[0] = 0;
ISFLAG(Af, PublicKey);
ISFLAG(Af, Retargetable);
ISFLAG(AfContentType_, WindowsRuntime);
#if 0
ISFLAG(Af, LegacyLibrary);
ISFLAG(Af, LegacyPlatform);
ISFLAG(Af, Library);
ISFLAG(Af, Platform);
#endif
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\tFlags: %s (%08x)", sFlags, dwFlags);
DisplayCustomAttributes(inAssemblyRef, "\t");
WriteLine("");
} // void MDInfo::DisplayAssemblyRefInfo()
void MDInfo::DisplayFiles()
{
HCORENUM fileEnum = NULL;
mdFile Files[ENUM_BUFFER_SIZE];
ULONG count;
ULONG totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pAssemblyImport->EnumFiles( &fileEnum,
Files, ARRAY_SIZE(Files), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("File #%d (%08x)", totalCount, Files[i]);
WriteLine("-------------------------------------------------------");
DisplayFileInfo(Files[i]);
WriteLine("");
}
}
m_pAssemblyImport->CloseEnum(fileEnum);
} // void MDInfo::DisplayFiles()
void MDInfo::DisplayFileInfo(mdFile inFile)
{
HRESULT hr;
WCHAR szName[STRING_BUFFER_LEN];
const BYTE *pbHashValue;
ULONG cbHashValue;
DWORD dwFlags;
VWriteLine("\tToken: 0x%08x", inFile);
hr = m_pAssemblyImport->GetFileProps(inFile,
szName, STRING_BUFFER_LEN, NULL,
(const void **)&pbHashValue, &cbHashValue,
&dwFlags);
if (FAILED(hr)) Error("GetFileProps() failed.", hr);
VWriteLine("\tName : %ls", szName);
DumpHex("\tHashValue Blob ", pbHashValue, cbHashValue, false, 24);
char sFlags[STRING_BUFFER_LEN];
DWORD flags = dwFlags;
sFlags[0] = 0;
ISFLAG(Ff, ContainsMetaData);
ISFLAG(Ff, ContainsNoMetaData);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\tFlags : %s (%08x)", sFlags, dwFlags);
DisplayCustomAttributes(inFile, "\t");
WriteLine("");
} // MDInfo::DisplayFileInfo()
void MDInfo::DisplayExportedTypes()
{
HCORENUM comTypeEnum = NULL;
mdExportedType ExportedTypes[ENUM_BUFFER_SIZE];
ULONG count;
ULONG totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pAssemblyImport->EnumExportedTypes( &comTypeEnum,
ExportedTypes, ARRAY_SIZE(ExportedTypes), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("ExportedType #%d (%08x)", totalCount, ExportedTypes[i]);
WriteLine("-------------------------------------------------------");
DisplayExportedTypeInfo(ExportedTypes[i]);
WriteLine("");
}
}
m_pAssemblyImport->CloseEnum(comTypeEnum);
} // void MDInfo::DisplayExportedTypes()
void MDInfo::DisplayExportedTypeInfo(mdExportedType inExportedType)
{
HRESULT hr;
WCHAR szName[STRING_BUFFER_LEN];
mdToken tkImplementation;
mdTypeDef tkTypeDef;
DWORD dwFlags;
char sFlags[STRING_BUFFER_LEN];
VWriteLine("\tToken: 0x%08x", inExportedType);
hr = m_pAssemblyImport->GetExportedTypeProps(inExportedType,
szName, STRING_BUFFER_LEN, NULL,
&tkImplementation,
&tkTypeDef,
&dwFlags);
if (FAILED(hr)) Error("GetExportedTypeProps() failed.", hr);
VWriteLine("\tName: %ls", szName);
VWriteLine("\tImplementation token: 0x%08x", tkImplementation);
VWriteLine("\tTypeDef token: 0x%08x", tkTypeDef);
VWriteLine("\tFlags : %s (%08x)",ClassFlags(dwFlags, sFlags), dwFlags);
DisplayCustomAttributes(inExportedType, "\t");
WriteLine("");
} // void MDInfo::DisplayExportedTypeInfo()
void MDInfo::DisplayManifestResources()
{
HCORENUM manifestResourceEnum = NULL;
mdManifestResource ManifestResources[ENUM_BUFFER_SIZE];
ULONG count;
ULONG totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pAssemblyImport->EnumManifestResources( &manifestResourceEnum,
ManifestResources, ARRAY_SIZE(ManifestResources), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("ManifestResource #%d (%08x)", totalCount, ManifestResources[i]);
WriteLine("-------------------------------------------------------");
DisplayManifestResourceInfo(ManifestResources[i]);
WriteLine("");
}
}
m_pAssemblyImport->CloseEnum(manifestResourceEnum);
} // void MDInfo::DisplayManifestResources()
void MDInfo::DisplayManifestResourceInfo(mdManifestResource inManifestResource)
{
HRESULT hr;
WCHAR szName[STRING_BUFFER_LEN];
mdToken tkImplementation;
DWORD dwOffset;
DWORD dwFlags;
VWriteLine("\tToken: 0x%08x", inManifestResource);
hr = m_pAssemblyImport->GetManifestResourceProps(inManifestResource,
szName, STRING_BUFFER_LEN, NULL,
&tkImplementation,
&dwOffset,
&dwFlags);
if (FAILED(hr)) Error("GetManifestResourceProps() failed.", hr);
VWriteLine("Name: %ls", szName);
VWriteLine("Implementation token: 0x%08x", tkImplementation);
VWriteLine("Offset: 0x%08x", dwOffset);
char sFlags[STRING_BUFFER_LEN];
DWORD flags = dwFlags;
sFlags[0] = 0;
ISFLAG(Mr, Public);
ISFLAG(Mr, Private);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\tFlags: %s (%08x)", sFlags, dwFlags);
DisplayCustomAttributes(inManifestResource, "\t");
WriteLine("");
} // void MDInfo::DisplayManifestResourceInfo()
void MDInfo::DisplayASSEMBLYMETADATA(ASSEMBLYMETADATA *pMetaData)
{
ULONG i;
VWriteLine("\tVersion: %d.%d.%d.%d", pMetaData->usMajorVersion, pMetaData->usMinorVersion, pMetaData->usBuildNumber, pMetaData->usRevisionNumber);
VWriteLine("\tMajor Version: 0x%08x", pMetaData->usMajorVersion);
VWriteLine("\tMinor Version: 0x%08x", pMetaData->usMinorVersion);
VWriteLine("\tBuild Number: 0x%08x", pMetaData->usBuildNumber);
VWriteLine("\tRevision Number: 0x%08x", pMetaData->usRevisionNumber);
VWriteLine("\tLocale: %ls", pMetaData->cbLocale ? pMetaData->szLocale : W("<null>"));
for (i = 0; i < pMetaData->ulProcessor; i++)
VWriteLine("\tProcessor #%ld: 0x%08x", i+1, pMetaData->rProcessor[i]);
for (i = 0; i < pMetaData->ulOS; i++)
{
VWriteLine("\tOS #%ld:", i+1);
VWriteLine("\t\tOS Platform ID: 0x%08x", pMetaData->rOS[i].dwOSPlatformId);
VWriteLine("\t\tOS Major Version: 0x%08x", pMetaData->rOS[i].dwOSMajorVersion);
VWriteLine("\t\tOS Minor Version: 0x%08x", pMetaData->rOS[i].dwOSMinorVersion);
}
} // void MDInfo::DisplayASSEMBLYMETADATA()
void MDInfo::DisplayUserStrings()
{
HCORENUM stringEnum = NULL; // string enumerator.
mdString Strings[ENUM_BUFFER_SIZE]; // String tokens from enumerator.
CQuickArray<WCHAR> rUserString; // Buffer to receive string.
WCHAR *szUserString; // Working pointer into buffer.
ULONG chUserString; // Size of user string.
CQuickArray<char> rcBuf; // Buffer to hold the BLOB version of the string.
char *szBuf; // Working pointer into buffer.
ULONG chBuf; // Saved size of the user string.
ULONG count; // Items returned from enumerator.
ULONG totalCount = 1; // Running count of strings.
bool bUnprint = false; // Is an unprintable character found?
HRESULT hr; // A result.
while (SUCCEEDED(hr = m_pImport->EnumUserStrings( &stringEnum,
Strings, ARRAY_SIZE(Strings), &count)) &&
count > 0)
{
if (totalCount == 1)
{ // If only one, it is the NULL string, so don't print it.
WriteLine("User Strings");
WriteLine("-------------------------------------------------------");
}
for (ULONG i = 0; i < count; i++, totalCount++)
{
do { // Try to get the string into the existing buffer.
hr = m_pImport->GetUserString( Strings[i], rUserString.Ptr(),(ULONG32)rUserString.MaxSize(), &chUserString);
if (hr == CLDB_S_TRUNCATION)
{ // Buffer wasn't big enough, try to enlarge it.
if (FAILED(rUserString.ReSizeNoThrow(chUserString)))
Error("malloc failed.", E_OUTOFMEMORY);
continue;
}
} while (hr == CLDB_S_TRUNCATION);
if (FAILED(hr)) Error("GetUserString failed.", hr);
szUserString = rUserString.Ptr();
chBuf = chUserString;
VWrite("%08x : (%2d) L\"", Strings[i], chUserString);
for (ULONG j=0; j<chUserString; j++)
{
switch (*szUserString)
{
case 0:
Write("\\0"); break;
case L'\r':
Write("\\r"); break;
case L'\n':
Write("\\n"); break;
case L'\t':
Write("\\t"); break;
default:
if (iswprint(*szUserString))
VWrite("%lc", *szUserString);
else
{
bUnprint = true;
Write(".");
}
break;
}
++szUserString;
if((j>0)&&((j&0x7F)==0)) WriteLine("");
}
WriteLine("\"");
// Print the user string as a blob if an unprintable character is found.
if (bUnprint)
{
bUnprint = false;
szUserString = rUserString.Ptr();
if (FAILED(hr = rcBuf.ReSizeNoThrow(81))) //(chBuf * 5 + 1);
Error("ReSize failed.", hr);
szBuf = rcBuf.Ptr();
ULONG j,k;
WriteLine("\t\tUser string has unprintables, hex format below:");
for (j = 0,k=0; j < chBuf; j++)
{
sprintf_s (&szBuf[k*5], 81, "%04x ", szUserString[j]);
k++;
if((k==16)||(j == (chBuf-1)))
{
szBuf[k*5] = '\0';
VWriteLine("\t\t%s", szBuf);
k=0;
}
}
}
}
}
if (stringEnum)
m_pImport->CloseEnum(stringEnum);
} // void MDInfo::DisplayUserStrings()
void MDInfo::DisplayUnsatInfo()
{
HRESULT hr = S_OK;
HCORENUM henum = 0;
mdToken tk;
ULONG cMethods;
Write("\nUnresolved Externals\n");
Write("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
while ( (hr = m_pImport->EnumUnresolvedMethods(
&henum,
&tk,
1,
&cMethods)) == S_OK && cMethods )
{
if ( TypeFromToken(tk) == mdtMethodDef )
{
// a method definition without implementation
DisplayMethodInfo( tk );
}
else if ( TypeFromToken(tk) == mdtMemberRef )
{
// an unresolved MemberRef to a global function
DisplayMemberRefInfo( tk, "" );
}
else
{
_ASSERTE(!"Unknown token kind!");
}
}
m_pImport->CloseEnum(henum);
} // void MDInfo::DisplayUnsatInfo()
//*******************************************************************************
// This code is used for debugging purposes only. This will just print out the
// entire database.
//*******************************************************************************
const char *MDInfo::DumpRawNameOfType(ULONG iType)
{
if (iType <= iRidMax)
{
const char *pNameTable;
m_pTables->GetTableInfo(iType, 0,0,0,0, &pNameTable);
return pNameTable;
}
else
// Is the field a coded token?
if (iType <= iCodedTokenMax)
{
int iCdTkn = iType - iCodedToken;
const char *pNameCdTkn;
m_pTables->GetCodedTokenInfo(iCdTkn, 0,0, &pNameCdTkn);
return pNameCdTkn;
}
// Fixed type.
switch (iType)
{
case iBYTE:
return "BYTE";
case iSHORT:
return "short";
case iUSHORT:
return "USHORT";
case iLONG:
return "long";
case iULONG:
return "ULONG";
case iSTRING:
return "string";
case iGUID:
return "GUID";
case iBLOB:
return "blob";
}
// default:
static char buf[30];
sprintf_s(buf, 30, "unknown type 0x%02x", iType);
return buf;
} // const char *MDInfo::DumpRawNameOfType()
void MDInfo::DumpRawCol(ULONG ixTbl, ULONG ixCol, ULONG rid, bool bStats)
{
ULONG ulType; // Type of a column.
ULONG ulVal; // Value of a column.
LPCUTF8 pString; // Pointer to a string.
const void *pBlob; // Pointer to a blob.
ULONG cb; // Size of something.
m_pTables->GetColumn(ixTbl, ixCol, rid, &ulVal);
m_pTables->GetColumnInfo(ixTbl, ixCol, 0, 0, &ulType, 0);
if (ulType <= iRidMax)
{
const char *pNameTable;
m_pTables->GetTableInfo(ulType, 0,0,0,0, &pNameTable);
VWrite("%s[%x]", pNameTable, ulVal);
}
else
// Is the field a coded token?
if (ulType <= iCodedTokenMax)
{
int iCdTkn = ulType - iCodedToken;
const char *pNameCdTkn;
m_pTables->GetCodedTokenInfo(iCdTkn, 0,0, &pNameCdTkn);
VWrite("%s[%08x]", pNameCdTkn, ulVal);
}
else
{
// Fixed type.
switch (ulType)
{
case iBYTE:
VWrite("%02x", ulVal);
break;
case iSHORT:
case iUSHORT:
VWrite("%04x", ulVal);
break;
case iLONG:
case iULONG:
VWrite("%08x", ulVal);
break;
case iSTRING:
if (ulVal && (m_DumpFilter & dumpNames))
{
m_pTables->GetString(ulVal, &pString);
VWrite("(%x)\"%s\"", ulVal, pString);
}
else
VWrite("string#%x", ulVal);
if (bStats && ulVal)
{
m_pTables->GetString(ulVal, &pString);
cb = (ULONG) strlen(pString) + 1;
VWrite("(%d)", cb);
}
break;
case iGUID:
VWrite("guid#%x", ulVal);
if (bStats && ulVal)
{
VWrite("(16)");
}
break;
case iBLOB:
VWrite("blob#%x", ulVal);
if (bStats && ulVal)
{
m_pTables->GetBlob(ulVal, &cb, &pBlob);
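// Add the blob's compressed length prefix: 1 byte, plus extra bytes for larger blobs.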
cb += 1;
if (cb > 128)
cb += 1;
if (cb > 16535)
cb += 1;
VWrite("(%d)", cb);
}
break;
default:
VWrite("unknown type 0x%04x", ulVal);
break;
}
}
} // void MDInfo::DumpRawCol()
ULONG MDInfo::DumpRawColStats(ULONG ixTbl, ULONG ixCol, ULONG cRows)
{
ULONG rslt = 0;
ULONG ulType; // Type of a column.
ULONG ulVal; // Value of a column.
LPCUTF8 pString; // Pointer to a string.
const void *pBlob; // Pointer to a blob.
ULONG cb; // Size of something.
m_pTables->GetColumnInfo(ixTbl, ixCol, 0, 0, &ulType, 0);
if (IsHeapType(ulType))
{
for (ULONG rid=1; rid<=cRows; ++rid)
{
m_pTables->GetColumn(ixTbl, ixCol, rid, &ulVal);
// Fixed type.
switch (ulType)
{
case iSTRING:
if (ulVal)
{
m_pTables->GetString(ulVal, &pString);
cb = (ULONG) strlen(pString);
rslt += cb + 1;
}
break;
case iGUID:
if (ulVal)
rslt += 16;
break;
case iBLOB:
if (ulVal)
{
m_pTables->GetBlob(ulVal, &cb, &pBlob);
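// Include the compressed length prefix in the heap size estimate (1 byte, plus extra for larger blobs).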
rslt += cb + 1;
if (cb > 128)
rslt += 1;
if (cb > 16535)
rslt += 1;
}
break;
default:
break;
}
}
}
return rslt;
} // ULONG MDInfo::DumpRawColStats()
int MDInfo::DumpHex(
const char *szPrefix, // String prefix for first line.
const void *pvData, // The data to print.
ULONG cbData, // Bytes of data to print.
int bText, // If true, also dump text.
ULONG nLine) // Bytes per line to print.
{
const BYTE *pbData = static_cast<const BYTE*>(pvData);
ULONG i; // Loop control.
ULONG nPrint; // Number to print in an iteration.
ULONG nSpace; // Spacing calculations.
ULONG nPrefix; // Size of the prefix.
ULONG nLines=0; // Number of lines printed.
const char *pPrefix; // For counting spaces in the prefix.
// Round down to 8 characters.
nLine = nLine & ~0x7;
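// Compute the display width of the prefix, expanding tabs to 8-column stops.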
for (nPrefix=0, pPrefix=szPrefix; *pPrefix; ++pPrefix)
{
if (*pPrefix == '\t')
nPrefix = (nPrefix + 8) & ~7;
else
++nPrefix;
}
//nPrefix = strlen(szPrefix);
do
{ // Write the line prefix.
if (szPrefix)
VWrite("%s:", szPrefix);
else
VWrite("%*s:", nPrefix, "");
szPrefix = 0;
++nLines;
// Calculate spacing.
nPrint = min(cbData, nLine);
nSpace = nLine - nPrint;
// dump in hex.
for(i=0; i<nPrint; i++)
{
if ((i&7) == 0)
Write(" ");
VWrite("%02x ", pbData[i]);
}
if (bText)
{
// Space out to the text spot.
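// Each missing byte would have taken 3 columns ("%02x "), plus one separator space per 8-byte group.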
if (nSpace)
VWrite("%*s", nSpace*3+nSpace/8, "");
// Dump in text.
Write(">");
for(i=0; i<nPrint; i++)
VWrite("%c", (isprint(pbData[i])) ? pbData[i] : ' ');
// Space out the text, and finish the line.
VWrite("%*s<", nSpace, "");
}
VWriteLine("");
// Next data to print.
cbData -= nPrint;
pbData += nPrint;
}
while (cbData > 0);
return nLines;
} // int MDInfo::DumpHex()
void MDInfo::DumpRawHeaps()
{
HRESULT hr; // A result.
ULONG ulSize; // Bytes in a heap.
const BYTE *pData; // Pointer to a blob.
ULONG cbData; // Size of a blob.
ULONG oData; // Offset of current blob.
char rcPrefix[30]; // To format line prefix.
m_pTables->GetBlobHeapSize(&ulSize);
VWriteLine("");
VWriteLine("Blob Heap: %d(%#x) bytes", ulSize,ulSize);
oData = 0;
do
{
m_pTables->GetBlob(oData, &cbData, (const void**)&pData);
sprintf_s(rcPrefix, 30, "%5x,%-2x", oData, cbData);
DumpHex(rcPrefix, pData, cbData);
hr = m_pTables->GetNextBlob(oData, &oData);
}
while (hr == S_OK);
m_pTables->GetStringHeapSize(&ulSize);
VWriteLine("");
VWriteLine("String Heap: %d(%#x) bytes", ulSize,ulSize);
oData = 0;
const char *pString;
do
{
m_pTables->GetString(oData, &pString);
if (m_DumpFilter & dumpMoreHex)
{
sprintf_s(rcPrefix, 30, "%08x", oData);
DumpHex(rcPrefix, pString, (ULONG)strlen(pString)+1);
}
else
if (*pString != 0)
VWrite("%08x: %s\n", oData, pString);
hr = m_pTables->GetNextString(oData, &oData);
}
while (hr == S_OK);
VWriteLine("");
DisplayUserStrings();
} // void MDInfo::DumpRawHeaps()
void MDInfo::DumpRaw(int iDump, bool bunused)
{
ULONG cTables; // Tables in the database.
ULONG cCols; // Columns in a table.
ULONG cRows; // Rows in a table.
ULONG cbRow; // Bytes in a row of a table.
ULONG iKey; // Key column of a table.
const char *pNameTable; // Name of a table.
ULONG oCol; // Offset of a column.
ULONG cbCol; // Size of a column.
ULONG ulType; // Type of a column.
const char *pNameColumn; // Name of a column.
ULONG ulSize;
// Heaps is easy -- there is a specific bit for that.
bool bStats = (m_DumpFilter & dumpStats) != 0;
// Rows are harder. Was there something else that limited data?
BOOL bRows = (m_DumpFilter & (dumpSchema | dumpHeader)) == 0;
BOOL bSchema = bRows || (m_DumpFilter & dumpSchema);
// (m_DumpFilter & (dumpSchema | dumpHeader | dumpCSV | dumpRaw | dumpStats | dumpRawHeaps))
if (m_pTables2)
{
// Get the raw metadata header.
const BYTE *pbData = NULL;
const BYTE *pbStream = NULL; // One of the streams.
const BYTE *pbMd = NULL; // The metadata stream.
ULONG cbData = 0;
ULONG cbStream = 0; // One of the streams.
ULONG cbMd = 0; // The metadata stream.
const char *pName;
HRESULT hr = S_OK;
ULONG ix;
m_pTables2->GetMetaDataStorage((const void**)&pbData, &cbData);
// Per the ECMA spec, the section data looks like this:
struct MDSTORAGESIGNATURE
{
ULONG lSignature; // "Magic" signature.
USHORT iMajorVer; // Major file version.
USHORT iMinorVer; // Minor file version.
ULONG iExtraData; // Offset to next structure of information
ULONG iVersionString; // Length of version string
BYTE pVersion[0]; // Version string
};
struct MDSTORAGEHEADER
{
BYTE fFlags; // STGHDR_xxx flags.
BYTE pad;
USHORT iStreams; // How many streams are there.
};
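// The storage header immediately follows the signature structure and its version string (iVersionString bytes).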
const MDSTORAGESIGNATURE *pStorage = (const MDSTORAGESIGNATURE *) pbData;
const MDSTORAGEHEADER *pSHeader = (const MDSTORAGEHEADER *)(pbData + sizeof(MDSTORAGESIGNATURE) + pStorage->iVersionString);
VWriteLine("Metadata section: 0x%08x, version: %d.%d, extra: %d, version len: %d, version: %s", pStorage->lSignature, pStorage->iMajorVer, pStorage->iMinorVer, pStorage->iExtraData, pStorage->iVersionString, pStorage->pVersion);
VWriteLine(" flags: 0x%02x, streams: %d", pSHeader->fFlags, pSHeader->iStreams);
if (m_DumpFilter & dumpMoreHex)
{
const BYTE *pbEnd = pbData;
ULONG cb = sizeof(MDSTORAGESIGNATURE) + pStorage->iVersionString + sizeof(MDSTORAGEHEADER);
hr = m_pTables2->GetMetaDataStreamInfo(0, &pName, (const void**)&pbEnd, &cbStream);
if (hr == S_OK)
cb = (ULONG)(pbEnd - pbData);
DumpHex(" ", pbData, cb);
}
for (ix=0; hr == S_OK; ++ix)
{
hr = m_pTables2->GetMetaDataStreamInfo(ix, &pName, (const void**)&pbStream, &cbStream);
if (hr != S_OK)
break;
if (strcmp(pName, "#~") == 0 || strcmp(pName, "#-") == 0)
{
pbMd = pbStream;
cbMd = cbStream;
}
VWriteLine("Stream %d: name: %s, size %d", ix, pName, cbStream);
// Hex for the individual stream headers appears in the metadata section dump; hex for
// the streams themselves is distributed throughout the dump.
}
if (pbMd)
{
// Per ECMA, the metadata header looks like this:
struct MD
{
ULONG m_ulReserved; // Reserved, must be zero.
BYTE m_major; // Version numbers.
BYTE m_minor;
BYTE m_heaps; // Bits for heap sizes.
BYTE m_rid; // log-base-2 of largest rid.
unsigned __int64 m_maskvalid; // Bit mask of present table counts.
unsigned __int64 m_sorted; // Bit mask of sorted tables.
};
const MD *pMd;
pMd = (const MD *)pbMd;
VWriteLine("Metadata header: %d.%d, heaps: 0x%02x, rid: 0x%02x, valid: 0x%016I64x, sorted: 0x%016I64x",
pMd->m_major, pMd->m_minor, pMd->m_heaps, pMd->m_rid,
(ULONGLONG)GET_UNALIGNED_VAL64(&(pMd->m_maskvalid)),
(ULONGLONG)GET_UNALIGNED_VAL64(&(pMd->m_sorted)));
if (m_DumpFilter & dumpMoreHex)
{
DumpHex(" ", pbMd, sizeof(MD));
}
}
VWriteLine("");
}
m_pTables->GetNumTables(&cTables);
m_pTables->GetStringHeapSize(&ulSize);
VWrite("Strings: %d(%#x)", ulSize, ulSize);
m_pTables->GetBlobHeapSize(&ulSize);
VWrite(", Blobs: %d(%#x)", ulSize, ulSize);
m_pTables->GetGuidHeapSize(&ulSize);
VWrite(", Guids: %d(%#x)", ulSize, ulSize);
m_pTables->GetUserStringHeapSize(&ulSize);
VWriteLine(", User strings: %d(%#x)", ulSize, ulSize);
for (ULONG ixTbl = 0; ixTbl < cTables; ++ixTbl)
{
m_pTables->GetTableInfo(ixTbl, &cbRow, &cRows, &cCols, &iKey, &pNameTable);
if (bRows) // when dumping rows, print a break between row data and schema
VWriteLine("=================================================");
VWriteLine("%2d(%#x): %-20s cRecs:%5d(%#x), cbRec:%3d(%#x), cbTable:%6d(%#x)",
ixTbl, ixTbl, pNameTable, cRows, cRows, cbRow, cbRow, cbRow * cRows, cbRow * cRows);
if (!bSchema && !bRows)
continue;
// Dump column definitions for the table.
ULONG ixCol;
for (ixCol=0; ixCol<cCols; ++ixCol)
{
m_pTables->GetColumnInfo(ixTbl, ixCol, &oCol, &cbCol, &ulType, &pNameColumn);
VWrite(" col %2x:%c %-12s oCol:%2x, cbCol:%x, %-7s",
ixCol, ((ixCol==iKey)?'*':' '), pNameColumn, oCol, cbCol, DumpRawNameOfType(ulType));
if (bStats)
{
ulSize = DumpRawColStats(ixTbl, ixCol, cRows);
if (ulSize)
VWrite("(%d)", ulSize);
}
VWriteLine("");
}
if (!bRows)
continue;
// Dump the rows.
for (ULONG rid = 1; rid <= cRows; ++rid)
{
if (rid == 1)
VWriteLine("-------------------------------------------------");
VWrite(" %3x == ", rid);
for (ixCol=0; ixCol < cCols; ++ixCol)
{
if (ixCol) VWrite(", ");
VWrite("%d:", ixCol);
DumpRawCol(ixTbl, ixCol, rid, bStats);
}
VWriteLine("");
}
}
} // void MDInfo::DumpRaw()
void MDInfo::DumpRawCSV()
{
ULONG cTables; // Tables in the database.
ULONG cCols; // Columns in a table.
ULONG cRows; // Rows in a table.
ULONG cbRow; // Bytes in a row of a table.
const char *pNameTable; // Name of a table.
ULONG ulSize;
m_pTables->GetNumTables(&cTables);
VWriteLine("Name,Size,cRecs,cbRec");
m_pTables->GetStringHeapSize(&ulSize);
VWriteLine("Strings,%d", ulSize);
m_pTables->GetBlobHeapSize(&ulSize);
VWriteLine("Blobs,%d", ulSize);
m_pTables->GetGuidHeapSize(&ulSize);
VWriteLine("Guids,%d", ulSize);
for (ULONG ixTbl = 0; ixTbl < cTables; ++ixTbl)
{
m_pTables->GetTableInfo(ixTbl, &cbRow, &cRows, &cCols, NULL, &pNameTable);
VWriteLine("%s,%d,%d,%d", pNameTable, cbRow*cRows, cRows, cbRow);
}
} // void MDInfo::DumpRawCSV()
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <windows.h>
#include <objbase.h>
#include <crtdbg.h>
#include <assert.h>
#include <corpriv.h>
#include <cor.h>
#include "assert.h"
#include "corerror.h"
#include <winwrap.h>
#include <prettyprintsig.h>
#include <cahlpr.h>
#include <limits.h>
#include "mdinfo.h"
#define ENUM_BUFFER_SIZE 10
#define TAB_SIZE 8
#define ISFLAG(p,x) if (Is##p##x(flags)) strcat_s(sFlags,STRING_BUFFER_LEN, "["#x "] ");
extern HRESULT _FillVariant(
BYTE bCPlusTypeFlag,
void const *pValue,
ULONG cbValue,
VARIANT *pvar);
// Validator declarations.
extern DWORD g_ValModuleType;
// Tables for mapping element type to text
const char *g_szMapElementType[] =
{
"End", // 0x0
"Void", // 0x1
"Boolean",
"Char",
"I1",
"UI1",
"I2", // 0x6
"UI2",
"I4",
"UI4",
"I8",
"UI8",
"R4",
"R8",
"String",
"Ptr", // 0xf
"ByRef", // 0x10
"ValueClass",
"Class",
"Var",
"MDArray", // 0x14
"GenericInst",
"TypedByRef",
"VALUEARRAY",
"I",
"U",
"R", // 0x1a
"FNPTR",
"Object",
"SZArray",
"MVar",
"CMOD_REQD",
"CMOD_OPT",
"INTERNAL",
};
const char *g_szMapUndecorateType[] =
{
"", // 0x0
"void",
"boolean",
"Char",
"byte",
"unsigned byte",
"short",
"unsigned short",
"int",
"unsigned int",
"long",
"unsigned long",
"float",
"double",
"String",
"*", // 0xf
"ByRef",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"Function Pointer",
"Object",
"",
"",
"CMOD_REQD",
"CMOD_OPT",
"INTERNAL",
};
// Provide enough entries for IMAGE_CEE_CS_CALLCONV_MASK (defined in CorHdr.h)
const char *g_strCalling[] =
{
"[DEFAULT]",
"[C]",
"[STDCALL]",
"[THISCALL]",
"[FASTCALL]",
"[VARARG]",
"[FIELD]",
"[LOCALSIG]",
"[PROPERTY]",
"[UNMANAGED]",
"[GENERICINST]",
"[NATIVEVARARG]",
"[INVALID]",
"[INVALID]",
"[INVALID]",
"[INVALID]"
};
const char *g_szNativeType[] =
{
"NATIVE_TYPE_END(DEPRECATED!)", // = 0x0, //DEPRECATED
"NATIVE_TYPE_VOID(DEPRECATED!)", // = 0x1, //DEPRECATED
"NATIVE_TYPE_BOOLEAN", // = 0x2, // (4 byte boolean value: TRUE = non-zero, FALSE = 0)
"NATIVE_TYPE_I1", // = 0x3,
"NATIVE_TYPE_U1", // = 0x4,
"NATIVE_TYPE_I2", // = 0x5,
"NATIVE_TYPE_U2", // = 0x6,
"NATIVE_TYPE_I4", // = 0x7,
"NATIVE_TYPE_U4", // = 0x8,
"NATIVE_TYPE_I8", // = 0x9,
"NATIVE_TYPE_U8", // = 0xa,
"NATIVE_TYPE_R4", // = 0xb,
"NATIVE_TYPE_R8", // = 0xc,
"NATIVE_TYPE_SYSCHAR(DEPRECATED!)", // = 0xd, //DEPRECATED
"NATIVE_TYPE_VARIANT(DEPRECATED!)", // = 0xe, //DEPRECATED
"NATIVE_TYPE_CURRENCY", // = 0xf,
"NATIVE_TYPE_PTR(DEPRECATED!)", // = 0x10, //DEPRECATED
"NATIVE_TYPE_DECIMAL(DEPRECATED!)", // = 0x11, //DEPRECATED
"NATIVE_TYPE_DATE(DEPRECATED!)", // = 0x12, //DEPRECATED
"NATIVE_TYPE_BSTR", // = 0x13,
"NATIVE_TYPE_LPSTR", // = 0x14,
"NATIVE_TYPE_LPWSTR", // = 0x15,
"NATIVE_TYPE_LPTSTR", // = 0x16,
"NATIVE_TYPE_FIXEDSYSSTRING", // = 0x17,
"NATIVE_TYPE_OBJECTREF(DEPRECATED!)", // = 0x18, //DEPRECATED
"NATIVE_TYPE_IUNKNOWN", // = 0x19,
"NATIVE_TYPE_IDISPATCH", // = 0x1a,
"NATIVE_TYPE_STRUCT", // = 0x1b,
"NATIVE_TYPE_INTF", // = 0x1c,
"NATIVE_TYPE_SAFEARRAY", // = 0x1d,
"NATIVE_TYPE_FIXEDARRAY", // = 0x1e,
"NATIVE_TYPE_INT", // = 0x1f,
"NATIVE_TYPE_UINT", // = 0x20,
"NATIVE_TYPE_NESTEDSTRUCT(DEPRECATED!)", // = 0x21, //DEPRECATED (use "NATIVE_TYPE_STRUCT)
"NATIVE_TYPE_BYVALSTR", // = 0x22,
"NATIVE_TYPE_ANSIBSTR", // = 0x23,
"NATIVE_TYPE_TBSTR", // = 0x24, // select BSTR or ANSIBSTR depending on platform
"NATIVE_TYPE_VARIANTBOOL", // = 0x25, // (2-byte boolean value: TRUE = -1, FALSE = 0)
"NATIVE_TYPE_FUNC", // = 0x26,
"NATIVE_TYPE_LPVOID", // = 0x27, // blind pointer (no deep marshaling)
"NATIVE_TYPE_ASANY", // = 0x28,
"<UNDEFINED NATIVE TYPE 0x29>",
"NATIVE_TYPE_ARRAY", // = 0x2a,
"NATIVE_TYPE_LPSTRUCT", // = 0x2b,
"NATIVE_TYPE_CUSTOMMARSHALER", // = 0x2c, // Custom marshaler.
"NATIVE_TYPE_ERROR", // = 0x2d, // VT_HRESULT when exporting to a typelib.
};
size_t g_cbCoffNames = 0;
mdMethodDef g_tkEntryPoint = 0; // integration with ILDASM
// helper to init signature buffer
void MDInfo::InitSigBuffer()
{
strcpy_s((LPSTR)m_sigBuf.Ptr(), 1, "");
} // void MDInfo::InitSigBuffer()
// Helper to append a string to the signature buffer. If the signature buffer is not big enough,
// we grow it.
HRESULT MDInfo::AddToSigBuffer(_In_z_ const char *string)
{
HRESULT hr;
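// Required buffer size: current contents + appended string + NUL terminator.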
size_t LL = strlen((LPSTR)m_sigBuf.Ptr()) + strlen(string) + 1;
IfFailRet( m_sigBuf.ReSizeNoThrow(LL) );
strcat_s((LPSTR)m_sigBuf.Ptr(), LL, string);
return NOERROR;
} // HRESULT MDInfo::AddToSigBuffer()
MDInfo::MDInfo(IMetaDataImport2 *pImport, IMetaDataAssemblyImport *pAssemblyImport, LPCWSTR szScope, strPassBackFn inPBFn, ULONG DumpFilter)
{ // This constructor is specific to ILDASM/MetaInfo integration
_ASSERTE(pImport != NULL);
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType));
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX);
Init(inPBFn, (DUMP_FILTER)DumpFilter);
m_pImport = pImport;
m_pImport->AddRef();
if ((m_pAssemblyImport = pAssemblyImport))
m_pAssemblyImport->AddRef();
else
{
HRESULT hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport);
if (FAILED(hr))
Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr);
}
} // MDInfo::MDInfo()
MDInfo::MDInfo(IMetaDataDispenserEx *pDispenser, LPCWSTR szScope, strPassBackFn inPBFn, ULONG DumpFilter)
{
HRESULT hr = S_OK;
VARIANT value;
_ASSERTE(pDispenser != NULL && inPBFn != NULL);
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType));
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX);
Init(inPBFn, (DUMP_FILTER)DumpFilter);
// Attempt to open scope on given file
V_VT(&value) = VT_UI4;
V_UI4(&value) = MDImportOptionAll;
if (FAILED(hr = pDispenser->SetOption(MetaDataImportOption, &value)))
Error("SetOption failed.", hr);
hr = pDispenser->OpenScope(szScope, ofNoTransform, IID_IMetaDataImport2, (IUnknown**)&m_pImport);
if (hr == CLDB_E_BADUPDATEMODE)
{
V_VT(&value) = VT_UI4;
V_UI4(&value) = MDUpdateIncremental;
if (FAILED(hr = pDispenser->SetOption(MetaDataSetUpdate, &value)))
Error("SetOption failed.", hr);
hr = pDispenser->OpenScope(szScope, ofNoTransform, IID_IMetaDataImport2, (IUnknown**)&m_pImport);
}
if (FAILED(hr))
Error("OpenScope failed", hr);
// Query for the IMetaDataAssemblyImport interface.
hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport);
if (FAILED(hr))
Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr);
} // MDInfo::MDInfo()
MDInfo::MDInfo(IMetaDataDispenserEx *pDispenser, PBYTE pbMetaData, DWORD dwSize, strPassBackFn inPBFn, ULONG DumpFilter)
{
_ASSERTE(pDispenser != NULL && inPBFn != NULL);
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ARRAY_SIZE(g_szMapUndecorateType));
_ASSERTE(ARRAY_SIZE(g_szMapElementType) == ELEMENT_TYPE_MAX);
Init(inPBFn, (DUMP_FILTER)DumpFilter);
// Attempt to open scope on manifest. It's valid for this to fail, because
// the blob we open may just be the assembly resources (the space is
// overloaded until we remove LM -a assemblies, at which point this
// constructor should probably be removed too).
HRESULT hr;
VARIANT value;
V_VT(&value) = VT_UI4;
V_UI4(&value) = MDImportOptionAll;
if (FAILED(hr = pDispenser->SetOption(MetaDataImportOption, &value)))
Error("SetOption failed.", hr);
if (SUCCEEDED(hr = pDispenser->OpenScopeOnMemory(pbMetaData, dwSize, ofNoTransform,
IID_IMetaDataImport2, (IUnknown**)&m_pImport)))
{
// Query for the IMetaDataAssemblyImport interface.
hr = m_pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void**) &m_pAssemblyImport);
if (FAILED(hr))
Error("QueryInterface failed for IID_IMetaDataAssemblyImport.", hr);
}
} // MDInfo::MDInfo()
void MDInfo::Init(
strPassBackFn inPBFn, // Callback to write text.
DUMP_FILTER DumpFilter) // Flags to control the dump.
{
m_pbFn = inPBFn;
m_DumpFilter = DumpFilter;
m_pTables = NULL;
m_pTables2 = NULL;
m_pImport = NULL;
m_pAssemblyImport = NULL;
} // void MDInfo::Init()
// Destructor
MDInfo::~MDInfo()
{
if (m_pImport)
m_pImport->Release();
if (m_pAssemblyImport)
m_pAssemblyImport->Release();
if (m_pTables)
m_pTables->Release();
if (m_pTables2)
m_pTables2->Release();
} // MDInfo::~MDInfo()
//=====================================================================================================================
// DisplayMD() function
//
// Displays the meta data content of a file
void MDInfo::DisplayMD()
{
if ((m_DumpFilter & dumpAssem) && m_pAssemblyImport)
DisplayAssemblyInfo();
WriteLine("===========================================================");
// Metadata itself: Raw or normal view
if (m_DumpFilter & (dumpSchema | dumpHeader | dumpCSV | dumpRaw | dumpStats | dumpRawHeaps))
DisplayRaw();
else
{
DisplayVersionInfo();
DisplayScopeInfo();
WriteLine("===========================================================");
DisplayGlobalFunctions();
DisplayGlobalFields();
DisplayGlobalMemberRefs();
DisplayTypeDefs();
DisplayTypeRefs();
DisplayTypeSpecs();
DisplayMethodSpecs();
DisplayModuleRefs();
DisplaySignatures();
DisplayAssembly();
DisplayUserStrings();
// WriteLine("============================================================");
// WriteLine("Unresolved MemberRefs");
// DisplayMemberRefs(0x00000001, "\t");
VWrite("\n\nCoff symbol name overhead: %d\n", g_cbCoffNames);
}
WriteLine("===========================================================");
if (m_DumpFilter & dumpUnsat)
DisplayUnsatInfo();
WriteLine("===========================================================");
} // void MDInfo::DisplayMD()
int MDInfo::WriteLine(_In_z_ const char *str)
{
ULONG32 count = (ULONG32) strlen(str);
m_pbFn(str);
m_pbFn("\n");
return count;
} // int MDInfo::WriteLine()
int MDInfo::Write(_In_z_ const char *str)
{
ULONG32 count = (ULONG32) strlen(str);
m_pbFn(str);
return count;
} // int MDInfo::Write()
int MDInfo::VWriteLine(_In_z_ const char *str, ...)
{
va_list marker;
int count;
va_start(marker, str);
count = VWriteMarker(str, marker);
m_pbFn("\n");
va_end(marker);
return count;
} // int MDInfo::VWriteLine()
int MDInfo::VWrite(_In_z_ const char *str, ...)
{
va_list marker;
int count;
va_start(marker, str);
count = VWriteMarker(str, marker);
va_end(marker);
return count;
} // int MDInfo::VWrite()
int MDInfo::VWriteMarker(_In_z_ const char *str, va_list marker)
{
HRESULT hr;
int count = -1;
// Used to allocate 1K, then if not enough, 2K, then 4K.
// Faster to allocate 32K right away and be done with it,
// we're not running on Commodore 64
if (FAILED(hr = m_output.ReSizeNoThrow(STRING_BUFFER_LEN * 8)))
Error("ReSize failed.", hr);
else
{
count = vsprintf_s((char *)m_output.Ptr(), STRING_BUFFER_LEN * 8, str, marker);
m_pbFn((char *)m_output.Ptr());
}
return count;
} // int MDInfo::VWriteMarker()
// Error() function -- prints an error and returns
void MDInfo::Error(const char* szError, HRESULT hr)
{
printf("\n%s\n",szError);
if (hr != S_OK)
{
printf("Failed return code: 0x%08x\n", hr);
IErrorInfo *pIErr = NULL; // Error interface.
BSTR bstrDesc = NULL; // Description text.
#ifdef FEATURE_COMINTEROP
// Try to get an error info object and display the message.
if (GetErrorInfo(0, &pIErr) == S_OK &&
pIErr->GetDescription(&bstrDesc) == S_OK)
{
printf("%ls ", bstrDesc);
SysFreeString(bstrDesc);
}
#endif
// Free the error interface.
if (pIErr)
pIErr->Release();
}
exit(hr);
} // void MDInfo::Error()
// Print out the optional version info included in the MetaData.
void MDInfo::DisplayVersionInfo()
{
if (!(m_DumpFilter & MDInfo::dumpNoLogo))
{
LPCUTF8 pVersionStr;
HRESULT hr = S_OK;
if (m_pTables == 0)
{
if (m_pImport)
hr = m_pImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables);
else if (m_pAssemblyImport)
hr = m_pAssemblyImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables);
else
return;
if (FAILED(hr))
Error("QueryInterface failed for IID_IMetaDataTables.", hr);
}
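// String heap entry 1 holds the optional version string; only print it if it starts with the expected prefix.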
hr = m_pTables->GetString(1, &pVersionStr);
if (FAILED(hr))
Error("GetString() failed.", hr);
if (strstr(pVersionStr, "Version of runtime against which the binary is built : ")
== pVersionStr)
{
WriteLine(const_cast<char *>(pVersionStr));
}
}
} // void MDInfo::DisplayVersionInfo()
// Prints out information about the scope
void MDInfo::DisplayScopeInfo()
{
HRESULT hr;
mdModule mdm;
GUID mvid;
WCHAR scopeName[STRING_BUFFER_LEN];
WCHAR guidString[STRING_BUFFER_LEN];
hr = m_pImport->GetScopeProps( scopeName, STRING_BUFFER_LEN, 0, &mvid);
if (FAILED(hr)) Error("GetScopeProps failed.", hr);
VWriteLine("ScopeName : %ls",scopeName);
if (!(m_DumpFilter & MDInfo::dumpNoLogo))
VWriteLine("MVID : %ls",GUIDAsString(mvid, guidString, STRING_BUFFER_LEN));
hr = m_pImport->GetModuleFromScope(&mdm);
if (FAILED(hr)) Error("GetModuleFromScope failed.", hr);
DisplayPermissions(mdm, "");
DisplayCustomAttributes(mdm, "\t");
} // void MDInfo::DisplayScopeInfo()
void MDInfo::DisplayRaw()
{
int iDump; // Level of info to dump.
if (m_pTables == 0)
m_pImport->QueryInterface(IID_IMetaDataTables, (void**)&m_pTables);
if (m_pTables == 0)
Error("Can't get table info.");
if (m_pTables2 == 0)
m_pImport->QueryInterface(IID_IMetaDataTables2, (void**)&m_pTables2);
if (m_DumpFilter & dumpCSV)
DumpRawCSV();
if (m_DumpFilter & (dumpSchema | dumpHeader | dumpRaw | dumpStats))
{
if (m_DumpFilter & dumpRaw)
iDump = 3;
else
if (m_DumpFilter & dumpSchema)
iDump = 2;
else
iDump = 1;
DumpRaw(iDump, (m_DumpFilter & dumpStats) != 0);
}
if (m_DumpFilter & dumpRawHeaps)
DumpRawHeaps();
} // void MDInfo::DisplayRaw()
// return the name of the type of token passed in
const char *MDInfo::TokenTypeName(mdToken inToken)
{
switch(TypeFromToken(inToken))
{
case mdtTypeDef: return "TypeDef";
case mdtInterfaceImpl: return "InterfaceImpl";
case mdtMethodDef: return "MethodDef";
case mdtFieldDef: return "FieldDef";
case mdtTypeRef: return "TypeRef";
case mdtMemberRef: return "MemberRef";
case mdtCustomAttribute:return "CustomAttribute";
case mdtParamDef: return "ParamDef";
case mdtProperty: return "Property";
case mdtEvent: return "Event";
case mdtTypeSpec: return "TypeSpec";
default: return "[UnknownTokenType]";
}
} // char *MDInfo::TokenTypeName()
// Prints out name of the given memberref
//
LPCWSTR MDInfo::MemberRefName(mdMemberRef inMemRef, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
HRESULT hr;
hr = m_pImport->GetMemberRefProps( inMemRef, NULL, buffer, bufLen,
NULL, NULL, NULL);
if (FAILED(hr)) Error("GetMemberRefProps failed.", hr);
return buffer;
} // LPCWSTR MDInfo::MemberRefName()
// Prints out information about the given memberref
//
void MDInfo::DisplayMemberRefInfo(mdMemberRef inMemRef, const char *preFix)
{
HRESULT hr;
WCHAR memRefName[STRING_BUFFER_LEN];
ULONG nameLen;
mdToken token;
PCCOR_SIGNATURE pbSigBlob;
ULONG ulSigBlob;
char newPreFix[STRING_BUFFER_LEN];
hr = m_pImport->GetMemberRefProps( inMemRef, &token, memRefName, STRING_BUFFER_LEN,
&nameLen, &pbSigBlob, &ulSigBlob);
if (FAILED(hr)) Error("GetMemberRefProps failed.", hr);
VWriteLine("%s\t\tMember: (%8.8x) %ls: ", preFix, inMemRef, memRefName);
if (ulSigBlob)
DisplaySignature(pbSigBlob, ulSigBlob, preFix);
else
VWriteLine("%s\t\tERROR: no valid signature ", preFix);
sprintf_s (newPreFix, STRING_BUFFER_LEN, "\t\t%s", preFix);
DisplayCustomAttributes(inMemRef, newPreFix);
} // void MDInfo::DisplayMemberRefInfo()
// Prints out information about all memberrefs of the given typeref
//
void MDInfo::DisplayMemberRefs(mdToken tkParent, const char *preFix)
{
HCORENUM memRefEnum = NULL;
HRESULT hr;
mdMemberRef memRefs[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
while (SUCCEEDED(hr = m_pImport->EnumMemberRefs( &memRefEnum, tkParent,
memRefs, ARRAY_SIZE(memRefs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("%s\tMemberRef #%d (%08x)", preFix, totalCount, memRefs[i]);
VWriteLine("%s\t-------------------------------------------------------", preFix);
DisplayMemberRefInfo(memRefs[i], preFix);
}
}
m_pImport->CloseEnum( memRefEnum);
} // void MDInfo::DisplayMemberRefs()
// Prints out information about all typerefs in the scope.
//
// Iterates through each typeref and prints out the information of each.
//
void MDInfo::DisplayTypeRefs()
{
HCORENUM typeRefEnum = NULL;
mdTypeRef typeRefs[ENUM_BUFFER_SIZE];
ULONG count, totalCount=1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumTypeRefs( &typeRefEnum,
typeRefs, ARRAY_SIZE(typeRefs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("TypeRef #%d (%08x)", totalCount, typeRefs[i]);
WriteLine("-------------------------------------------------------");
DisplayTypeRefInfo(typeRefs[i]);
DisplayMemberRefs(typeRefs[i], "");
WriteLine("");
}
}
m_pImport->CloseEnum( typeRefEnum);
} // void MDInfo::DisplayTypeRefs()
void MDInfo::DisplayTypeSpecs()
{
HCORENUM typespecEnum = NULL;
mdTypeSpec typespecs[ENUM_BUFFER_SIZE];
ULONG count, totalCount=1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumTypeSpecs( &typespecEnum,
typespecs, ARRAY_SIZE(typespecs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("TypeSpec #%d (%08x)", totalCount, typespecs[i]);
WriteLine("-------------------------------------------------------");
DisplayTypeSpecInfo(typespecs[i], "");
DisplayMemberRefs(typespecs[i], "");
WriteLine("");
}
}
m_pImport->CloseEnum( typespecEnum);
} // void MDInfo::DisplayTypeSpecs()
void MDInfo::DisplayMethodSpecs()
{
HCORENUM MethodSpecEnum = NULL;
mdMethodSpec MethodSpecs[ENUM_BUFFER_SIZE];
ULONG count, totalCount=1;
///// HRESULT hr;
///// HACK until I implement EnumMethodSpecs!
///// while (SUCCEEDED(hr = m_pImport->EnumMethodSpecs( &MethodSpecEnum,
///// MethodSpecs, ARRAY_SIZE(MethodSpecs), &count)) &&
///// count > 0)
for (ULONG rid=1; m_pImport->IsValidToken(TokenFromRid(rid, mdtMethodSpec)); ++rid)
{
// More hackery
count = 1;
MethodSpecs[0] = TokenFromRid(rid, mdtMethodSpec);
// More hackery
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("MethodSpec #%d (%08x)", totalCount, MethodSpecs[i]);
DisplayMethodSpecInfo(MethodSpecs[i], "");
WriteLine("");
}
}
m_pImport->CloseEnum( MethodSpecEnum);
} // void MDInfo::DisplayMethodSpecs()
// Called to display the information about all typedefs in the object.
//
void MDInfo::DisplayTypeDefs()
{
HCORENUM typeDefEnum = NULL;
mdTypeDef typeDefs[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumTypeDefs( &typeDefEnum,
typeDefs, ARRAY_SIZE(typeDefs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("TypeDef #%d (%08x)", totalCount, typeDefs[i]);
WriteLine("-------------------------------------------------------");
DisplayTypeDefInfo(typeDefs[i]);
WriteLine("");
}
}
m_pImport->CloseEnum( typeDefEnum);
} // void MDInfo::DisplayTypeDefs()
// Called to display the information about all modulerefs in the object.
//
void MDInfo::DisplayModuleRefs()
{
HCORENUM moduleRefEnum = NULL;
mdModuleRef moduleRefs[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumModuleRefs( &moduleRefEnum,
moduleRefs, ARRAY_SIZE(moduleRefs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("ModuleRef #%d (%08x)", totalCount, moduleRefs[i]);
WriteLine("-------------------------------------------------------");
DisplayModuleRefInfo(moduleRefs[i]);
DisplayMemberRefs(moduleRefs[i], "");
WriteLine("");
}
}
m_pImport->CloseEnum( moduleRefEnum);
} // void MDInfo::DisplayModuleRefs()
// Prints out information about the given moduleref
//
void MDInfo::DisplayModuleRefInfo(mdModuleRef inModuleRef)
{
HRESULT hr;
WCHAR moduleRefName[STRING_BUFFER_LEN];
ULONG nameLen;
hr = m_pImport->GetModuleRefProps( inModuleRef, moduleRefName, STRING_BUFFER_LEN,
&nameLen);
if (FAILED(hr)) Error("GetModuleRefProps failed.", hr);
VWriteLine("\t\tModuleRef: (%8.8x) %ls: ", inModuleRef, moduleRefName);
DisplayCustomAttributes(inModuleRef, "\t\t");
} // void MDInfo::DisplayModuleRefInfo()
// Called to display the information about all signatures in the object.
//
void MDInfo::DisplaySignatures()
{
HCORENUM signatureEnum = NULL;
mdSignature signatures[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumSignatures( &signatureEnum,
signatures, ARRAY_SIZE(signatures), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("Signature #%d (%#08x)", totalCount, signatures[i]);
WriteLine("-------------------------------------------------------");
DisplaySignatureInfo(signatures[i]);
WriteLine("");
}
}
m_pImport->CloseEnum( signatureEnum);
} // void MDInfo::DisplaySignatures()
// Prints out information about the given signature
//
void MDInfo::DisplaySignatureInfo(mdSignature inSignature)
{
HRESULT hr;
PCCOR_SIGNATURE pbSigBlob;
ULONG ulSigBlob;
hr = m_pImport->GetSigFromToken( inSignature, &pbSigBlob, &ulSigBlob );
if (FAILED(hr)) Error("GetSigFromToken failed.", hr);
if(ulSigBlob)
DisplaySignature(pbSigBlob, ulSigBlob, "");
else
VWriteLine("\t\tERROR: no valid signature ");
} // void MDInfo::DisplaySignatureInfo()
// returns the passed-in buffer which is filled with the name of the given
// member in wide characters
//
LPCWSTR MDInfo::MemberName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
HRESULT hr;
hr = m_pImport->GetMemberProps( inToken, NULL, buffer, bufLen,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
if (FAILED(hr)) Error("GetMemberProps failed.", hr);
return (buffer);
} // LPCWSTR MDInfo::MemberName()
// displays information for the given method
//
void MDInfo::DisplayMethodInfo(mdMethodDef inMethod, DWORD *pflags)
{
HRESULT hr;
mdTypeDef memTypeDef;
WCHAR memberName[STRING_BUFFER_LEN];
ULONG nameLen;
DWORD flags;
PCCOR_SIGNATURE pbSigBlob;
ULONG ulSigBlob;
ULONG ulCodeRVA;
ULONG ulImplFlags;
hr = m_pImport->GetMethodProps( inMethod, &memTypeDef, memberName, STRING_BUFFER_LEN,
&nameLen, &flags, &pbSigBlob, &ulSigBlob, &ulCodeRVA, &ulImplFlags);
if (FAILED(hr)) Error("GetMethodProps failed.", hr);
if (pflags)
*pflags = flags;
VWriteLine("\t\tMethodName: %ls (%8.8X)", memberName, inMethod);
char sFlags[STRING_BUFFER_LEN];
sFlags[0] = 0;
ISFLAG(Md, Public);
ISFLAG(Md, Private);
ISFLAG(Md, Family);
ISFLAG(Md, Assem);
ISFLAG(Md, FamANDAssem);
ISFLAG(Md, FamORAssem);
ISFLAG(Md, PrivateScope);
ISFLAG(Md, Static);
ISFLAG(Md, Final);
ISFLAG(Md, Virtual);
ISFLAG(Md, HideBySig);
ISFLAG(Md, ReuseSlot);
ISFLAG(Md, NewSlot);
ISFLAG(Md, Abstract);
ISFLAG(Md, SpecialName);
ISFLAG(Md, RTSpecialName);
ISFLAG(Md, PinvokeImpl);
ISFLAG(Md, UnmanagedExport);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
bool result = (((flags) & mdRTSpecialName) && !wcscmp((memberName), W(".ctor")));
if (result) strcat_s(sFlags, STRING_BUFFER_LEN, "[.ctor] ");
result = (((flags) & mdRTSpecialName) && !wcscmp((memberName), W(".cctor")));
if (result) strcat_s(sFlags,STRING_BUFFER_LEN, "[.cctor] ");
// "Reserved" flags
ISFLAG(Md, HasSecurity);
ISFLAG(Md, RequireSecObject);
VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags);
VWriteLine("\t\tRVA : 0x%08x", ulCodeRVA);
flags = ulImplFlags;
sFlags[0] = 0;
ISFLAG(Mi, Native);
ISFLAG(Mi, IL);
ISFLAG(Mi, OPTIL);
ISFLAG(Mi, Runtime);
ISFLAG(Mi, Unmanaged);
ISFLAG(Mi, Managed);
ISFLAG(Mi, ForwardRef);
ISFLAG(Mi, PreserveSig);
ISFLAG(Mi, InternalCall);
ISFLAG(Mi, Synchronized);
ISFLAG(Mi, NoInlining);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\t\tImplFlags : %s (%08x)", sFlags, flags);
if (ulSigBlob)
DisplaySignature(pbSigBlob, ulSigBlob, "");
else
VWriteLine("\t\tERROR: no valid signature ");
DisplayGenericParams(inMethod, "\t\t");
} // void MDInfo::DisplayMethodInfo()
// displays the member information for the given field
//
void MDInfo::DisplayFieldInfo(mdFieldDef inField, DWORD *pdwFlags)
{
HRESULT hr;
mdTypeDef memTypeDef;
WCHAR memberName[STRING_BUFFER_LEN];
ULONG nameLen;
DWORD flags;
PCCOR_SIGNATURE pbSigBlob;
ULONG ulSigBlob;
DWORD dwCPlusTypeFlag;
void const *pValue;
ULONG cbValue;
#ifdef FEATURE_COMINTEROP
VARIANT defaultValue;
::VariantInit(&defaultValue);
#endif
hr = m_pImport->GetFieldProps( inField, &memTypeDef, memberName, STRING_BUFFER_LEN,
&nameLen, &flags, &pbSigBlob, &ulSigBlob, &dwCPlusTypeFlag,
&pValue, &cbValue);
if (FAILED(hr)) Error("GetFieldProps failed.", hr);
if (pdwFlags)
*pdwFlags = flags;
#ifdef FEATURE_COMINTEROP
_FillVariant((BYTE)dwCPlusTypeFlag, pValue, cbValue, &defaultValue);
#endif
char sFlags[STRING_BUFFER_LEN];
sFlags[0] = 0;
ISFLAG(Fd, Public);
ISFLAG(Fd, Private);
ISFLAG(Fd, Family);
ISFLAG(Fd, Assembly);
ISFLAG(Fd, FamANDAssem);
ISFLAG(Fd, FamORAssem);
ISFLAG(Fd, PrivateScope);
ISFLAG(Fd, Static);
ISFLAG(Fd, InitOnly);
ISFLAG(Fd, Literal);
ISFLAG(Fd, NotSerialized);
ISFLAG(Fd, SpecialName);
ISFLAG(Fd, RTSpecialName);
ISFLAG(Fd, PinvokeImpl);
// "Reserved" flags
ISFLAG(Fd, HasDefault);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\t\tField Name: %ls (%8.8X)", memberName, inField);
VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags);
#ifdef FEATURE_COMINTEROP
if (IsFdHasDefault(flags))
VWriteLine("\tDefltValue: (%s) %ls", g_szMapElementType[dwCPlusTypeFlag], VariantAsString(&defaultValue));
#endif
if (!ulSigBlob) // Signature size should be non-zero for fields
VWriteLine("\t\tERROR: no valid signature ");
else
DisplaySignature(pbSigBlob, ulSigBlob, "");
#ifdef FEATURE_COMINTEROP
::VariantClear(&defaultValue);
#endif
} // void MDInfo::DisplayFieldInfo()
// displays the RVA for the given global field.
void MDInfo::DisplayFieldRVA(mdFieldDef inFieldDef)
{
HRESULT hr;
ULONG ulRVA;
hr = m_pImport->GetRVA(inFieldDef, &ulRVA, 0);
if (FAILED(hr) && hr != CLDB_E_RECORD_NOTFOUND) Error("GetRVA failed.", hr);
VWriteLine("\t\tRVA : 0x%08x", ulRVA);
} // void MDInfo::DisplayFieldRVA()
// displays information about every global function.
void MDInfo::DisplayGlobalFunctions()
{
WriteLine("Global functions");
WriteLine("-------------------------------------------------------");
DisplayMethods(mdTokenNil);
WriteLine("");
} // void MDInfo::DisplayGlobalFunctions()
// displays information about every global field.
void MDInfo::DisplayGlobalFields()
{
WriteLine("Global fields");
WriteLine("-------------------------------------------------------");
DisplayFields(mdTokenNil, NULL, 0);
WriteLine("");
} // void MDInfo::DisplayGlobalFields()
// displays information about every global memberref.
void MDInfo::DisplayGlobalMemberRefs()
{
WriteLine("Global MemberRefs");
WriteLine("-------------------------------------------------------");
DisplayMemberRefs(mdTokenNil, "");
WriteLine("");
} // void MDInfo::DisplayGlobalMemberRefs()
// displays information about every method in a given typedef
//
void MDInfo::DisplayMethods(mdTypeDef inTypeDef)
{
HCORENUM methodEnum = NULL;
mdToken methods[ENUM_BUFFER_SIZE];
DWORD flags;
ULONG count, totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumMethods( &methodEnum, inTypeDef,
methods, ARRAY_SIZE(methods), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\tMethod #%d (%08x) %s", totalCount, methods[i], (methods[i] == g_tkEntryPoint) ? "[ENTRYPOINT]" : "");
WriteLine("\t-------------------------------------------------------");
DisplayMethodInfo(methods[i], &flags);
DisplayParams(methods[i]);
DisplayCustomAttributes(methods[i], "\t\t");
DisplayPermissions(methods[i], "\t");
DisplayMemberRefs(methods[i], "\t");
// P-invoke data if present.
if (IsMdPinvokeImpl(flags))
DisplayPinvokeInfo(methods[i]);
WriteLine("");
}
}
m_pImport->CloseEnum( methodEnum);
} // void MDInfo::DisplayMethods()
// displays information about every field in a given typedef
//
void MDInfo::DisplayFields(mdTypeDef inTypeDef, COR_FIELD_OFFSET *rFieldOffset, ULONG cFieldOffset)
{
HCORENUM fieldEnum = NULL;
mdToken fields[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
DWORD flags;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumFields( &fieldEnum, inTypeDef,
fields, ARRAY_SIZE(fields), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\tField #%d (%08x)",totalCount, fields[i]);
WriteLine("\t-------------------------------------------------------");
DisplayFieldInfo(fields[i], &flags);
DisplayCustomAttributes(fields[i], "\t\t");
DisplayPermissions(fields[i], "\t");
DisplayFieldMarshal(fields[i]);
// RVA if it's a global field.
if (inTypeDef == mdTokenNil)
DisplayFieldRVA(fields[i]);
// P-invoke data if present.
if (IsFdPinvokeImpl(flags))
DisplayPinvokeInfo(fields[i]);
// Display offset if present.
if (cFieldOffset)
{
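// Look up this field's RID in the caller-supplied layout table to report its offset.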
bool found = false;
for (ULONG iLayout = 0; iLayout < cFieldOffset; ++iLayout)
{
if (RidFromToken(rFieldOffset[iLayout].ridOfField) == RidFromToken(fields[i]))
{
found = true;
VWriteLine("\t\tOffset : 0x%08x", rFieldOffset[iLayout].ulOffset);
break;
}
}
_ASSERTE(found);
}
WriteLine("");
}
}
m_pImport->CloseEnum( fieldEnum);
} // void MDInfo::DisplayFields()
// displays information about every methodImpl in a given typedef
//
void MDInfo::DisplayMethodImpls(mdTypeDef inTypeDef)
{
HCORENUM methodImplEnum = NULL;
mdMethodDef rtkMethodBody[ENUM_BUFFER_SIZE];
mdMethodDef rtkMethodDecl[ENUM_BUFFER_SIZE];
ULONG count, totalCount=1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumMethodImpls( &methodImplEnum, inTypeDef,
rtkMethodBody, rtkMethodDecl, ARRAY_SIZE(rtkMethodBody), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\n\tMethodImpl #%d (%08x)", totalCount, totalCount);
WriteLine("\t-------------------------------------------------------");
VWriteLine("\t\tMethod Body Token : 0x%08x", rtkMethodBody[i]);
VWriteLine("\t\tMethod Declaration Token : 0x%08x", rtkMethodDecl[i]);
WriteLine("");
}
}
m_pImport->CloseEnum( methodImplEnum);
} // void MDInfo::DisplayMethodImpls()
// displays information about the given parameter
//
void MDInfo::DisplayParamInfo(mdParamDef inParamDef)
{
mdMethodDef md;
ULONG num;
WCHAR paramName[STRING_BUFFER_LEN];
ULONG nameLen;
DWORD flags;
VARIANT defValue;
DWORD dwCPlusFlags;
void const *pValue;
ULONG cbValue;
#ifdef FEATURE_COMINTEROP
::VariantInit(&defValue);
#endif
HRESULT hr = m_pImport->GetParamProps( inParamDef, &md, &num, paramName, ARRAY_SIZE(paramName),
&nameLen, &flags, &dwCPlusFlags, &pValue, &cbValue);
if (FAILED(hr)) Error("GetParamProps failed.", hr);
_FillVariant((BYTE)dwCPlusFlags, pValue, cbValue, &defValue);
char sFlags[STRING_BUFFER_LEN];
sFlags[0] = 0;
ISFLAG(Pd, In);
ISFLAG(Pd, Out);
ISFLAG(Pd, Optional);
// "Reserved" flags.
ISFLAG(Pd, HasDefault);
ISFLAG(Pd, HasFieldMarshal);
if (!*sFlags)
strcpy_s(sFlags,STRING_BUFFER_LEN, "[none]");
VWrite("\t\t\t(%ld) ParamToken : (%08x) Name : %ls flags: %s (%08x)", num, inParamDef, paramName, sFlags, flags);
#ifdef FEATURE_COMINTEROP
if (IsPdHasDefault(flags))
VWriteLine(" Default: (%s) %ls", g_szMapElementType[dwCPlusFlags], VariantAsString(&defValue));
else
#endif
VWriteLine("");
DisplayCustomAttributes(inParamDef, "\t\t\t");
#ifdef FEATURE_COMINTEROP
::VariantClear(&defValue);
#endif
} // void MDInfo::DisplayParamInfo()
// displays all parameters for a given memberdef
//
void MDInfo::DisplayParams(mdMethodDef inMethodDef)
{
HCORENUM paramEnum = NULL;
mdParamDef params[ENUM_BUFFER_SIZE];
ULONG count, paramCount;
bool first = true;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumParams( &paramEnum, inMethodDef,
params, ARRAY_SIZE(params), &count)) &&
count > 0)
{
if (first)
{
m_pImport->CountEnum( paramEnum, &paramCount);
VWriteLine("\t\t%d Parameters", paramCount);
}
for (ULONG i = 0; i < count; i++)
{
DisplayParamInfo(params[i]);
DisplayFieldMarshal(params[i]);
}
first = false;
}
m_pImport->CloseEnum( paramEnum);
} // void MDInfo::DisplayParams()
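// displays the generic parameters (if any) declared on the given TypeDef or MethodDef
//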
void MDInfo::DisplayGenericParams(mdToken tk, const char *prefix)
{
HCORENUM paramEnum = NULL;
mdParamDef params[ENUM_BUFFER_SIZE];
ULONG count, paramCount;
bool first = true;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumGenericParams( &paramEnum, tk,
params, ARRAY_SIZE(params), &count)) &&
count > 0)
{
if (first)
{
m_pImport->CountEnum( paramEnum, &paramCount);
VWriteLine("%s%d Generic Parameters", prefix, paramCount);
}
for (ULONG i = 0; i < count; i++)
{
DisplayGenericParamInfo(params[i], prefix);
}
first = false;
}
m_pImport->CloseEnum( paramEnum);
}
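// displays one generic parameter: sequence number, name, flags, owner, and any constraints
//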
void MDInfo::DisplayGenericParamInfo(mdGenericParam tkParam, const char *prefix)
{
ULONG ulSeq;
WCHAR paramName[STRING_BUFFER_LEN];
ULONG nameLen;
DWORD flags;
mdToken tkOwner;
char newprefix[30];
HCORENUM constraintEnum = NULL;
mdParamDef constraints[4];
ULONG count, constraintCount;
mdToken constraint;
mdToken owner;
bool first = true;
HRESULT hr = m_pImport->GetGenericParamProps(tkParam, &ulSeq, &flags, &tkOwner, NULL, paramName, ARRAY_SIZE(paramName), &nameLen);
if (FAILED(hr)) Error("GetGenericParamProps failed.", hr);
VWriteLine("%s\t(%ld) GenericParamToken : (%08x) Name : %ls flags: %08x Owner: %08x", prefix, ulSeq, tkParam, paramName, flags, tkOwner);
// Any constraints for the GenericParam
while (SUCCEEDED(hr = m_pImport->EnumGenericParamConstraints(&constraintEnum, tkParam,
constraints, ARRAY_SIZE(constraints), &count)) &&
count > 0)
{
if (first)
{
m_pImport->CountEnum( constraintEnum, &constraintCount);
VWriteLine("%s\t\t%d Constraint(s)", prefix, constraintCount);
}
VWrite("%s\t\t", prefix);
for (ULONG i=0; i< count; ++i)
{
hr = m_pImport->GetGenericParamConstraintProps(constraints[i], &owner, &constraint);
if (owner != tkParam)
VWrite("%08x (owner: %08x) ", constraint, owner);
else
VWrite("%08x ", constraint);
}
VWriteLine("");
}
m_pImport->CloseEnum(constraintEnum);
sprintf_s(newprefix, 30, "%s\t", prefix);
DisplayCustomAttributes(tkParam, newprefix);
}
LPCWSTR MDInfo::TokenName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
LPCUTF8 pName; // Token name in UTF8.
if (IsNilToken(inToken))
return W("");
m_pImport->GetNameFromToken(inToken, &pName);
WszMultiByteToWideChar(CP_UTF8,0, pName,-1, buffer,bufLen);
return buffer;
} // LPCWSTR MDInfo::TokenName()
// prints out name of typeref or typedef
//
LPCWSTR MDInfo::TypeDeforRefName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
if (RidFromToken(inToken))
{
if (TypeFromToken(inToken) == mdtTypeDef)
return (TypeDefName((mdTypeDef) inToken, buffer, bufLen));
else if (TypeFromToken(inToken) == mdtTypeRef)
return (TypeRefName((mdTypeRef) inToken, buffer, bufLen));
else if (TypeFromToken(inToken) == mdtTypeSpec)
return W("[TypeSpec]");
else
return W("[InvalidReference]");
}
else
return W("");
} // LPCWSTR MDInfo::TypeDeforRefName()
LPCWSTR MDInfo::MemberDeforRefName(mdToken inToken, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
if (RidFromToken(inToken))
{
if (TypeFromToken(inToken) == mdtMethodDef || TypeFromToken(inToken) == mdtFieldDef)
return (MemberName(inToken, buffer, bufLen));
else if (TypeFromToken(inToken) == mdtMemberRef)
return (MemberRefName((mdMemberRef) inToken, buffer, bufLen));
else
return W("[InvalidReference]");
}
else
return W("");
} // LPCWSTR MDInfo::MemberDeforRefName()
// prints out only the name of the given typedef
//
//
LPCWSTR MDInfo::TypeDefName(mdTypeDef inTypeDef, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
HRESULT hr;
hr = m_pImport->GetTypeDefProps(
// [IN] The import scope.
inTypeDef, // [IN] TypeDef token for inquiry.
buffer, // [OUT] Put name here.
bufLen, // [IN] size of name buffer in wide chars.
NULL, // [OUT] put size of name (wide chars) here.
NULL, // [OUT] Put flags here.
NULL); // [OUT] Put base class TypeDef/TypeRef here.
if (FAILED(hr))
{
swprintf_s(buffer, bufLen, W("[Invalid TypeDef]"));
}
return buffer;
} // LPCWSTR MDInfo::TypeDefName()
// prints out all the properties of a given typedef
//
void MDInfo::DisplayTypeDefProps(mdTypeDef inTypeDef)
{
HRESULT hr;
WCHAR typeDefName[STRING_BUFFER_LEN];
ULONG nameLen;
DWORD flags;
mdToken extends;
ULONG dwPacking; // Packing size of class, if specified.
ULONG dwSize; // Total size of class, if specified.
hr = m_pImport->GetTypeDefProps(
inTypeDef, // [IN] TypeDef token for inquiry.
typeDefName, // [OUT] Put name here.
STRING_BUFFER_LEN, // [IN] size of name buffer in wide chars.
&nameLen, // [OUT] put size of name (wide chars) here.
&flags, // [OUT] Put flags here.
&extends); // [OUT] Put base class TypeDef/TypeRef here.
if (FAILED(hr)) Error("GetTypeDefProps failed.", hr);
char sFlags[STRING_BUFFER_LEN];
WCHAR szTempBuf[STRING_BUFFER_LEN];
VWriteLine("\tTypDefName: %ls (%8.8X)",typeDefName,inTypeDef);
VWriteLine("\tFlags : %s (%08x)",ClassFlags(flags, sFlags), flags);
VWriteLine("\tExtends : %8.8X [%s] %ls",extends,TokenTypeName(extends),
TypeDeforRefName(extends, szTempBuf, ARRAY_SIZE(szTempBuf)));
hr = m_pImport->GetClassLayout(inTypeDef, &dwPacking, 0,0,0, &dwSize);
if (hr == S_OK)
VWriteLine("\tLayout : Packing:%d, Size:%d", dwPacking, dwSize);
if (IsTdNested(flags))
{
mdTypeDef tkEnclosingClass;
hr = m_pImport->GetNestedClassProps(inTypeDef, &tkEnclosingClass);
if (hr == S_OK)
{
VWriteLine("\tEnclosingClass : %ls (%8.8X)", TypeDeforRefName(tkEnclosingClass,
szTempBuf, ARRAY_SIZE(szTempBuf)), tkEnclosingClass);
}
else if (hr == CLDB_E_RECORD_NOTFOUND)
WriteLine("ERROR: EnclosingClass not found for NestedClass");
else
Error("GetNestedClassProps failed.", hr);
}
} // void MDInfo::DisplayTypeDefProps()
// Prints out the name of the given TypeRef
//
LPCWSTR MDInfo::TypeRefName(mdTypeRef tr, _Out_writes_(bufLen) LPWSTR buffer, ULONG bufLen)
{
HRESULT hr;
hr = m_pImport->GetTypeRefProps(
tr, // The class ref token.
NULL, // Resolution scope.
buffer, // Put the name here.
bufLen, // Size of the name buffer, wide chars.
NULL); // Put actual size of name here.
if (FAILED(hr))
{
swprintf_s(buffer, bufLen, W("[Invalid TypeRef]"));
}
return (buffer);
} // LPCWSTR MDInfo::TypeRefName()
// Prints out all the info of the given TypeRef
//
void MDInfo::DisplayTypeRefInfo(mdTypeRef tr)
{
HRESULT hr;
mdToken tkResolutionScope;
WCHAR typeRefName[STRING_BUFFER_LEN];
ULONG nameLen;
hr = m_pImport->GetTypeRefProps(
tr, // The class ref token.
&tkResolutionScope, // ResolutionScope.
typeRefName, // Put the name here.
STRING_BUFFER_LEN, // Size of the name buffer, wide chars.
&nameLen); // Put actual size of name here.
if (FAILED(hr)) Error("GetTypeRefProps failed.", hr);
VWriteLine("Token: 0x%08x", tr);
VWriteLine("ResolutionScope: 0x%08x", tkResolutionScope);
VWriteLine("TypeRefName: %ls",typeRefName);
DisplayCustomAttributes(tr, "\t");
} // void MDInfo::DisplayTypeRefInfo()
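// Prints out the signature stored for the given TypeSpec token
//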
void MDInfo::DisplayTypeSpecInfo(mdTypeSpec ts, const char *preFix)
{
HRESULT hr;
PCCOR_SIGNATURE pvSig;
ULONG cbSig;
ULONG cb;
InitSigBuffer();
hr = m_pImport->GetTypeSpecFromToken(
ts, // The class ref token.
&pvSig,
&cbSig);
if (FAILED(hr)) Error("GetTypeSpecFromToken failed.", hr);
// DisplaySignature(pvSig, cbSig, preFix);
if (FAILED(hr = GetOneElementType(pvSig, cbSig, &cb)))
goto ErrExit;
VWriteLine("%s\tTypeSpec :%s", preFix, (LPSTR)m_sigBuf.Ptr());
// Hex, too?
if (m_DumpFilter & dumpMoreHex)
{
char rcNewPrefix[80];
sprintf_s(rcNewPrefix, 80, "%s\tSignature", preFix);
DumpHex(rcNewPrefix, pvSig, cbSig, false, 24);
}
ErrExit:
return;
} // void MDInfo::DisplayTypeSpecInfo()
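// Prints out the parent token and instantiation signature of the given MethodSpec
//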
void MDInfo::DisplayMethodSpecInfo(mdMethodSpec ms, const char *preFix)
{
HRESULT hr;
PCCOR_SIGNATURE pvSig;
ULONG cbSig;
mdToken tk;
InitSigBuffer();
hr = m_pImport->GetMethodSpecProps(
ms, // The MethodSpec token
&tk, // The MethodDef or MemberRef
&pvSig, // Signature.
&cbSig); // Size of signature.
VWriteLine("%s\tParent : 0x%08x", preFix, tk);
DisplaySignature(pvSig, cbSig, preFix);
//ErrExit:
return;
} // void MDInfo::DisplayMethodSpecInfo()
// Return the passed-in buffer filled with a string detailing the class flags
// associated with the class.
//
char *MDInfo::ClassFlags(DWORD flags, _Out_writes_(STRING_BUFFER_LEN) char *sFlags)
{
sFlags[0] = 0;
ISFLAG(Td, NotPublic);
ISFLAG(Td, Public);
ISFLAG(Td, NestedPublic);
ISFLAG(Td, NestedPrivate);
ISFLAG(Td, NestedFamily);
ISFLAG(Td, NestedAssembly);
ISFLAG(Td, NestedFamANDAssem);
ISFLAG(Td, NestedFamORAssem);
ISFLAG(Td, AutoLayout);
ISFLAG(Td, SequentialLayout);
ISFLAG(Td, ExplicitLayout);
ISFLAG(Td, Class);
ISFLAG(Td, Interface);
ISFLAG(Td, Abstract);
ISFLAG(Td, Sealed);
ISFLAG(Td, SpecialName);
ISFLAG(Td, Import);
ISFLAG(Td, Serializable);
ISFLAG(Td, AnsiClass);
ISFLAG(Td, UnicodeClass);
ISFLAG(Td, AutoClass);
ISFLAG(Td, BeforeFieldInit);
ISFLAG(Td, Forwarder);
// "Reserved" flags
ISFLAG(Td, RTSpecialName);
ISFLAG(Td, HasSecurity);
ISFLAG(Td, WindowsRuntime);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
return sFlags;
} // char *MDInfo::ClassFlags()
// prints out all info on the given typeDef, including all information that
// is specific to a given typedef
//
void MDInfo::DisplayTypeDefInfo(mdTypeDef inTypeDef)
{
DisplayTypeDefProps(inTypeDef);
// Get field layout information.
HRESULT hr = NOERROR;
COR_FIELD_OFFSET *rFieldOffset = NULL;
ULONG cFieldOffset = 0;
hr = m_pImport->GetClassLayout(inTypeDef, NULL, rFieldOffset, 0, &cFieldOffset, NULL);
if (SUCCEEDED(hr) && cFieldOffset)
{
rFieldOffset = new COR_FIELD_OFFSET[cFieldOffset];
if (rFieldOffset == NULL)
Error("_calloc failed.", E_OUTOFMEMORY);
hr = m_pImport->GetClassLayout(inTypeDef, NULL, rFieldOffset, cFieldOffset, &cFieldOffset, NULL);
if (FAILED(hr)) { delete [] rFieldOffset; Error("GetClassLayout() failed.", hr); }
}
//No reason to display members if we're displaying fields and methods separately
DisplayGenericParams(inTypeDef, "\t");
DisplayFields(inTypeDef, rFieldOffset, cFieldOffset);
delete [] rFieldOffset;
DisplayMethods(inTypeDef);
DisplayProperties(inTypeDef);
DisplayEvents(inTypeDef);
DisplayMethodImpls(inTypeDef);
DisplayPermissions(inTypeDef, "");
DisplayInterfaceImpls(inTypeDef);
DisplayCustomAttributes(inTypeDef, "\t");
} // void MDInfo::DisplayTypeDefInfo()
// print out information about each of the given typeDef's interfaceImpls
//
void MDInfo::DisplayInterfaceImpls(mdTypeDef inTypeDef)
{
HCORENUM interfaceImplEnum = NULL;
mdTypeRef interfaceImpls[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while(SUCCEEDED(hr = m_pImport->EnumInterfaceImpls( &interfaceImplEnum,
inTypeDef,interfaceImpls,ARRAY_SIZE(interfaceImpls), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\tInterfaceImpl #%d (%08x)", totalCount, interfaceImpls[i]);
WriteLine("\t-------------------------------------------------------");
DisplayInterfaceImplInfo(interfaceImpls[i]);
DisplayPermissions(interfaceImpls[i], "\t");
WriteLine("");
}
}
m_pImport->CloseEnum( interfaceImplEnum);
} // void MDInfo::DisplayInterfaceImpls()
// print the information for the given interface implementation
//
void MDInfo::DisplayInterfaceImplInfo(mdInterfaceImpl inImpl)
{
mdTypeDef typeDef;
mdToken token;
HRESULT hr;
WCHAR szTempBuf[STRING_BUFFER_LEN];
hr = m_pImport->GetInterfaceImplProps( inImpl, &typeDef, &token);
if (FAILED(hr)) Error("GetInterfaceImplProps failed.", hr);
VWriteLine("\t\tClass : %ls",TypeDeforRefName(typeDef, szTempBuf, ARRAY_SIZE(szTempBuf)));
VWriteLine("\t\tToken : %8.8X [%s] %ls",token,TokenTypeName(token), TypeDeforRefName(token, szTempBuf, ARRAY_SIZE(szTempBuf)));
DisplayCustomAttributes(inImpl, "\t\t");
} // void MDInfo::DisplayInterfaceImplInfo()
// displays the information for a particular property
//
void MDInfo::DisplayPropertyInfo(mdProperty inProp)
{
HRESULT hr;
mdTypeDef typeDef;
WCHAR propName[STRING_BUFFER_LEN];
DWORD flags;
#ifdef FEATURE_COMINTEROP
VARIANT defaultValue;
#endif
void const *pValue;
ULONG cbValue;
DWORD dwCPlusTypeFlag;
mdMethodDef setter, getter, otherMethod[ENUM_BUFFER_SIZE];
ULONG others;
PCCOR_SIGNATURE pbSigBlob;
ULONG ulSigBlob;
#ifdef FEATURE_COMINTEROP
::VariantInit(&defaultValue);
#endif
hr = m_pImport->GetPropertyProps(
inProp, // [IN] property token
&typeDef, // [OUT] typedef containing the property declaration.
propName, // [OUT] Property name
STRING_BUFFER_LEN, // [IN] the count of wchar of szProperty
NULL, // [OUT] actual count of wchar for property name
&flags, // [OUT] property flags.
&pbSigBlob, // [OUT] Signature Blob.
&ulSigBlob, // [OUT] Number of bytes in the signature blob.
&dwCPlusTypeFlag, // [OUT] default value
&pValue,
&cbValue,
&setter, // [OUT] setter method of the property
&getter, // [OUT] getter method of the property
otherMethod, // [OUT] other methods of the property
ENUM_BUFFER_SIZE, // [IN] size of rmdOtherMethod
&others); // [OUT] total number of other method of this property
if (FAILED(hr)) Error("GetPropertyProps failed.", hr);
VWriteLine("\t\tProp.Name : %ls (%8.8X)",propName,inProp);
char sFlags[STRING_BUFFER_LEN];
sFlags[0] = 0;
ISFLAG(Pr, SpecialName);
ISFLAG(Pr, RTSpecialName);
ISFLAG(Pr, HasDefault);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags);
if (ulSigBlob)
DisplaySignature(pbSigBlob, ulSigBlob, "");
else
VWriteLine("\t\tERROR: no valid signature ");
WCHAR szTempBuf[STRING_BUFFER_LEN];
#ifdef FEATURE_COMINTEROP
_FillVariant((BYTE)dwCPlusTypeFlag, pValue, cbValue, &defaultValue);
VWriteLine("\t\tDefltValue: %ls",VariantAsString(&defaultValue));
#endif
VWriteLine("\t\tSetter : (%08x) %ls",setter,MemberDeforRefName(setter, szTempBuf, ARRAY_SIZE(szTempBuf)));
VWriteLine("\t\tGetter : (%08x) %ls",getter,MemberDeforRefName(getter, szTempBuf, ARRAY_SIZE(szTempBuf)));
// do something with others?
VWriteLine("\t\t%ld Others",others);
DisplayCustomAttributes(inProp, "\t\t");
#ifdef FEATURE_COMINTEROP
::VariantClear(&defaultValue);
#endif
} // void MDInfo::DisplayPropertyInfo()
// displays info for each property
//
void MDInfo::DisplayProperties(mdTypeDef inTypeDef)
{
HCORENUM propEnum = NULL;
mdProperty props[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while(SUCCEEDED(hr = m_pImport->EnumProperties( &propEnum,
inTypeDef,props,ARRAY_SIZE(props), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\tProperty #%d (%08x)", totalCount, props[i]);
WriteLine("\t-------------------------------------------------------");
DisplayPropertyInfo(props[i]);
DisplayPermissions(props[i], "\t");
WriteLine("");
}
}
m_pImport->CloseEnum( propEnum);
} // void MDInfo::DisplayProperties()
// Display all information about a particular event
//
void MDInfo::DisplayEventInfo(mdEvent inEvent)
{
HRESULT hr;
mdTypeDef typeDef;
WCHAR eventName[STRING_BUFFER_LEN];
DWORD flags;
mdToken eventType;
mdMethodDef addOn, removeOn, fire, otherMethod[ENUM_BUFFER_SIZE];
ULONG totalOther;
hr = m_pImport->GetEventProps(
// [IN] The scope.
inEvent, // [IN] event token
&typeDef, // [OUT] typedef containing the event declaration.
eventName, // [OUT] Event name
STRING_BUFFER_LEN, // [IN] the count of wchar of szEvent
NULL, // [OUT] actual count of wchar for event's name
&flags, // [OUT] Event flags.
&eventType, // [OUT] EventType class
&addOn, // [OUT] AddOn method of the event
&removeOn, // [OUT] RemoveOn method of the event
&fire, // [OUT] Fire method of the event
otherMethod, // [OUT] other method of the event
ARRAY_SIZE(otherMethod), // [IN] size of rmdOtherMethod
&totalOther); // [OUT] total number of other method of this event
if (FAILED(hr)) Error("GetEventProps failed.", hr);
VWriteLine("\t\tName : %ls (%8.8X)",eventName,inEvent);
char sFlags[STRING_BUFFER_LEN];
sFlags[0] = 0;
ISFLAG(Ev, SpecialName);
ISFLAG(Ev, RTSpecialName);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\t\tFlags : %s (%08x)", sFlags, flags);
WCHAR szTempBuf[STRING_BUFFER_LEN];
VWriteLine("\t\tEventType : %8.8X [%s]",eventType,TokenTypeName(eventType));
VWriteLine("\t\tAddOnMethd: (%08x) %ls",addOn,MemberDeforRefName(addOn, szTempBuf, ARRAY_SIZE(szTempBuf)));
VWriteLine("\t\tRmvOnMethd: (%08x) %ls",removeOn,MemberDeforRefName(removeOn, szTempBuf, ARRAY_SIZE(szTempBuf)));
VWriteLine("\t\tFireMethod: (%08x) %ls",fire,MemberDeforRefName(fire, szTempBuf, ARRAY_SIZE(szTempBuf)));
VWriteLine("\t\t%ld OtherMethods",totalOther);
DisplayCustomAttributes(inEvent, "\t\t");
} // void MDInfo::DisplayEventInfo()
// Display information about all events in a typedef
//
void MDInfo::DisplayEvents(mdTypeDef inTypeDef)
{
HCORENUM eventEnum = NULL;
mdProperty events[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while(SUCCEEDED(hr = m_pImport->EnumEvents( &eventEnum,
inTypeDef,events,ARRAY_SIZE(events), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("\tEvent #%d (%08x)", totalCount, events[i]);
WriteLine("\t-------------------------------------------------------");
DisplayEventInfo(events[i]);
DisplayPermissions(events[i], "\t");
WriteLine("");
}
}
m_pImport->CloseEnum( eventEnum);
} // void MDInfo::DisplayEvents()
// print info for the passed-in custom attribute
// This function is used to print the custom attribute information for both TypeDefs and
// MethodDefs which need slightly different formatting. preFix helps fix it up.
//
void MDInfo::DisplayCustomAttributeInfo(mdCustomAttribute inValue, const char *preFix)
{
const BYTE *pValue; // The custom value.
ULONG cbValue; // Length of the custom value.
HRESULT hr; // A result.
mdToken tkObj; // Attributed object.
mdToken tkType; // Type of the custom attribute.
mdToken tk; // For name lookup.
LPCUTF8 pMethName=0; // Name of custom attribute ctor, if any.
CQuickBytes qSigName; // Buffer to pretty-print signature.
PCCOR_SIGNATURE pSig=0; // Signature of ctor.
ULONG cbSig; // Size of the signature.
BOOL bCoffSymbol = false; // true for coff symbol CA's.
WCHAR rcName[MAX_CLASS_NAME]; // Name of the type.
hr = m_pImport->GetCustomAttributeProps( // S_OK or error.
inValue, // The attribute.
&tkObj, // The attributed object
&tkType, // The attributes type.
(const void**)&pValue, // Put pointer to data here.
&cbValue); // Put size here.
if (FAILED(hr)) Error("GetCustomAttributeProps failed.", hr);
VWriteLine("%s\tCustomAttribute Type: %08x", preFix, tkType);
// Get the name of the memberref or methoddef.
tk = tkType;
rcName[0] = L'\0';
// Get the member name, and the parent token.
switch (TypeFromToken(tk))
{
case mdtMemberRef:
hr = m_pImport->GetNameFromToken(tk, &pMethName);
if (FAILED(hr)) Error("GetNameFromToken failed.", hr);
hr = m_pImport->GetMemberRefProps( tk, &tk, 0, 0, 0, &pSig, &cbSig);
if (FAILED(hr)) Error("GetMemberRefProps failed.", hr);
break;
case mdtMethodDef:
hr = m_pImport->GetNameFromToken(tk, &pMethName);
if (FAILED(hr)) Error("GetNameFromToken failed.", hr);
hr = m_pImport->GetMethodProps(tk, &tk, 0, 0, 0, 0, &pSig, &cbSig, 0, 0);
if (FAILED(hr)) Error("GetMethodProps failed.", hr);
break;
} // switch
// Get the type name.
switch (TypeFromToken(tk))
{
case mdtTypeDef:
hr = m_pImport->GetTypeDefProps(tk, rcName,MAX_CLASS_NAME,0, 0,0);
if (FAILED(hr)) Error("GetTypeDefProps failed.", hr);
break;
case mdtTypeRef:
hr = m_pImport->GetTypeRefProps(tk, 0, rcName,MAX_CLASS_NAME,0);
if (FAILED(hr)) Error("GetTypeRefProps failed.", hr);
break;
} // switch
if (pSig && pMethName)
{
int iLen;
LPWSTR pwzName = (LPWSTR)(new WCHAR[iLen= 1+(ULONG32)strlen(pMethName)]);
if(pwzName)
{
WszMultiByteToWideChar(CP_UTF8,0, pMethName,-1, pwzName,iLen);
PrettyPrintSigLegacy(pSig, cbSig, pwzName, &qSigName, m_pImport);
delete [] pwzName;
}
}
VWrite("%s\tCustomAttributeName: %ls", preFix, rcName);
if (pSig && pMethName)
VWrite(" :: %S", qSigName.Ptr());
// Keep track of coff overhead.
if (!wcscmp(W("__DecoratedName"), rcName))
{
bCoffSymbol = true;
g_cbCoffNames += cbValue + 6;
}
WriteLine("");
VWriteLine("%s\tLength: %ld", preFix, cbValue);
char newPreFix[40];
sprintf_s(newPreFix, 40, "%s\tValue ", preFix);
DumpHex(newPreFix, pValue, cbValue);
if (bCoffSymbol)
VWriteLine("%s\t %s", preFix, pValue);
// Try to decode the constructor blob. This is incomplete, but covers the most popular cases.
if (pSig)
{ // Interpret the signature.
PCCOR_SIGNATURE ps = pSig;
ULONG cb;
ULONG ulData;
ULONG cParams;
ULONG ulVal;
UINT8 u1 = 0;
UINT16 u2 = 0;
UINT32 u4 = 0;
UINT64 u8 = 0;
unsigned __int64 uI64;
double dblVal;
ULONG cbVal;
LPCUTF8 pStr;
CustomAttributeParser CA(pValue, cbValue);
CA.ValidateProlog();
// Get the calling convention.
cb = CorSigUncompressData(ps, &ulData);
ps += cb;
// Get the count of params.
cb = CorSigUncompressData(ps, &cParams);
ps += cb;
// Get the return value.
cb = CorSigUncompressData(ps, &ulData);
ps += cb;
if (ulData == ELEMENT_TYPE_VOID)
{
VWrite("%s\tctor args: (", preFix);
// For each param...
for (ULONG i=0; i<cParams; ++i)
{ // Get the next param type.
cb = CorSigUncompressData(ps, &ulData);
ps += cb;
if (i) Write(", ");
DoObject:
switch (ulData)
{
// For ET_OBJECT, the next byte in the blob is the ET of the actual data.
case ELEMENT_TYPE_OBJECT:
CA.GetU1(&u1);
ulData = u1;
goto DoObject;
case ELEMENT_TYPE_I1:
case ELEMENT_TYPE_U1:
CA.GetU1(&u1);
ulVal = u1;
goto PrintVal;
case ELEMENT_TYPE_I2:
case ELEMENT_TYPE_U2:
CA.GetU2(&u2);
ulVal = u2;
goto PrintVal;
case ELEMENT_TYPE_I4:
case ELEMENT_TYPE_U4:
CA.GetU4(&u4);
ulVal = u4;
PrintVal:
VWrite("%d", ulVal);
break;
case ELEMENT_TYPE_STRING:
CA.GetString(&pStr, &cbVal);
VWrite("\"%s\"", pStr);
break;
// The only class type that we accept is Type, which is stored as a string.
case ELEMENT_TYPE_CLASS:
// Eat the class type.
cb = CorSigUncompressData(ps, &ulData);
ps += cb;
// Get the name of the type.
CA.GetString(&pStr, &cbVal);
VWrite("typeof(%s)", pStr);
break;
case SERIALIZATION_TYPE_TYPE:
CA.GetString(&pStr, &cbVal);
VWrite("typeof(%s)", pStr);
break;
case ELEMENT_TYPE_I8:
case ELEMENT_TYPE_U8:
CA.GetU8(&u8);
uI64 = u8;
VWrite("%#lx", uI64);
break;
case ELEMENT_TYPE_R4:
dblVal = CA.GetR4();
VWrite("%f", dblVal);
break;
case ELEMENT_TYPE_R8:
dblVal = CA.GetR8();
VWrite("%f", dblVal);
break;
default:
// bail...
i = cParams;
Write(" <can not decode> ");
break;
}
}
WriteLine(")");
}
}
WriteLine("");
} // void MDInfo::DisplayCustomAttributeInfo()
// Print all custom values for the given token
// This function is used to print the custom value information for all tokens,
// which need slightly different formatting. preFix helps fix it up.
//
void MDInfo::DisplayCustomAttributes(mdToken inToken, const char *preFix)
{
HCORENUM customAttributeEnum = NULL;
mdTypeRef customAttributes[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while(SUCCEEDED(hr = m_pImport->EnumCustomAttributes( &customAttributeEnum, inToken, 0,
customAttributes, ARRAY_SIZE(customAttributes), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("%sCustomAttribute #%d (%08x)", preFix, totalCount, customAttributes[i]);
VWriteLine("%s-------------------------------------------------------", preFix);
DisplayCustomAttributeInfo(customAttributes[i], preFix);
}
}
m_pImport->CloseEnum( customAttributeEnum);
} // void MDInfo::DisplayCustomAttributes()
// Show the passed-in token's permissions
//
//
void MDInfo::DisplayPermissions(mdToken tk, const char *preFix)
{
HCORENUM permissionEnum = NULL;
mdPermission permissions[ENUM_BUFFER_SIZE];
ULONG count, totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pImport->EnumPermissionSets( &permissionEnum,
tk, 0, permissions, ARRAY_SIZE(permissions), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("%s\tPermission #%d (%08x)", preFix, totalCount, permissions[i]);
VWriteLine("%s\t-------------------------------------------------------", preFix);
DisplayPermissionInfo(permissions[i], preFix);
WriteLine("");
}
}
m_pImport->CloseEnum( permissionEnum);
} // void MDInfo::DisplayPermissions()
// print properties of the given permission set
//
//
void MDInfo::DisplayPermissionInfo(mdPermission inPermission, const char *preFix)
{
DWORD dwAction;
const BYTE *pvPermission;
ULONG cbPermission;
const char *flagDesc = NULL;
char newPreFix[STRING_BUFFER_LEN];
HRESULT hr;
hr = m_pImport->GetPermissionSetProps( inPermission, &dwAction,
(const void**)&pvPermission, &cbPermission);
if (FAILED(hr)) Error("GetPermissionSetProps failed.", hr);
switch(dwAction)
{
case dclActionNil: flagDesc = "ActionNil"; break;
case dclRequest: flagDesc = "Request"; break;
case dclDemand: flagDesc = "Demand"; break;
case dclAssert: flagDesc = "Assert"; break;
case dclDeny: flagDesc = "Deny"; break;
case dclPermitOnly: flagDesc = "PermitOnly"; break;
case dclLinktimeCheck: flagDesc = "LinktimeCheck"; break;
case dclInheritanceCheck: flagDesc = "InheritanceCheck"; break;
case dclRequestMinimum: flagDesc = "RequestMinimum"; break;
case dclRequestOptional: flagDesc = "RequestOptional"; break;
case dclRequestRefuse: flagDesc = "RequestRefuse"; break;
case dclPrejitGrant: flagDesc = "PrejitGrant"; break;
case dclPrejitDenied: flagDesc = "PrejitDenied"; break;
case dclNonCasDemand: flagDesc = "NonCasDemand"; break;
case dclNonCasLinkDemand: flagDesc = "NonCasLinkDemand"; break;
case dclNonCasInheritance: flagDesc = "NonCasInheritance"; break;
}
VWriteLine("%s\t\tAction : %s", preFix, flagDesc);
VWriteLine("%s\t\tBlobLen : %d", preFix, cbPermission);
if (cbPermission)
{
sprintf_s(newPreFix, STRING_BUFFER_LEN, "%s\tBlob", preFix);
DumpHex(newPreFix, pvPermission, cbPermission, false, 24);
}
sprintf_s (newPreFix, STRING_BUFFER_LEN, "\t\t%s", preFix);
DisplayCustomAttributes(inPermission, newPreFix);
} // void MDInfo::DisplayPermissionInfo()
// simply prints out the given GUID in standard form
LPWSTR MDInfo::GUIDAsString(GUID inGuid, _Out_writes_(bufLen) LPWSTR guidString, ULONG bufLen)
{
StringFromGUID2(inGuid, guidString, bufLen);
return guidString;
} // LPWSTR MDInfo::GUIDAsString()
#ifdef FEATURE_COMINTEROP
LPCWSTR MDInfo::VariantAsString(VARIANT *pVariant)
{
HRESULT hr = S_OK;
if (V_VT(pVariant) == VT_UNKNOWN)
{
_ASSERTE(V_UNKNOWN(pVariant) == NULL);
return W("<NULL>");
}
else if (SUCCEEDED(hr = ::VariantChangeType(pVariant, pVariant, 0, VT_BSTR)))
return V_BSTR(pVariant);
else if (hr == DISP_E_BADVARTYPE && V_VT(pVariant) == VT_I8)
{
// allocate the bstr.
char szStr[32];
WCHAR wszStr[32];
// Set variant type to bstr.
V_VT(pVariant) = VT_BSTR;
// Create the ansi string.
sprintf_s(szStr, 32, "%I64d", V_CY(pVariant).int64);
// Convert to unicode.
WszMultiByteToWideChar(CP_ACP, 0, szStr, -1, wszStr, 32);
// convert to bstr and set variant value.
V_BSTR(pVariant) = ::SysAllocString(wszStr);
if (V_BSTR(pVariant) == NULL)
Error("SysAllocString() failed.", E_OUTOFMEMORY);
return V_BSTR(pVariant);
}
else
return W("ERROR");
} // LPWSTR MDInfo::VariantAsString()
#endif
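// Uncompresses one metadata-compressed integer starting at pData. On success the
// decoded value is written to *pDataOut, *cbCur is advanced by the number of bytes
// consumed, and true is returned; on failure *cbCur is set to (ULONG)-1, which the
// callers treat as an error sentinel, and false is returned.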
bool TrySigUncompress(PCCOR_SIGNATURE pData, // [IN] compressed data
ULONG *pDataOut, // [OUT] the expanded *pData
ULONG *cbCur)
{
ULONG ulSize = CorSigUncompressData(pData, pDataOut);
if (ulSize == (ULONG)-1)
{
*cbCur = ulSize;
return false;
} else
{
*cbCur += ulSize;
return true;
}
}
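// Decodes and displays the native marshaling (FieldMarshal) blob attached to the
// given field or parameter token, if one is present; blobs that cannot be decoded
// are dumped as hex.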
void MDInfo::DisplayFieldMarshal(mdToken inToken)
{
PCCOR_SIGNATURE pvNativeType; // [OUT] native type of this field
ULONG cbNativeType; // [OUT] the count of bytes of *ppvNativeType
HRESULT hr;
hr = m_pImport->GetFieldMarshal( inToken, &pvNativeType, &cbNativeType);
if (FAILED(hr) && hr != CLDB_E_RECORD_NOTFOUND) Error("GetFieldMarshal failed.", hr);
if (hr != CLDB_E_RECORD_NOTFOUND)
{
ULONG cbCur = 0;
ULONG ulData;
ULONG ulStrLoc;
char szNTDesc[STRING_BUFFER_LEN];
while (cbCur < cbNativeType)
{
ulStrLoc = 0;
ulData = NATIVE_TYPE_MAX;
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
if (ulData >= sizeof(g_szNativeType)/sizeof(*g_szNativeType))
{
cbCur = (ULONG)-1;
continue;
}
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "%s ", g_szNativeType[ulData]);
switch (ulData)
{
case NATIVE_TYPE_FIXEDSYSSTRING:
{
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{StringElementCount: %d} ",ulData);
}
}
break;
case NATIVE_TYPE_FIXEDARRAY:
{
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{ArrayElementCount: %d",ulData);
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", ArrayElementType(NT): %d",ulData);
}
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc,"}");
}
}
break;
case NATIVE_TYPE_ARRAY:
{
if (cbCur < cbNativeType)
{
BOOL bElemTypeSpecified;
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
if (ulData != NATIVE_TYPE_MAX)
{
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{ArrayElementType(NT): %d", ulData);
bElemTypeSpecified = TRUE;
}
else
{
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{");
bElemTypeSpecified = FALSE;
}
if (cbCur < cbNativeType)
{
if (bElemTypeSpecified)
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", ");
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "SizeParamIndex: %d",ulData);
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", SizeParamMultiplier: %d",ulData);
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, ", SizeConst: %d",ulData);
}
}
}
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "}");
}
}
break;
case NATIVE_TYPE_SAFEARRAY:
{
if (cbCur < cbNativeType)
{
if (!TrySigUncompress(&pvNativeType[cbCur], &ulData, &cbCur))
continue;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{SafeArraySubType(VT): %d, ",ulData);
// Extract the element type name if it is specified.
if (cbCur < cbNativeType)
{
LPUTF8 strTemp = NULL;
int strLen = 0;
int ByteCountLength = 0;
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength);
cbCur += ByteCountLength;
strTemp = (LPUTF8)(new char[strLen + 1]);
if(strTemp)
{
memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen);
strTemp[strLen] = 0;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "ElementTypeName: %s}", strTemp);
cbCur += strLen;
_ASSERTE(cbCur == cbNativeType);
delete [] strTemp;
}
}
else
{
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "ElementTypeName: }");
}
}
}
break;
case NATIVE_TYPE_CUSTOMMARSHALER:
{
LPUTF8 strTemp = NULL;
int strLen = 0;
int ByteCountLength = 0;
// Extract the typelib GUID.
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength);
cbCur += ByteCountLength;
strTemp = (LPUTF8)(new char[strLen + 1]);
if(strTemp)
{
memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen);
strTemp[strLen] = 0;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "{Typelib: %s, ", strTemp);
cbCur += strLen;
_ASSERTE(cbCur < cbNativeType);
delete [] strTemp;
}
// Extract the name of the native type.
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength);
cbCur += ByteCountLength;
strTemp = (LPUTF8)(new char[strLen + 1]);
if(strTemp)
{
memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen);
strTemp[strLen] = 0;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Native: %s, ", strTemp);
cbCur += strLen;
_ASSERTE(cbCur < cbNativeType);
delete [] strTemp;
}
// Extract the name of the custom marshaler.
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength);
cbCur += ByteCountLength;
strTemp = (LPUTF8)(new char[strLen + 1]);
if(strTemp)
{
memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen);
strTemp[strLen] = 0;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Marshaler: %s, ", strTemp);
cbCur += strLen;
_ASSERTE(cbCur < cbNativeType);
delete [] strTemp;
}
// Extract the cookie string.
strLen = CPackedLen::GetLength(&pvNativeType[cbCur], &ByteCountLength);
cbCur += ByteCountLength;
if (strLen > 0)
{
strTemp = (LPUTF8)(new char[strLen + 1]);
if(strTemp)
{
memcpy(strTemp, (LPUTF8)&pvNativeType[cbCur], strLen);
strTemp[strLen] = 0;
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Cookie: ");
// Copy the cookie string and transform the embedded nulls into \0's.
for (int i = 0; i < strLen - 1; i++, cbCur++)
{
if (strTemp[i] == 0)
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "\\0");
else
szNTDesc[ulStrLoc++] = strTemp[i];
}
szNTDesc[ulStrLoc++] = strTemp[strLen - 1];
cbCur++;
delete [] strTemp;
}
}
else
{
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "Cookie: ");
}
// Finish the custom marshaler native type description.
ulStrLoc += sprintf_s(szNTDesc + ulStrLoc, STRING_BUFFER_LEN-ulStrLoc, "}");
_ASSERTE(cbCur <= cbNativeType);
}
break;
default:
{
// normal nativetype element: do nothing
}
}
VWriteLine("\t\t\t\t%s",szNTDesc);
if (ulData >= NATIVE_TYPE_MAX)
break;
}
if (cbCur == (ULONG)-1)
{
// There was something that we didn't grok in the signature.
// Just dump out the blob as hex
VWrite("\t\t\t\t{", szNTDesc);
while (cbNativeType--)
VWrite(" %2.2X", *pvNativeType++);
VWriteLine(" }");
}
}
} // void MDInfo::DisplayFieldMarshal()
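// Displays the P/Invoke map data (entry point, module ref, and mapping flags) for
// the given token, if any is present.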
void MDInfo::DisplayPinvokeInfo(mdToken inToken)
{
HRESULT hr = NOERROR;
DWORD flags;
WCHAR rcImport[512];
mdModuleRef tkModuleRef;
char sFlags[STRING_BUFFER_LEN];
hr = m_pImport->GetPinvokeMap(inToken, &flags, rcImport,
ARRAY_SIZE(rcImport), 0, &tkModuleRef);
if (FAILED(hr))
{
if (hr != CLDB_E_RECORD_NOTFOUND)
VWriteLine("ERROR: GetPinvokeMap failed.", hr);
return;
}
WriteLine("\t\tPinvoke Map Data:");
VWriteLine("\t\tEntry point: %S", rcImport);
VWriteLine("\t\tModule ref: %08x", tkModuleRef);
sFlags[0] = 0;
ISFLAG(Pm, NoMangle);
ISFLAG(Pm, CharSetNotSpec);
ISFLAG(Pm, CharSetAnsi);
ISFLAG(Pm, CharSetUnicode);
ISFLAG(Pm, CharSetAuto);
ISFLAG(Pm, SupportsLastError);
ISFLAG(Pm, CallConvWinapi);
ISFLAG(Pm, CallConvCdecl);
ISFLAG(Pm, CallConvStdcall);
ISFLAG(Pm, CallConvThiscall);
ISFLAG(Pm, CallConvFastcall);
ISFLAG(Pm, BestFitEnabled);
ISFLAG(Pm, BestFitDisabled);
ISFLAG(Pm, BestFitUseAssem);
ISFLAG(Pm, ThrowOnUnmappableCharEnabled);
ISFLAG(Pm, ThrowOnUnmappableCharDisabled);
ISFLAG(Pm, ThrowOnUnmappableCharUseAssem);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\t\tMapping flags: %s (%08x)", sFlags, flags);
} // void MDInfo::DisplayPinvokeInfo()
/////////////////////////////////////////////////////////////////////////
// void DisplaySignature(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob);
//
// Display COM+ signature -- taken from cordump.cpp's DumpSignature
/////////////////////////////////////////////////////////////////////////
void MDInfo::DisplaySignature(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob, const char *preFix)
{
ULONG cbCur = 0;
ULONG cb;
// 428793: Prefix complained correctly about uninitialized data.
ULONG ulData = (ULONG) IMAGE_CEE_CS_CALLCONV_MAX;
ULONG ulArgs;
HRESULT hr = NOERROR;
ULONG ulSigBlobStart = ulSigBlob;
// initialize sigBuf
InitSigBuffer();
cb = CorSigUncompressData(pbSigBlob, &ulData);
VWriteLine("%s\t\tCallCnvntn: %s", preFix, (g_strCalling[ulData & IMAGE_CEE_CS_CALLCONV_MASK]));
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
if (ulData & IMAGE_CEE_CS_CALLCONV_HASTHIS)
VWriteLine("%s\t\thasThis ", preFix);
if (ulData & IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS)
VWriteLine("%s\t\texplicit ", preFix);
if (ulData & IMAGE_CEE_CS_CALLCONV_GENERIC)
VWriteLine("%s\t\tgeneric ", preFix);
// initialize sigBuf
InitSigBuffer();
if ( isCallConv(ulData,IMAGE_CEE_CS_CALLCONV_FIELD) )
{
// display field type
if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb)))
goto ErrExit;
VWriteLine("%s\t\tField type: %s", preFix, (LPSTR)m_sigBuf.Ptr());
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
}
else
{
if (ulData & IMAGE_CEE_CS_CALLCONV_GENERIC)
{
ULONG ulTyArgs;
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulTyArgs);
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
VWriteLine("%s\t\tType Arity:%d ", preFix, ulTyArgs);
}
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulArgs);
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
if (ulData != IMAGE_CEE_CS_CALLCONV_LOCAL_SIG && ulData != IMAGE_CEE_CS_CALLCONV_GENERICINST)
{
// display return type when it is not a local varsig
if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb)))
goto ErrExit;
VWriteLine("%s\t\tReturnType:%s", preFix, (LPSTR)m_sigBuf.Ptr());
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
}
// display count of arguments
// display arguments
if (ulSigBlob)
VWriteLine("%s\t\t%ld Arguments", preFix, ulArgs);
else
VWriteLine("%s\t\tNo arguments.", preFix);
ULONG i = 0;
while (i < ulArgs && ulSigBlob > 0)
{
ULONG ulDataTemp;
// Handle the sentinel for varargs because it isn't counted in the args.
CorSigUncompressData(&pbSigBlob[cbCur], &ulDataTemp);
++i;
// initialize sigBuf
InitSigBuffer();
if (FAILED(hr = GetOneElementType(&pbSigBlob[cbCur], ulSigBlob, &cb)))
goto ErrExit;
VWriteLine("%s\t\t\tArgument #%ld: %s",preFix, i, (LPSTR)m_sigBuf.Ptr());
if (cb>ulSigBlob)
goto ErrExit;
cbCur += cb;
ulSigBlob -= cb;
}
}
// Nothing consumed but not yet counted.
cb = 0;
ErrExit:
// We should have consumed all signature blob. If not, dump the sig in hex.
// Also dump in hex if so requested.
if (m_DumpFilter & dumpMoreHex || ulSigBlob != 0)
{
// Did we not consume enough, or try to consume too much?
if (cb > ulSigBlob)
WriteLine("\tERROR IN SIGNATURE: Signature should be larger.");
else
if (cb < ulSigBlob)
{
VWrite("\tERROR IN SIGNATURE: Not all of signature blob was consumed. %d byte(s) remain", ulSigBlob);
// If it is short, just append it to the end.
if (ulSigBlob < 4)
{
Write(": ");
for (; ulSigBlob; ++cbCur, --ulSigBlob)
VWrite("%02x ", pbSigBlob[cbCur]);
WriteLine("");
goto ErrExit2;
}
WriteLine("");
}
// Any appropriate error message has been issued. Dump sig in hex, as determined
// by error or command line switch.
cbCur = 0;
ulSigBlob = ulSigBlobStart;
char rcNewPrefix[80];
sprintf_s(rcNewPrefix, 80, "%s\t\tSignature ", preFix);
DumpHex(rcNewPrefix, pbSigBlob, ulSigBlob, false, 24);
}
ErrExit2:
if (FAILED(hr))
Error("ERROR!! Bad signature blob value!");
return;
} // void MDInfo::DisplaySignature()
/////////////////////////////////////////////////////////////////////////
// HRESULT GetOneElementType(mdScope tkScope, BYTE *pbSigBlob, ULONG ulSigBlob, ULONG *pcb)
//
// Adds description of element type to the end of buffer -- caller must ensure
// buffer is large enough.
/////////////////////////////////////////////////////////////////////////
HRESULT MDInfo::GetOneElementType(PCCOR_SIGNATURE pbSigBlob, ULONG ulSigBlob, ULONG *pcb)
{
HRESULT hr = S_OK; // A result.
ULONG cbCur = 0;
ULONG cb;
ULONG ulData = ELEMENT_TYPE_MAX;
ULONG ulTemp;
int iTemp = 0;
mdToken tk;
cb = CorSigUncompressData(pbSigBlob, &ulData);
cbCur += cb;
// Handle the modifiers.
if (ulData & ELEMENT_TYPE_MODIFIER)
{
if (ulData == ELEMENT_TYPE_SENTINEL)
IfFailGo(AddToSigBuffer("<ELEMENT_TYPE_SENTINEL>"));
else if (ulData == ELEMENT_TYPE_PINNED)
IfFailGo(AddToSigBuffer("PINNED"));
else
{
hr = E_FAIL;
goto ErrExit;
}
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
goto ErrExit;
}
// Handle the underlying element types.
if (ulData >= ELEMENT_TYPE_MAX)
{
hr = E_FAIL;
goto ErrExit;
}
while (ulData == ELEMENT_TYPE_PTR || ulData == ELEMENT_TYPE_BYREF)
{
IfFailGo(AddToSigBuffer(" "));
IfFailGo(AddToSigBuffer(g_szMapElementType[ulData]));
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData);
cbCur += cb;
}
IfFailGo(AddToSigBuffer(" "));
IfFailGo(AddToSigBuffer(g_szMapElementType[ulData]));
if (CorIsPrimitiveType((CorElementType)ulData) ||
ulData == ELEMENT_TYPE_TYPEDBYREF ||
ulData == ELEMENT_TYPE_OBJECT ||
ulData == ELEMENT_TYPE_I ||
ulData == ELEMENT_TYPE_U)
{
// If this is a primitive type, we are done
goto ErrExit;
}
if (ulData == ELEMENT_TYPE_VALUETYPE ||
ulData == ELEMENT_TYPE_CLASS ||
ulData == ELEMENT_TYPE_CMOD_REQD ||
ulData == ELEMENT_TYPE_CMOD_OPT)
{
cb = CorSigUncompressToken(&pbSigBlob[cbCur], &tk);
cbCur += cb;
// get the name of the type def or ref; don't care if truncated
if (TypeFromToken(tk) == mdtTypeDef || TypeFromToken(tk) == mdtTypeRef)
{
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %ls",TypeDeforRefName(tk, m_szTempBuf, ARRAY_SIZE(m_szTempBuf)));
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
}
else
{
_ASSERTE(TypeFromToken(tk) == mdtTypeSpec);
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %8x", tk);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
}
if (ulData == ELEMENT_TYPE_CMOD_REQD ||
ulData == ELEMENT_TYPE_CMOD_OPT)
{
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
}
goto ErrExit;
}
if (ulData == ELEMENT_TYPE_SZARRAY)
{
// display the base type of SZARRAY
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
goto ErrExit;
}
// instantiated type
if (ulData == ELEMENT_TYPE_GENERICINST)
{
// display the type constructor
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
ULONG numArgs;
cb = CorSigUncompressData(&pbSigBlob[cbCur], &numArgs);
cbCur += cb;
IfFailGo(AddToSigBuffer("<"));
while (numArgs > 0)
{
if (cbCur > ulSigBlob)
goto ErrExit;
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
--numArgs;
if (numArgs > 0)
IfFailGo(AddToSigBuffer(","));
}
IfFailGo(AddToSigBuffer(">"));
goto ErrExit;
}
if (ulData == ELEMENT_TYPE_VAR)
{
ULONG index;
cb = CorSigUncompressData(&pbSigBlob[cbCur], &index);
cbCur += cb;
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, "!%d", index);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
goto ErrExit;
}
if (ulData == ELEMENT_TYPE_MVAR)
{
ULONG index;
cb = CorSigUncompressData(&pbSigBlob[cbCur], &index);
cbCur += cb;
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, "!!%d", index);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
goto ErrExit;
}
if (ulData == ELEMENT_TYPE_FNPTR)
{
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData);
cbCur += cb;
if (ulData & IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS)
IfFailGo(AddToSigBuffer(" explicit"));
if (ulData & IMAGE_CEE_CS_CALLCONV_HASTHIS)
IfFailGo(AddToSigBuffer(" hasThis"));
IfFailGo(AddToSigBuffer(" "));
IfFailGo(AddToSigBuffer(g_strCalling[ulData & IMAGE_CEE_CS_CALLCONV_MASK]));
// Get number of args
ULONG numArgs;
cb = CorSigUncompressData(&pbSigBlob[cbCur], &numArgs);
cbCur += cb;
// do return type
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
IfFailGo(AddToSigBuffer("("));
while (numArgs > 0)
{
if (cbCur > ulSigBlob)
goto ErrExit;
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
--numArgs;
if (numArgs > 0)
IfFailGo(AddToSigBuffer(","));
}
IfFailGo(AddToSigBuffer(" )"));
goto ErrExit;
}
if(ulData != ELEMENT_TYPE_ARRAY) return E_FAIL;
// display the element type of the MDARRAY
if (FAILED(GetOneElementType(&pbSigBlob[cbCur], ulSigBlob-cbCur, &cb)))
goto ErrExit;
cbCur += cb;
// display the rank of MDARRAY
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData);
cbCur += cb;
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
if (ulData == 0)
// we are done if no rank specified
goto ErrExit;
// how many dimensions have size specified?
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData);
cbCur += cb;
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
while (ulData)
{
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulTemp);
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulTemp);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
cbCur += cb;
ulData--;
}
// how many dimensions have lower bounds specified?
cb = CorSigUncompressData(&pbSigBlob[cbCur], &ulData);
cbCur += cb;
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", ulData);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
while (ulData)
{
cb = CorSigUncompressSignedInt(&pbSigBlob[cbCur], &iTemp);
sprintf_s(m_tempFormatBuffer, STRING_BUFFER_LEN, " %d", iTemp);
IfFailGo(AddToSigBuffer(m_tempFormatBuffer));
cbCur += cb;
ulData--;
}
ErrExit:
if (cbCur > ulSigBlob)
hr = E_FAIL;
*pcb = cbCur;
return hr;
} // HRESULT MDInfo::GetOneElementType()
// Display the fields of the N/Direct custom value structure.
void MDInfo::DisplayCorNativeLink(COR_NATIVE_LINK *pCorNLnk, const char *preFix)
{
// Print the LinkType.
const char *curField = "\tLink Type : ";
switch(pCorNLnk->m_linkType)
{
case nltNone:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nltNone", pCorNLnk->m_linkType);
break;
case nltAnsi:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nltAnsi", pCorNLnk->m_linkType);
break;
case nltUnicode:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nltUnicode", pCorNLnk->m_linkType);
break;
case nltAuto:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nltAuto", pCorNLnk->m_linkType);
break;
default:
_ASSERTE(!"Invalid Native Link Type!");
}
// Print the link flags
curField = "\tLink Flags : ";
switch(pCorNLnk->m_flags)
{
case nlfNone:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nlfNone", pCorNLnk->m_flags);
break;
case nlfLastError:
VWriteLine("%s%s%s(%02x)", preFix, curField, "nlfLastError", pCorNLnk->m_flags);
break;
default:
_ASSERTE(!"Invalid Native Link Flags!");
}
// Print the entry point.
WCHAR memRefName[STRING_BUFFER_LEN];
HRESULT hr;
hr = m_pImport->GetMemberRefProps( pCorNLnk->m_entryPoint, NULL, memRefName,
STRING_BUFFER_LEN, NULL, NULL, NULL);
if (FAILED(hr)) Error("GetMemberRefProps failed.", hr);
VWriteLine("%s\tEntry Point : %ls (0x%08x)", preFix, memRefName, pCorNLnk->m_entryPoint);
} // void MDInfo::DisplayCorNativeLink()
// Fills the given variant with the value pointed to by pValue, interpreted according to bCPlusTypeFlag
//
// Taken from MetaInternal.cpp
HRESULT _FillVariant(
BYTE bCPlusTypeFlag,
const void *pValue,
ULONG cbValue,
VARIANT *pvar)
{
HRESULT hr = NOERROR;
switch (bCPlusTypeFlag)
{
case ELEMENT_TYPE_BOOLEAN:
V_VT(pvar) = VT_BOOL;
V_BOOL(pvar) = *((BYTE*)pValue); //*((UNALIGNED VARIANT_BOOL *)pValue);
break;
case ELEMENT_TYPE_I1:
V_VT(pvar) = VT_I1;
V_I1(pvar) = *((CHAR*)pValue);
break;
case ELEMENT_TYPE_U1:
V_VT(pvar) = VT_UI1;
V_UI1(pvar) = *((BYTE*)pValue);
break;
case ELEMENT_TYPE_I2:
V_VT(pvar) = VT_I2;
V_I2(pvar) = GET_UNALIGNED_VAL16(pValue);
break;
case ELEMENT_TYPE_U2:
case ELEMENT_TYPE_CHAR:
V_VT(pvar) = VT_UI2;
V_UI2(pvar) = GET_UNALIGNED_VAL16(pValue);
break;
case ELEMENT_TYPE_I4:
V_VT(pvar) = VT_I4;
V_I4(pvar) = GET_UNALIGNED_VAL32(pValue);
break;
case ELEMENT_TYPE_U4:
V_VT(pvar) = VT_UI4;
V_UI4(pvar) = GET_UNALIGNED_VAL32(pValue);
break;
case ELEMENT_TYPE_R4:
{
V_VT(pvar) = VT_R4;
__int32 Value = GET_UNALIGNED_VAL32(pValue);
V_R4(pvar) = (float &)Value;
}
break;
case ELEMENT_TYPE_R8:
{
V_VT(pvar) = VT_R8;
__int64 Value = GET_UNALIGNED_VAL64(pValue);
V_R8(pvar) = (double &) Value;
}
break;
case ELEMENT_TYPE_STRING:
{
V_VT(pvar) = VT_BSTR;
WCHAR *TempString;
#if BIGENDIAN
TempString = (WCHAR *)alloca(cbValue);
memcpy(TempString, pValue, cbValue);
SwapStringLength(TempString, cbValue/sizeof(WCHAR));
#else
TempString = (WCHAR *)pValue;
#endif
// allocated bstr here
V_BSTR(pvar) = ::SysAllocStringLen((LPWSTR)TempString, cbValue/sizeof(WCHAR));
if (V_BSTR(pvar) == NULL)
hr = E_OUTOFMEMORY;
}
break;
case ELEMENT_TYPE_CLASS:
V_VT(pvar) = VT_UNKNOWN;
V_UNKNOWN(pvar) = NULL;
// _ASSERTE( GET_UNALIGNED_VAL32(pValue) == 0);
break;
case ELEMENT_TYPE_I8:
V_VT(pvar) = VT_I8;
V_CY(pvar).int64 = GET_UNALIGNED_VAL64(pValue);
break;
case ELEMENT_TYPE_U8:
V_VT(pvar) = VT_UI8;
V_CY(pvar).int64 = GET_UNALIGNED_VAL64(pValue);
break;
case ELEMENT_TYPE_VOID:
V_VT(pvar) = VT_EMPTY;
break;
default:
_ASSERTE(!"bad constant value type!");
}
return hr;
} // HRESULT _FillVariant()
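// Displays the assembly-level metadata: the assembly itself, its assembly refs,
// files, exported types, and manifest resources.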
void MDInfo::DisplayAssembly()
{
if (m_pAssemblyImport)
{
DisplayAssemblyInfo();
DisplayAssemblyRefs();
DisplayFiles();
DisplayExportedTypes();
DisplayManifestResources();
}
} // void MDInfo::DisplayAssembly()
void MDInfo::DisplayAssemblyInfo()
{
HRESULT hr;
mdAssembly mda;
const BYTE *pbPublicKey;
ULONG cbPublicKey;
ULONG ulHashAlgId;
WCHAR szName[STRING_BUFFER_LEN];
ASSEMBLYMETADATA MetaData;
DWORD dwFlags;
hr = m_pAssemblyImport->GetAssemblyFromScope(&mda);
if (hr == CLDB_E_RECORD_NOTFOUND)
return;
else if (FAILED(hr)) Error("GetAssemblyFromScope() failed.", hr);
// Get the required sizes for the arrays of locales, processors etc.
ZeroMemory(&MetaData, sizeof(ASSEMBLYMETADATA));
hr = m_pAssemblyImport->GetAssemblyProps(mda,
NULL, NULL, // Public Key.
NULL, // Hash Algorithm.
NULL, 0, NULL, // Name.
&MetaData,
NULL); // Flags.
if (FAILED(hr)) Error("GetAssemblyProps() failed.", hr);
// Allocate space for the arrays in the ASSEMBLYMETADATA structure.
if (MetaData.cbLocale)
MetaData.szLocale = new WCHAR[MetaData.cbLocale];
if (MetaData.ulProcessor)
MetaData.rProcessor = new DWORD[MetaData.ulProcessor];
if (MetaData.ulOS)
MetaData.rOS = new OSINFO[MetaData.ulOS];
hr = m_pAssemblyImport->GetAssemblyProps(mda,
(const void **)&pbPublicKey, &cbPublicKey,
&ulHashAlgId,
szName, STRING_BUFFER_LEN, NULL,
&MetaData,
&dwFlags);
if (FAILED(hr)) Error("GetAssemblyProps() failed.", hr);
WriteLine("Assembly");
WriteLine("-------------------------------------------------------");
VWriteLine("\tToken: 0x%08x", mda);
VWriteLine("\tName : %ls", szName);
DumpHex("\tPublic Key ", pbPublicKey, cbPublicKey, false, 24);
VWriteLine("\tHash Algorithm : 0x%08x", ulHashAlgId);
DisplayASSEMBLYMETADATA(&MetaData);
if(MetaData.szLocale) delete [] MetaData.szLocale;
if(MetaData.rProcessor) delete [] MetaData.rProcessor;
if(MetaData.rOS) delete [] MetaData.rOS;
char sFlags[STRING_BUFFER_LEN];
DWORD flags = dwFlags;
sFlags[0] = 0;
ISFLAG(Af, PublicKey);
ISFLAG(Af, Retargetable);
ISFLAG(AfContentType_, WindowsRuntime);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\tFlags : %s (%08x)", sFlags, dwFlags);
DisplayCustomAttributes(mda, "\t");
DisplayPermissions(mda, "\t");
WriteLine("");
} // void MDInfo::DisplayAssemblyInfo()
void MDInfo::DisplayAssemblyRefs()
{
HCORENUM assemblyRefEnum = NULL;
mdAssemblyRef AssemblyRefs[ENUM_BUFFER_SIZE];
ULONG count;
ULONG totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pAssemblyImport->EnumAssemblyRefs( &assemblyRefEnum,
AssemblyRefs, ARRAY_SIZE(AssemblyRefs), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("AssemblyRef #%d (%08x)", totalCount, AssemblyRefs[i]);
WriteLine("-------------------------------------------------------");
DisplayAssemblyRefInfo(AssemblyRefs[i]);
WriteLine("");
}
}
m_pAssemblyImport->CloseEnum(assemblyRefEnum);
} // void MDInfo::DisplayAssemblyRefs()
void MDInfo::DisplayAssemblyRefInfo(mdAssemblyRef inAssemblyRef)
{
HRESULT hr;
const BYTE *pbPublicKeyOrToken;
ULONG cbPublicKeyOrToken;
WCHAR szName[STRING_BUFFER_LEN];
ASSEMBLYMETADATA MetaData;
const BYTE *pbHashValue;
ULONG cbHashValue;
DWORD dwFlags;
VWriteLine("\tToken: 0x%08x", inAssemblyRef);
// Get sizes for the arrays in the ASSEMBLYMETADATA structure.
ZeroMemory(&MetaData, sizeof(ASSEMBLYMETADATA));
hr = m_pAssemblyImport->GetAssemblyRefProps(inAssemblyRef,
NULL, NULL, // Public Key or Token.
NULL, 0, NULL, // Name.
&MetaData,
NULL, NULL, // HashValue.
NULL); // Flags.
if (FAILED(hr)) Error("GetAssemblyRefProps() failed.", hr);
// Allocate space for the arrays in the ASSEMBLYMETADATA structure.
if (MetaData.cbLocale)
MetaData.szLocale = new WCHAR[MetaData.cbLocale];
if (MetaData.ulProcessor)
MetaData.rProcessor = new DWORD[MetaData.ulProcessor];
if (MetaData.ulOS)
MetaData.rOS = new OSINFO[MetaData.ulOS];
hr = m_pAssemblyImport->GetAssemblyRefProps(inAssemblyRef,
(const void **)&pbPublicKeyOrToken, &cbPublicKeyOrToken,
szName, STRING_BUFFER_LEN, NULL,
&MetaData,
(const void **)&pbHashValue, &cbHashValue,
&dwFlags);
if (FAILED(hr)) Error("GetAssemblyRefProps() failed.", hr);
DumpHex("\tPublic Key or Token", pbPublicKeyOrToken, cbPublicKeyOrToken, false, 24);
VWriteLine("\tName: %ls", szName);
DisplayASSEMBLYMETADATA(&MetaData);
if(MetaData.szLocale) delete [] MetaData.szLocale;
if(MetaData.rProcessor) delete [] MetaData.rProcessor;
if(MetaData.rOS) delete [] MetaData.rOS;
DumpHex("\tHashValue Blob", pbHashValue, cbHashValue, false, 24);
char sFlags[STRING_BUFFER_LEN];
DWORD flags = dwFlags;
sFlags[0] = 0;
ISFLAG(Af, PublicKey);
ISFLAG(Af, Retargetable);
ISFLAG(AfContentType_, WindowsRuntime);
#if 0
ISFLAG(Af, LegacyLibrary);
ISFLAG(Af, LegacyPlatform);
ISFLAG(Af, Library);
ISFLAG(Af, Platform);
#endif
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\tFlags: %s (%08x)", sFlags, dwFlags);
DisplayCustomAttributes(inAssemblyRef, "\t");
WriteLine("");
} // void MDInfo::DisplayAssemblyRefInfo()
void MDInfo::DisplayFiles()
{
HCORENUM fileEnum = NULL;
mdFile Files[ENUM_BUFFER_SIZE];
ULONG count;
ULONG totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pAssemblyImport->EnumFiles( &fileEnum,
Files, ARRAY_SIZE(Files), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("File #%d (%08x)", totalCount, Files[i]);
WriteLine("-------------------------------------------------------");
DisplayFileInfo(Files[i]);
WriteLine("");
}
}
m_pAssemblyImport->CloseEnum(fileEnum);
} // void MDInfo::DisplayFiles()
void MDInfo::DisplayFileInfo(mdFile inFile)
{
HRESULT hr;
WCHAR szName[STRING_BUFFER_LEN];
const BYTE *pbHashValue;
ULONG cbHashValue;
DWORD dwFlags;
VWriteLine("\tToken: 0x%08x", inFile);
hr = m_pAssemblyImport->GetFileProps(inFile,
szName, STRING_BUFFER_LEN, NULL,
(const void **)&pbHashValue, &cbHashValue,
&dwFlags);
if (FAILED(hr)) Error("GetFileProps() failed.", hr);
VWriteLine("\tName : %ls", szName);
DumpHex("\tHashValue Blob ", pbHashValue, cbHashValue, false, 24);
char sFlags[STRING_BUFFER_LEN];
DWORD flags = dwFlags;
sFlags[0] = 0;
ISFLAG(Ff, ContainsMetaData);
ISFLAG(Ff, ContainsNoMetaData);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\tFlags : %s (%08x)", sFlags, dwFlags);
DisplayCustomAttributes(inFile, "\t");
WriteLine("");
} // MDInfo::DisplayFileInfo()
void MDInfo::DisplayExportedTypes()
{
HCORENUM comTypeEnum = NULL;
mdExportedType ExportedTypes[ENUM_BUFFER_SIZE];
ULONG count;
ULONG totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pAssemblyImport->EnumExportedTypes( &comTypeEnum,
ExportedTypes, ARRAY_SIZE(ExportedTypes), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("ExportedType #%d (%08x)", totalCount, ExportedTypes[i]);
WriteLine("-------------------------------------------------------");
DisplayExportedTypeInfo(ExportedTypes[i]);
WriteLine("");
}
}
m_pAssemblyImport->CloseEnum(comTypeEnum);
} // void MDInfo::DisplayExportedTypes()
void MDInfo::DisplayExportedTypeInfo(mdExportedType inExportedType)
{
HRESULT hr;
WCHAR szName[STRING_BUFFER_LEN];
mdToken tkImplementation;
mdTypeDef tkTypeDef;
DWORD dwFlags;
char sFlags[STRING_BUFFER_LEN];
VWriteLine("\tToken: 0x%08x", inExportedType);
hr = m_pAssemblyImport->GetExportedTypeProps(inExportedType,
szName, STRING_BUFFER_LEN, NULL,
&tkImplementation,
&tkTypeDef,
&dwFlags);
if (FAILED(hr)) Error("GetExportedTypeProps() failed.", hr);
VWriteLine("\tName: %ls", szName);
VWriteLine("\tImplementation token: 0x%08x", tkImplementation);
VWriteLine("\tTypeDef token: 0x%08x", tkTypeDef);
VWriteLine("\tFlags : %s (%08x)",ClassFlags(dwFlags, sFlags), dwFlags);
DisplayCustomAttributes(inExportedType, "\t");
WriteLine("");
} // void MDInfo::DisplayExportedTypeInfo()
void MDInfo::DisplayManifestResources()
{
HCORENUM manifestResourceEnum = NULL;
mdManifestResource ManifestResources[ENUM_BUFFER_SIZE];
ULONG count;
ULONG totalCount = 1;
HRESULT hr;
while (SUCCEEDED(hr = m_pAssemblyImport->EnumManifestResources( &manifestResourceEnum,
ManifestResources, ARRAY_SIZE(ManifestResources), &count)) &&
count > 0)
{
for (ULONG i = 0; i < count; i++, totalCount++)
{
VWriteLine("ManifestResource #%d (%08x)", totalCount, ManifestResources[i]);
WriteLine("-------------------------------------------------------");
DisplayManifestResourceInfo(ManifestResources[i]);
WriteLine("");
}
}
m_pAssemblyImport->CloseEnum(manifestResourceEnum);
} // void MDInfo::DisplayManifestResources()
void MDInfo::DisplayManifestResourceInfo(mdManifestResource inManifestResource)
{
HRESULT hr;
WCHAR szName[STRING_BUFFER_LEN];
mdToken tkImplementation;
DWORD dwOffset;
DWORD dwFlags;
VWriteLine("\tToken: 0x%08x", inManifestResource);
hr = m_pAssemblyImport->GetManifestResourceProps(inManifestResource,
szName, STRING_BUFFER_LEN, NULL,
&tkImplementation,
&dwOffset,
&dwFlags);
if (FAILED(hr)) Error("GetManifestResourceProps() failed.", hr);
VWriteLine("Name: %ls", szName);
VWriteLine("Implementation token: 0x%08x", tkImplementation);
VWriteLine("Offset: 0x%08x", dwOffset);
char sFlags[STRING_BUFFER_LEN];
DWORD flags = dwFlags;
sFlags[0] = 0;
ISFLAG(Mr, Public);
ISFLAG(Mr, Private);
if (!*sFlags)
strcpy_s(sFlags, STRING_BUFFER_LEN, "[none]");
VWriteLine("\tFlags: %s (%08x)", sFlags, dwFlags);
DisplayCustomAttributes(inManifestResource, "\t");
WriteLine("");
} // void MDInfo::DisplayManifestResourceInfo()
void MDInfo::DisplayASSEMBLYMETADATA(ASSEMBLYMETADATA *pMetaData)
{
ULONG i;
VWriteLine("\tVersion: %d.%d.%d.%d", pMetaData->usMajorVersion, pMetaData->usMinorVersion, pMetaData->usBuildNumber, pMetaData->usRevisionNumber);
VWriteLine("\tMajor Version: 0x%08x", pMetaData->usMajorVersion);
VWriteLine("\tMinor Version: 0x%08x", pMetaData->usMinorVersion);
VWriteLine("\tBuild Number: 0x%08x", pMetaData->usBuildNumber);
VWriteLine("\tRevision Number: 0x%08x", pMetaData->usRevisionNumber);
VWriteLine("\tLocale: %ls", pMetaData->cbLocale ? pMetaData->szLocale : W("<null>"));
for (i = 0; i < pMetaData->ulProcessor; i++)
VWriteLine("\tProcessor #%ld: 0x%08x", i+1, pMetaData->rProcessor[i]);
for (i = 0; i < pMetaData->ulOS; i++)
{
VWriteLine("\tOS #%ld:", i+1);
VWriteLine("\t\tOS Platform ID: 0x%08x", pMetaData->rOS[i].dwOSPlatformId);
VWriteLine("\t\tOS Major Version: 0x%08x", pMetaData->rOS[i].dwOSMajorVersion);
VWriteLine("\t\tOS Minor Version: 0x%08x", pMetaData->rOS[i].dwOSMinorVersion);
}
} // void MDInfo::DisplayASSEMBLYMETADATA()
void MDInfo::DisplayUserStrings()
{
HCORENUM stringEnum = NULL; // string enumerator.
mdString Strings[ENUM_BUFFER_SIZE]; // String tokens from enumerator.
CQuickArray<WCHAR> rUserString; // Buffer to receive string.
WCHAR *szUserString; // Working pointer into buffer.
ULONG chUserString; // Size of user string.
CQuickArray<char> rcBuf; // Buffer to hold the BLOB version of the string.
char *szBuf; // Working pointer into buffer.
ULONG chBuf; // Saved size of the user string.
ULONG count; // Items returned from enumerator.
ULONG totalCount = 1; // Running count of strings.
bool bUnprint = false; // Is an unprintable character found?
HRESULT hr; // A result.
while (SUCCEEDED(hr = m_pImport->EnumUserStrings( &stringEnum,
Strings, ARRAY_SIZE(Strings), &count)) &&
count > 0)
{
if (totalCount == 1)
{ // If only one, it is the NULL string, so don't print it.
WriteLine("User Strings");
WriteLine("-------------------------------------------------------");
}
for (ULONG i = 0; i < count; i++, totalCount++)
{
do { // Try to get the string into the existing buffer.
hr = m_pImport->GetUserString( Strings[i], rUserString.Ptr(),(ULONG32)rUserString.MaxSize(), &chUserString);
if (hr == CLDB_S_TRUNCATION)
{ // Buffer wasn't big enough, try to enlarge it.
if (FAILED(rUserString.ReSizeNoThrow(chUserString)))
Error("malloc failed.", E_OUTOFMEMORY);
continue;
}
} while (hr == CLDB_S_TRUNCATION);
if (FAILED(hr)) Error("GetUserString failed.", hr);
szUserString = rUserString.Ptr();
chBuf = chUserString;
VWrite("%08x : (%2d) L\"", Strings[i], chUserString);
for (ULONG j=0; j<chUserString; j++)
{
switch (*szUserString)
{
case 0:
Write("\\0"); break;
case L'\r':
Write("\\r"); break;
case L'\n':
Write("\\n"); break;
case L'\t':
Write("\\t"); break;
default:
if (iswprint(*szUserString))
VWrite("%lc", *szUserString);
else
{
bUnprint = true;
Write(".");
}
break;
}
++szUserString;
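// Break the output line every 128 characters so very long strings stay readable.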
if((j>0)&&((j&0x7F)==0)) WriteLine("");
}
WriteLine("\"");
// Print the user string as a blob if an unprintable character is found.
if (bUnprint)
{
bUnprint = false;
szUserString = rUserString.Ptr();
if (FAILED(hr = rcBuf.ReSizeNoThrow(81))) //(chBuf * 5 + 1);
Error("ReSize failed.", hr);
szBuf = rcBuf.Ptr();
ULONG j,k;
WriteLine("\t\tUser string has unprintables, hex format below:");
for (j = 0,k=0; j < chBuf; j++)
{
sprintf_s (&szBuf[k*5], 81, "%04x ", szUserString[j]);
k++;
if((k==16)||(j == (chBuf-1)))
{
szBuf[k*5] = '\0';
VWriteLine("\t\t%s", szBuf);
k=0;
}
}
}
}
}
if (stringEnum)
m_pImport->CloseEnum(stringEnum);
} // void MDInfo::DisplayUserStrings()
void MDInfo::DisplayUnsatInfo()
{
HRESULT hr = S_OK;
HCORENUM henum = 0;
mdToken tk;
ULONG cMethods;
Write("\nUnresolved Externals\n");
Write("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
while ( (hr = m_pImport->EnumUnresolvedMethods(
&henum,
&tk,
1,
&cMethods)) == S_OK && cMethods )
{
if ( TypeFromToken(tk) == mdtMethodDef )
{
// a method definition without implementation
DisplayMethodInfo( tk );
}
else if ( TypeFromToken(tk) == mdtMemberRef )
{
// an unresolved MemberRef to a global function
DisplayMemberRefInfo( tk, "" );
}
else
{
_ASSERTE(!"Unknown token kind!");
}
}
m_pImport->CloseEnum(henum);
} // void MDInfo::DisplayUnsatInfo()
//*******************************************************************************
// This code is used for debugging purposes only. This will just print out the
// entire database.
//*******************************************************************************
const char *MDInfo::DumpRawNameOfType(ULONG iType)
{
if (iType <= iRidMax)
{
const char *pNameTable;
m_pTables->GetTableInfo(iType, 0,0,0,0, &pNameTable);
return pNameTable;
}
else
// Is the field a coded token?
if (iType <= iCodedTokenMax)
{
int iCdTkn = iType - iCodedToken;
const char *pNameCdTkn;
m_pTables->GetCodedTokenInfo(iCdTkn, 0,0, &pNameCdTkn);
return pNameCdTkn;
}
// Fixed type.
switch (iType)
{
case iBYTE:
return "BYTE";
case iSHORT:
return "short";
case iUSHORT:
return "USHORT";
case iLONG:
return "long";
case iULONG:
return "ULONG";
case iSTRING:
return "string";
case iGUID:
return "GUID";
case iBLOB:
return "blob";
}
// default:
static char buf[30];
sprintf_s(buf, 30, "unknown type 0x%02x", iType);
return buf;
} // const char *MDInfo::DumpRawNameOfType()
void MDInfo::DumpRawCol(ULONG ixTbl, ULONG ixCol, ULONG rid, bool bStats)
{
ULONG ulType; // Type of a column.
ULONG ulVal; // Value of a column.
LPCUTF8 pString; // Pointer to a string.
const void *pBlob; // Pointer to a blob.
ULONG cb; // Size of something.
m_pTables->GetColumn(ixTbl, ixCol, rid, &ulVal);
m_pTables->GetColumnInfo(ixTbl, ixCol, 0, 0, &ulType, 0);
if (ulType <= iRidMax)
{
const char *pNameTable;
m_pTables->GetTableInfo(ulType, 0,0,0,0, &pNameTable);
VWrite("%s[%x]", pNameTable, ulVal);
}
else
// Is the field a coded token?
if (ulType <= iCodedTokenMax)
{
int iCdTkn = ulType - iCodedToken;
const char *pNameCdTkn;
m_pTables->GetCodedTokenInfo(iCdTkn, 0,0, &pNameCdTkn);
VWrite("%s[%08x]", pNameCdTkn, ulVal);
}
else
{
// Fixed type.
switch (ulType)
{
case iBYTE:
VWrite("%02x", ulVal);
break;
case iSHORT:
case iUSHORT:
VWrite("%04x", ulVal);
break;
case iLONG:
case iULONG:
VWrite("%08x", ulVal);
break;
case iSTRING:
if (ulVal && (m_DumpFilter & dumpNames))
{
m_pTables->GetString(ulVal, &pString);
VWrite("(%x)\"%s\"", ulVal, pString);
}
else
VWrite("string#%x", ulVal);
if (bStats && ulVal)
{
m_pTables->GetString(ulVal, &pString);
cb = (ULONG) strlen(pString) + 1;
VWrite("(%d)", cb);
}
break;
case iGUID:
VWrite("guid#%x", ulVal);
if (bStats && ulVal)
{
VWrite("(16)");
}
break;
case iBLOB:
VWrite("blob#%x", ulVal);
if (bStats && ulVal)
{
m_pTables->GetBlob(ulVal, &cb, &pBlob);
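// Account for the blob's compressed length prefix (one byte, plus extra bytes for larger blobs).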
cb += 1;
if (cb > 128)
cb += 1;
if (cb > 16535)
cb += 1;
VWrite("(%d)", cb);
}
break;
default:
VWrite("unknown type 0x%04x", ulVal);
break;
}
}
} // void MDInfo::DumpRawCol()
ULONG MDInfo::DumpRawColStats(ULONG ixTbl, ULONG ixCol, ULONG cRows)
{
ULONG rslt = 0;
ULONG ulType; // Type of a column.
ULONG ulVal; // Value of a column.
LPCUTF8 pString; // Pointer to a string.
const void *pBlob; // Pointer to a blob.
ULONG cb; // Size of something.
m_pTables->GetColumnInfo(ixTbl, ixCol, 0, 0, &ulType, 0);
if (IsHeapType(ulType))
{
for (ULONG rid=1; rid<=cRows; ++rid)
{
m_pTables->GetColumn(ixTbl, ixCol, rid, &ulVal);
// Fixed type.
switch (ulType)
{
case iSTRING:
if (ulVal)
{
m_pTables->GetString(ulVal, &pString);
cb = (ULONG) strlen(pString);
rslt += cb + 1;
}
break;
case iGUID:
if (ulVal)
rslt += 16;
break;
case iBLOB:
if (ulVal)
{
m_pTables->GetBlob(ulVal, &cb, &pBlob);
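// Count the blob plus its compressed length prefix, as in DumpRawCol above.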
rslt += cb + 1;
if (cb > 128)
rslt += 1;
if (cb > 16535)
rslt += 1;
}
break;
default:
break;
}
}
}
return rslt;
} // ULONG MDInfo::DumpRawColStats()
int MDInfo::DumpHex(
const char *szPrefix, // String prefix for first line.
const void *pvData, // The data to print.
ULONG cbData, // Bytes of data to print.
int bText, // If true, also dump text.
ULONG nLine) // Bytes per line to print.
{
const BYTE *pbData = static_cast<const BYTE*>(pvData);
ULONG i; // Loop control.
ULONG nPrint; // Number to print in an iteration.
ULONG nSpace; // Spacing calculations.
ULONG nPrefix; // Size of the prefix.
ULONG nLines=0; // Number of lines printed.
const char *pPrefix; // For counting spaces in the prefix.
// Round down to 8 characters.
nLine = nLine & ~0x7;
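// Compute the display width of the prefix, expanding tabs to 8-column stops.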
for (nPrefix=0, pPrefix=szPrefix; *pPrefix; ++pPrefix)
{
if (*pPrefix == '\t')
nPrefix = (nPrefix + 8) & ~7;
else
++nPrefix;
}
//nPrefix = strlen(szPrefix);
do
{ // Write the line prefix.
if (szPrefix)
VWrite("%s:", szPrefix);
else
VWrite("%*s:", nPrefix, "");
szPrefix = 0;
++nLines;
// Calculate spacing.
nPrint = min(cbData, nLine);
nSpace = nLine - nPrint;
// dump in hex.
for(i=0; i<nPrint; i++)
{
if ((i&7) == 0)
Write(" ");
VWrite("%02x ", pbData[i]);
}
if (bText)
{
// Space out to the text spot.
if (nSpace)
VWrite("%*s", nSpace*3+nSpace/8, "");
// Dump in text.
Write(">");
for(i=0; i<nPrint; i++)
VWrite("%c", (isprint(pbData[i])) ? pbData[i] : ' ');
// Space out the text, and finish the line.
VWrite("%*s<", nSpace, "");
}
VWriteLine("");
// Next data to print.
cbData -= nPrint;
pbData += nPrint;
}
while (cbData > 0);
return nLines;
} // int MDInfo::DumpHex()
void MDInfo::DumpRawHeaps()
{
HRESULT hr; // A result.
ULONG ulSize; // Bytes in a heap.
const BYTE *pData; // Pointer to a blob.
ULONG cbData; // Size of a blob.
ULONG oData; // Offset of current blob.
char rcPrefix[30]; // To format line prefix.
m_pTables->GetBlobHeapSize(&ulSize);
VWriteLine("");
VWriteLine("Blob Heap: %d(%#x) bytes", ulSize,ulSize);
oData = 0;
do
{
m_pTables->GetBlob(oData, &cbData, (const void**)&pData);
sprintf_s(rcPrefix, 30, "%5x,%-2x", oData, cbData);
DumpHex(rcPrefix, pData, cbData);
hr = m_pTables->GetNextBlob(oData, &oData);
}
while (hr == S_OK);
m_pTables->GetStringHeapSize(&ulSize);
VWriteLine("");
VWriteLine("String Heap: %d(%#x) bytes", ulSize,ulSize);
oData = 0;
const char *pString;
do
{
m_pTables->GetString(oData, &pString);
if (m_DumpFilter & dumpMoreHex)
{
sprintf_s(rcPrefix, 30, "%08x", oData);
DumpHex(rcPrefix, pString, (ULONG)strlen(pString)+1);
}
else
if (*pString != 0)
VWrite("%08x: %s\n", oData, pString);
hr = m_pTables->GetNextString(oData, &oData);
}
while (hr == S_OK);
VWriteLine("");
DisplayUserStrings();
} // void MDInfo::DumpRawHeaps()
void MDInfo::DumpRaw(int iDump, bool bunused)
{
ULONG cTables; // Tables in the database.
ULONG cCols; // Columns in a table.
ULONG cRows; // Rows in a table.
ULONG cbRow; // Bytes in a row of a table.
ULONG iKey; // Key column of a table.
const char *pNameTable; // Name of a table.
ULONG oCol; // Offset of a column.
ULONG cbCol; // Size of a column.
ULONG ulType; // Type of a column.
const char *pNameColumn; // Name of a column.
ULONG ulSize;
// Heaps is easy -- there is a specific bit for that.
bool bStats = (m_DumpFilter & dumpStats) != 0;
// Rows are harder. Was there something else that limited data?
BOOL bRows = (m_DumpFilter & (dumpSchema | dumpHeader)) == 0;
BOOL bSchema = bRows || (m_DumpFilter & dumpSchema);
// (m_DumpFilter & (dumpSchema | dumpHeader | dumpCSV | dumpRaw | dumpStats | dumpRawHeaps))
if (m_pTables2)
{
// Get the raw metadata header.
const BYTE *pbData = NULL;
const BYTE *pbStream = NULL; // One of the streams.
const BYTE *pbMd = NULL; // The metadata stream.
ULONG cbData = 0;
ULONG cbStream = 0; // One of the streams.
ULONG cbMd = 0; // The metadata stream.
const char *pName;
HRESULT hr = S_OK;
ULONG ix;
m_pTables2->GetMetaDataStorage((const void**)&pbData, &cbData);
// Per the ECMA spec, the section data looks like this:
struct MDSTORAGESIGNATURE
{
ULONG lSignature; // "Magic" signature.
USHORT iMajorVer; // Major file version.
USHORT iMinorVer; // Minor file version.
ULONG iExtraData; // Offset to next structure of information
ULONG iVersionString; // Length of version string
BYTE pVersion[0]; // Version string
};
struct MDSTORAGEHEADER
{
BYTE fFlags; // STGHDR_xxx flags.
BYTE pad;
USHORT iStreams; // How many streams are there.
};
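// The storage header immediately follows the signature and its variable-length version string.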
const MDSTORAGESIGNATURE *pStorage = (const MDSTORAGESIGNATURE *) pbData;
const MDSTORAGEHEADER *pSHeader = (const MDSTORAGEHEADER *)(pbData + sizeof(MDSTORAGESIGNATURE) + pStorage->iVersionString);
VWriteLine("Metadata section: 0x%08x, version: %d.%d, extra: %d, version len: %d, version: %s", pStorage->lSignature, pStorage->iMajorVer, pStorage->iMinorVer, pStorage->iExtraData, pStorage->iVersionString, pStorage->pVersion);
VWriteLine(" flags: 0x%02x, streams: %d", pSHeader->fFlags, pSHeader->iStreams);
if (m_DumpFilter & dumpMoreHex)
{
const BYTE *pbEnd = pbData;
ULONG cb = sizeof(MDSTORAGESIGNATURE) + pStorage->iVersionString + sizeof(MDSTORAGEHEADER);
hr = m_pTables2->GetMetaDataStreamInfo(0, &pName, (const void**)&pbEnd, &cbStream);
if (hr == S_OK)
cb = (ULONG)(pbEnd - pbData);
DumpHex(" ", pbData, cb);
}
for (ix=0; hr == S_OK; ++ix)
{
hr = m_pTables2->GetMetaDataStreamInfo(ix, &pName, (const void**)&pbStream, &cbStream);
if (hr != S_OK)
break;
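// The "#~" (compressed) or "#-" (uncompressed) stream holds the metadata tables.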
if (strcmp(pName, "#~") == 0 || strcmp(pName, "#-") == 0)
{
pbMd = pbStream;
cbMd = cbStream;
}
VWriteLine("Stream %d: name: %s, size %d", ix, pName, cbStream);
// Hex for the individual stream headers appears in the metadata section dump; hex for
// the streams themselves is distributed throughout the dump.
}
if (pbMd)
{
// Per ECMA, the metadata header looks like this:
struct MD
{
ULONG m_ulReserved; // Reserved, must be zero.
BYTE m_major; // Version numbers.
BYTE m_minor;
BYTE m_heaps; // Bits for heap sizes.
BYTE m_rid; // log-base-2 of largest rid.
unsigned __int64 m_maskvalid; // Bit mask of present table counts.
unsigned __int64 m_sorted; // Bit mask of sorted tables.
};
const MD *pMd;
pMd = (const MD *)pbMd;
VWriteLine("Metadata header: %d.%d, heaps: 0x%02x, rid: 0x%02x, valid: 0x%016I64x, sorted: 0x%016I64x",
pMd->m_major, pMd->m_minor, pMd->m_heaps, pMd->m_rid,
(ULONGLONG)GET_UNALIGNED_VAL64(&(pMd->m_maskvalid)),
(ULONGLONG)GET_UNALIGNED_VAL64(&(pMd->m_sorted)));
if (m_DumpFilter & dumpMoreHex)
{
DumpHex(" ", pbMd, sizeof(MD));
}
}
VWriteLine("");
}
m_pTables->GetNumTables(&cTables);
m_pTables->GetStringHeapSize(&ulSize);
VWrite("Strings: %d(%#x)", ulSize, ulSize);
m_pTables->GetBlobHeapSize(&ulSize);
VWrite(", Blobs: %d(%#x)", ulSize, ulSize);
m_pTables->GetGuidHeapSize(&ulSize);
VWrite(", Guids: %d(%#x)", ulSize, ulSize);
m_pTables->GetUserStringHeapSize(&ulSize);
VWriteLine(", User strings: %d(%#x)", ulSize, ulSize);
for (ULONG ixTbl = 0; ixTbl < cTables; ++ixTbl)
{
m_pTables->GetTableInfo(ixTbl, &cbRow, &cRows, &cCols, &iKey, &pNameTable);
if (bRows) // when dumping rows, print a break between row data and schema
VWriteLine("=================================================");
VWriteLine("%2d(%#x): %-20s cRecs:%5d(%#x), cbRec:%3d(%#x), cbTable:%6d(%#x)",
ixTbl, ixTbl, pNameTable, cRows, cRows, cbRow, cbRow, cbRow * cRows, cbRow * cRows);
if (!bSchema && !bRows)
continue;
// Dump column definitions for the table.
ULONG ixCol;
for (ixCol=0; ixCol<cCols; ++ixCol)
{
m_pTables->GetColumnInfo(ixTbl, ixCol, &oCol, &cbCol, &ulType, &pNameColumn);
VWrite(" col %2x:%c %-12s oCol:%2x, cbCol:%x, %-7s",
ixCol, ((ixCol==iKey)?'*':' '), pNameColumn, oCol, cbCol, DumpRawNameOfType(ulType));
if (bStats)
{
ulSize = DumpRawColStats(ixTbl, ixCol, cRows);
if (ulSize)
VWrite("(%d)", ulSize);
}
VWriteLine("");
}
if (!bRows)
continue;
// Dump the rows.
for (ULONG rid = 1; rid <= cRows; ++rid)
{
if (rid == 1)
VWriteLine("-------------------------------------------------");
VWrite(" %3x == ", rid);
for (ixCol=0; ixCol < cCols; ++ixCol)
{
if (ixCol) VWrite(", ");
VWrite("%d:", ixCol);
DumpRawCol(ixTbl, ixCol, rid, bStats);
}
VWriteLine("");
}
}
} // void MDInfo::DumpRaw()
void MDInfo::DumpRawCSV()
{
ULONG cTables; // Tables in the database.
ULONG cCols; // Columns in a table.
ULONG cRows; // Rows in a table.
ULONG cbRow; // Bytes in a row of a table.
const char *pNameTable; // Name of a table.
ULONG ulSize;
m_pTables->GetNumTables(&cTables);
VWriteLine("Name,Size,cRecs,cbRec");
m_pTables->GetStringHeapSize(&ulSize);
VWriteLine("Strings,%d", ulSize);
m_pTables->GetBlobHeapSize(&ulSize);
VWriteLine("Blobs,%d", ulSize);
m_pTables->GetGuidHeapSize(&ulSize);
VWriteLine("Guids,%d", ulSize);
for (ULONG ixTbl = 0; ixTbl < cTables; ++ixTbl)
{
m_pTables->GetTableInfo(ixTbl, &cbRow, &cRows, &cCols, NULL, &pNameTable);
VWriteLine("%s,%d,%d,%d", pNameTable, cbRow*cRows, cRows, cbRow);
}
} // void MDInfo::DumpRawCSV()
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/c_runtime/swprintf/test1/test1.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose: General test to see if swprintf works correctly
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../swprintf.h"
/*
* Uses memcmp & wcslen
*/
PALTEST(c_runtime_swprintf_test1_paltest_swprintf_test1, "c_runtime/swprintf/test1/paltest_swprintf_test1")
{
WCHAR *checkstr;
WCHAR buf[256];
if (PAL_Initialize(argc, argv) != 0)
{
return FAIL;
}
checkstr = convert("hello world");
swprintf_s(buf, ARRAY_SIZE(buf), convert("hello world"));
if (memcmp(checkstr, buf, wcslen(checkstr)*2+2) != 0)
{
Fail("ERROR: Expected \"%s\", got \"%s\".\n", "hello world",
convertC(buf));
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/threading/QueueUserAPC/test7/test7.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================================
**
** Source: test7.c
**
** Dependencies: PAL_Initialize
** PAL_Terminate
** CreateEvent
** SetEvent
** CreateThread
** ResumeThread
** WaitForMultipleObjectsEx
** CloseHandle
**
** Purpose:
**
** Test to ensure proper operation of the QueueUserAPC()
** API by trying to queue an APC function on a thread and
** activating it with WaitForMultipleObjectsEx.
**
**
**===========================================================================*/
#include <palsuite.h>
static HANDLE hSyncEvent_QueueUserAPC_test7 = NULL;
static HANDLE hTestEvent_QueueUserAPC_test7 = NULL;
static int nAPCExecuted_QueueUserAPC_test7 = 0;
static BOOL bThreadResult_QueueUserAPC_test7 = FALSE;
VOID PALAPI APCFunc_QueueUserAPC_test7( ULONG_PTR dwParam )
{
++nAPCExecuted_QueueUserAPC_test7;
}
/**
* ThreadFunc
*
* Dummy thread function for APC queuing.
*/
DWORD PALAPI ThreadFunc_QueueUserAPC_test7( LPVOID param )
{
DWORD ret = 0;
/* pessimism */
bThreadResult_QueueUserAPC_test7 = FALSE;
/* set the sync event to notify the main thread */
if( ! SetEvent( hSyncEvent_QueueUserAPC_test7 ) )
{
Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() );
goto done;
}
/* wait until the test event is signalled */
ret = WaitForSingleObject( hTestEvent_QueueUserAPC_test7, INFINITE );
if( ret != WAIT_OBJECT_0 )
{
Trace( "ERROR:WaitForSingleObject() returned %lu, "
"expected WAIT_OBJECT_0\n",
ret );
goto done;
}
/* now do an alertable wait on the same event, which is now
in an unsignalled state */
ret = WaitForMultipleObjectsEx( 1, &hTestEvent_QueueUserAPC_test7, TRUE, 2000, TRUE );
/* verify that we got a WAIT_IO_COMPLETION result */
if( ret != WAIT_IO_COMPLETION )
{
Trace( "ERROR:WaitForMultipleObjectsEx returned %lu, "
"expected WAIT_IO_COMPLETION\n",
ret );
goto done;
}
/* set the event again */
if( ! SetEvent( hTestEvent_QueueUserAPC_test7 ) )
{
Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() );
goto done;
}
/* do a non-alertable wait on the same event */
ret = WaitForMultipleObjectsEx( 1, &hTestEvent_QueueUserAPC_test7, TRUE, INFINITE, FALSE );
/* verify that we got a WAIT_OBJECT_0 result */
if( ret != WAIT_OBJECT_0 )
{
Trace( "ERROR:WaitForMultipleObjectsEx returned %lu, "
"expected WAIT_OBJECT_0\n",
ret );
goto done;
}
/* success at this point */
bThreadResult_QueueUserAPC_test7 = TRUE;
done:
return bThreadResult_QueueUserAPC_test7;
}
PALTEST(threading_QueueUserAPC_test7_paltest_queueuserapc_test7, "threading/QueueUserAPC/test7/paltest_queueuserapc_test7")
{
/* local variables */
HANDLE hThread = NULL;
DWORD IDThread;
DWORD ret;
BOOL bResult = FALSE;
/* PAL initialization */
if( (PAL_Initialize(argc, argv)) != 0 )
{
return( FAIL );
}
/* create an auto-reset event for the other thread to wait on */
hTestEvent_QueueUserAPC_test7 = CreateEvent( NULL, FALSE, FALSE, NULL );
if( hTestEvent_QueueUserAPC_test7 == NULL )
{
Fail( "ERROR:%lu:CreateEvent() call failed\n", GetLastError() );
}
/* create an auto-reset event for synchronization */
hSyncEvent_QueueUserAPC_test7 = CreateEvent( NULL, FALSE, FALSE, NULL );
if( hSyncEvent_QueueUserAPC_test7 == NULL )
{
Trace( "ERROR:%lu:CreateEvent() call failed\n", GetLastError() );
if( ! CloseHandle( hTestEvent_QueueUserAPC_test7 ) )
{
Trace( "ERROR:%lu:CreateEvent() call failed\n", GetLastError() );
}
Fail( "test failed\n" );
}
/* run another dummy thread to cause notification of the library */
hThread = CreateThread( NULL, /* no security attributes */
0, /* use default stack size */
(LPTHREAD_START_ROUTINE) ThreadFunc_QueueUserAPC_test7, /* thread function */
(LPVOID) NULL, /* pass thread index as */
/* function argument */
CREATE_SUSPENDED, /* create suspended */
&IDThread ); /* returns thread id */
/* Check the return value for success. */
if( hThread == NULL )
{
/* error creating thread */
Trace( "ERROR:%lu:CreateThread call failed\n", GetLastError() );
if( ! CloseHandle( hTestEvent_QueueUserAPC_test7 ) )
{
Trace( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() );
}
Fail( "test failed\n" );
}
/* Resume the suspended thread */
ResumeThread( hThread );
/* wait until the other thread is ready to proceed */
ret = WaitForSingleObject( hSyncEvent_QueueUserAPC_test7, 10000 );
if( ret != WAIT_OBJECT_0 )
{
Trace( "ERROR:WaitForSingleObject returned %lu, "
"expected WAIT_OBJECT_0\n",
ret );
goto cleanup;
}
/* now queue our APC on the test thread */
ret = QueueUserAPC( APCFunc_QueueUserAPC_test7, hThread, 0 );
if( ret == 0 )
{
Trace( "ERROR:%lu:QueueUserAPC call failed\n", GetLastError() );
goto cleanup;
}
/* signal the test event so the other thread will proceed */
if( ! SetEvent( hTestEvent_QueueUserAPC_test7 ) )
{
Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() );
goto cleanup;
}
/* wait on the other thread to complete */
ret = WaitForSingleObject( hThread, INFINITE );
if( ret != WAIT_OBJECT_0 )
{
Trace( "ERROR:WaitForSingleObject() returned %lu, "
"expected WAIT_OBJECT_0\n",
ret );
goto cleanup;
}
/* check the result of the other thread */
if( bThreadResult_QueueUserAPC_test7 == FALSE )
{
goto cleanup;
}
/* check that the APC function was actually executed exactly one time */
if( nAPCExecuted_QueueUserAPC_test7 != 1 )
{
Trace( "ERROR:APC function was executed %d times, "
"expected once\n", nAPCExecuted_QueueUserAPC_test7 );
goto cleanup;
}
/* set the success flag */
bResult = PASS;
cleanup:
/* close the global event handles */
if( ! CloseHandle( hTestEvent_QueueUserAPC_test7 ) )
{
Trace( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() );
bResult = FAIL;
}
if( ! CloseHandle( hSyncEvent_QueueUserAPC_test7 ) )
{
Trace( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() );
bResult = FAIL;
}
/* close the thread handle */
if( ! CloseHandle( hThread ) )
{
Trace( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() );
bResult = FAIL;
}
/* output final failure result for failure case */
if( bResult == FAIL )
{
Fail( "test failed\n" );
}
/* PAL termination */
PAL_Terminate();
/* return success */
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/utilcode/prettyprintsig.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// This code supports formatting a method and its signature in a friendly
// and consistent format.
//
//*****************************************************************************
#include "stdafx.h"
#include "prettyprintsig.h"
#include "utilcode.h"
#include "metadata.h"
#include "corpriv.h"
/***********************************************************************/
// Null-terminates the string held in "out"
static WCHAR* asStringW(CQuickBytes *out)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return NULL;);
}
CONTRACTL_END
SIZE_T oldSize = out->Size();
if (FAILED(out->ReSizeNoThrow(oldSize + 1)))
return 0;
WCHAR * cur = (WCHAR *) ((BYTE *) out->Ptr() + oldSize);
*cur = 0;
return((WCHAR*) out->Ptr());
} // static WCHAR* asStringW()
// Null-terminates the string held in "out"
static CHAR* asStringA(CQuickBytes *out)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return NULL;);
}
CONTRACTL_END
SIZE_T oldSize = out->Size();
if (FAILED(out->ReSizeNoThrow(oldSize + 1)))
return 0;
CHAR * cur = (CHAR *) ((BYTE *) out->Ptr() + oldSize);
*cur = 0;
return((CHAR*) out->Ptr());
} // static CHAR* asStringA()
/***********************************************************************/
// Appends the str to "out"
// The string held in "out" is not NULL-terminated. asStringW() needs to
// be called for the NULL-termination
static HRESULT appendStrW(CQuickBytes *out, const WCHAR* str)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
SIZE_T len = wcslen(str) * sizeof(WCHAR);
SIZE_T oldSize = out->Size();
if (FAILED(out->ReSizeNoThrow(oldSize + len)))
return E_OUTOFMEMORY;
WCHAR * cur = (WCHAR *) ((BYTE *) out->Ptr() + oldSize);
memcpy(cur, str, len);
// Note no trailing null!
return S_OK;
} // static HRESULT appendStrW()
// Appends the str to "out"
// The string held in "out" is not NULL-terminated. asStringA() needs to
// be called for the NULL-termination
static HRESULT appendStrA(CQuickBytes *out, const CHAR* str)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
SIZE_T len = strlen(str) * sizeof(CHAR);
SIZE_T oldSize = out->Size();
if (FAILED(out->ReSizeNoThrow(oldSize + len)))
return E_OUTOFMEMORY;
CHAR * cur = (CHAR *) ((BYTE *) out->Ptr() + oldSize);
memcpy(cur, str, len);
// Note no trailing null!
return S_OK;
} // static HRESULT appendStrA()
static HRESULT appendStrNumW(CQuickBytes *out, int num)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
WCHAR buff[32];
swprintf_s(buff, 32, W("%d"), num);
return appendStrW(out, buff);
} // static HRESULT appendStrNumW()
static HRESULT appendStrNumA(CQuickBytes *out, int num)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
CHAR buff[32];
sprintf_s(buff, 32, "%d", num);
return appendStrA(out, buff);
} // static HRESULT appendStrNumA()
static HRESULT appendStrHexW(CQuickBytes *out, int num)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
WCHAR buff[32];
swprintf_s(buff, 32, W("%08X"), num);
return appendStrW(out, buff);
} // static HRESULT appendStrHexW()
static HRESULT appendStrHexA(CQuickBytes *out, int num)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
CHAR buff[32];
sprintf_s(buff, 32, "%08X", num);
return appendStrA(out, buff);
} // static HRESULT appendStrHexA()
/***********************************************************************/
LPCWSTR PrettyPrintSigWorker(
PCCOR_SIGNATURE & typePtr, // type to convert,
size_t typeLen, // length of type
const WCHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMetaDataImport * pIMDI); // Import api to use.
//*****************************************************************************
//*****************************************************************************
// pretty prints 'type' to the buffer 'out' returns a pointer to the next type,
// or 0 on a format failure
static PCCOR_SIGNATURE PrettyPrintType(
PCCOR_SIGNATURE typePtr, // type to convert,
size_t typeLen, // Maximum length of the type
CQuickBytes * out, // where to put the pretty printed string
IMetaDataImport * pIMDI) // ptr to IMDInternal class with ComSig
{
mdToken tk;
const WCHAR * str;
WCHAR rcname[MAX_CLASS_NAME];
HRESULT hr;
unsigned __int8 elt = *typePtr++;
PCCOR_SIGNATURE typeEnd = typePtr + typeLen;
switch(elt)
{
case ELEMENT_TYPE_VOID:
str = W("void");
goto APPEND;
case ELEMENT_TYPE_BOOLEAN:
str = W("bool");
goto APPEND;
case ELEMENT_TYPE_CHAR:
str = W("wchar");
goto APPEND;
case ELEMENT_TYPE_I1:
str = W("int8");
goto APPEND;
case ELEMENT_TYPE_U1:
str = W("unsigned int8");
goto APPEND;
case ELEMENT_TYPE_I2:
str = W("int16");
goto APPEND;
case ELEMENT_TYPE_U2:
str = W("unsigned int16");
goto APPEND;
case ELEMENT_TYPE_I4:
str = W("int32");
goto APPEND;
case ELEMENT_TYPE_U4:
str = W("unsigned int32");
goto APPEND;
case ELEMENT_TYPE_I8:
str = W("int64");
goto APPEND;
case ELEMENT_TYPE_U8:
str = W("unsigned int64");
goto APPEND;
case ELEMENT_TYPE_R4:
str = W("float32");
goto APPEND;
case ELEMENT_TYPE_R8:
str = W("float64");
goto APPEND;
case ELEMENT_TYPE_U:
str = W("unsigned int");
goto APPEND;
case ELEMENT_TYPE_I:
str = W("int");
goto APPEND;
case ELEMENT_TYPE_OBJECT:
str = W("class System.Object");
goto APPEND;
case ELEMENT_TYPE_STRING:
str = W("class System.String");
goto APPEND;
case ELEMENT_TYPE_CANON_ZAPSIG:
str = W("class System.__Canon");
goto APPEND;
case ELEMENT_TYPE_TYPEDBYREF:
str = W("refany");
goto APPEND;
APPEND:
appendStrW(out, str);
break;
case ELEMENT_TYPE_VALUETYPE:
str = W("value class ");
goto DO_CLASS;
case ELEMENT_TYPE_CLASS:
str = W("class ");
goto DO_CLASS;
DO_CLASS:
typePtr += CorSigUncompressToken(typePtr, &tk);
appendStrW(out, str);
rcname[0] = 0;
str = rcname;
if (TypeFromToken(tk) == mdtTypeRef)
{
hr = pIMDI->GetTypeRefProps(tk, 0, rcname, ARRAY_SIZE(rcname), 0);
}
else if (TypeFromToken(tk) == mdtTypeDef)
{
hr = pIMDI->GetTypeDefProps(tk, rcname, ARRAY_SIZE(rcname), 0, 0, 0);
}
else
{
_ASSERTE(!"Unknown token type encountered in signature.");
str = W("<UNKNOWN>");
}
appendStrW(out, str);
break;
case ELEMENT_TYPE_SZARRAY:
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
appendStrW(out, W("[]"));
break;
case ELEMENT_TYPE_ARRAY:
{
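// ArrayShape encoding (ECMA-335): element type, rank, a count of sizes followed by the sizes, then a count of lower bounds followed by the bounds.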
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
unsigned rank = CorSigUncompressData(typePtr);
PREFIX_ASSUME(rank <= 0xffffff);
// <TODO>TODO what is the syntax for the rank 0 case? </TODO>
if (rank == 0)
{
appendStrW(out, W("[??]"));
}
else
{
_ASSERTE(rank != 0);
int* lowerBounds = (int*) _alloca(sizeof(int)*2*rank);
int* sizes = &lowerBounds[rank];
memset(lowerBounds, 0, sizeof(int)*2*rank);
unsigned numSizes = CorSigUncompressData(typePtr);
_ASSERTE(numSizes <= rank);
unsigned int i;
for(i =0; i < numSizes; i++)
sizes[i] = CorSigUncompressData(typePtr);
unsigned numLowBounds = CorSigUncompressData(typePtr);
_ASSERTE(numLowBounds <= rank);
for(i = 0; i < numLowBounds; i++)
lowerBounds[i] = CorSigUncompressData(typePtr);
appendStrW(out, W("["));
for(i = 0; i < rank; i++)
{
if (sizes[i] != 0 && lowerBounds[i] != 0)
{
appendStrNumW(out, lowerBounds[i]);
appendStrW(out, W("..."));
appendStrNumW(out, lowerBounds[i] + sizes[i] + 1);
}
if (i < rank-1)
appendStrW(out, W(","));
}
appendStrW(out, W("]"));
}
}
break;
case ELEMENT_TYPE_MVAR:
appendStrW(out, W("!!"));
appendStrNumW(out, CorSigUncompressData(typePtr));
break;
case ELEMENT_TYPE_VAR:
appendStrW(out, W("!"));
appendStrNumW(out, CorSigUncompressData(typePtr));
break;
case ELEMENT_TYPE_GENERICINST:
{
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
unsigned ntypars = CorSigUncompressData(typePtr);
appendStrW(out, W("<"));
for (unsigned i = 0; i < ntypars; i++)
{
if (i > 0)
appendStrW(out, W(","));
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
}
appendStrW(out, W(">"));
}
break;
case ELEMENT_TYPE_MODULE_ZAPSIG:
appendStrW(out, W("[module#"));
appendStrNumW(out, CorSigUncompressData(typePtr));
appendStrW(out, W(", token#"));
typePtr += CorSigUncompressToken(typePtr, &tk);
appendStrHexW(out, tk);
appendStrW(out, W("]"));
break;
case ELEMENT_TYPE_FNPTR:
appendStrW(out, W("fnptr "));
PrettyPrintSigWorker(typePtr, (typeEnd - typePtr), W(""), out, pIMDI);
break;
case ELEMENT_TYPE_NATIVE_VALUETYPE_ZAPSIG:
appendStrW(out, W("native "));
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
break;
// Modifiers or dependent types
case ELEMENT_TYPE_PINNED:
str = W(" pinned");
goto MODIFIER;
case ELEMENT_TYPE_PTR:
str = W("*");
goto MODIFIER;
case ELEMENT_TYPE_BYREF:
str = W("&");
goto MODIFIER;
MODIFIER:
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
appendStrW(out, str);
break;
default:
case ELEMENT_TYPE_SENTINEL:
case ELEMENT_TYPE_END:
_ASSERTE(!"Unknown Type");
return(typePtr);
break;
}
return(typePtr);
} // static PCCOR_SIGNATURE PrettyPrintType()
//*****************************************************************************
// Converts a com signature to a text signature.
//
// Note that this function DOES NULL terminate the result signature string.
//*****************************************************************************
LPCWSTR PrettyPrintSigLegacy(
PCCOR_SIGNATURE typePtr, // type to convert,
unsigned typeLen, // length of type
const WCHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMetaDataImport * pIMDI) // Import api to use.
{
return PrettyPrintSigWorker(typePtr, typeLen, name, out, pIMDI);
} // LPCWSTR PrettyPrintSigLegacy()
LPCWSTR PrettyPrintSigWorker(
PCCOR_SIGNATURE & typePtr, // type to convert,
size_t typeLen, // length of type
const WCHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMetaDataImport * pIMDI) // Import api to use.
{
out->Shrink(0);
unsigned numTyArgs = 0;
unsigned numArgs;
PCCOR_SIGNATURE typeEnd = typePtr + typeLen; // End of the signature.
if (name != 0) // 0 means a local var sig
{
// get the calling convention out
unsigned callConv = CorSigUncompressData(typePtr);
if (isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_FIELD))
{
PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
if (name != 0 && *name != 0)
{
appendStrW(out, W(" "));
appendStrW(out, name);
}
return(asStringW(out));
}
if (callConv & IMAGE_CEE_CS_CALLCONV_HASTHIS)
appendStrW(out, W("instance "));
if (callConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
{
appendStrW(out, W("generic "));
numTyArgs = CorSigUncompressData(typePtr);
}
static const WCHAR * const callConvNames[IMAGE_CEE_CS_CALLCONV_MAX] =
{
W(""),
W("unmanaged cdecl "),
W("unmanaged stdcall "),
W("unmanaged thiscall "),
W("unmanaged fastcall "),
W("vararg "),
W("<error> "),
W("<error> "),
W(""),
W(""),
W(""),
W("native vararg ")
};
if ((callConv & IMAGE_CEE_CS_CALLCONV_MASK) < IMAGE_CEE_CS_CALLCONV_MAX)
{
appendStrW(out, callConvNames[callConv & IMAGE_CEE_CS_CALLCONV_MASK]);
}
numArgs = CorSigUncompressData(typePtr);
// do return type
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
}
else
{
numArgs = CorSigUncompressData(typePtr);
}
if (name != 0 && *name != 0)
{
appendStrW(out, W(" "));
appendStrW(out, name);
}
appendStrW(out, W("("));
bool needComma = false;
while (numArgs)
{
if (typePtr >= typeEnd)
break;
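// ELEMENT_TYPE_SENTINEL marks where the fixed arguments end and the vararg arguments begin.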
if (*typePtr == ELEMENT_TYPE_SENTINEL)
{
if (needComma)
appendStrW(out, W(","));
appendStrW(out, W("..."));
typePtr++;
}
else
{
if (numArgs <= 0)
break;
if (needComma)
appendStrW(out, W(","));
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
--numArgs;
}
needComma = true;
}
appendStrW(out, W(")"));
return (asStringW(out));
} // LPCWSTR PrettyPrintSigWorker()
// Internal implementation of PrettyPrintSig().
HRESULT PrettyPrintSigWorkerInternal(
PCCOR_SIGNATURE & typePtr, // type to convert,
size_t typeLen, // length of type
const CHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMDInternalImport * pIMDI); // Import api to use.
static HRESULT PrettyPrintClass(
PCCOR_SIGNATURE &typePtr, // type to convert
PCCOR_SIGNATURE typeEnd, // end of the signature.
CQuickBytes *out, // where to put the pretty printed string
IMDInternalImport *pIMDI); // ptr to IMDInternal class with ComSig
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
#endif
//*****************************************************************************
//*****************************************************************************
// pretty prints 'type' to the buffer 'out' returns a pointer to the next type,
// or 0 on a format failure
__checkReturn
static HRESULT PrettyPrintTypeA(
PCCOR_SIGNATURE &typePtr, // type to convert,
size_t typeLen, // Maximum length of the type.
CQuickBytes *out, // where to put the pretty printed string
IMDInternalImport *pIMDI) // ptr to IMDInternal class with ComSig
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
mdToken tk; // A type's token.
const CHAR *str; // Temporary string.
HRESULT hr; // A result.
PCCOR_SIGNATURE typeEnd = typePtr + typeLen; // End of the signature.
unsigned __int8 elt = *typePtr++;
switch(elt) {
case ELEMENT_TYPE_VOID:
str = "void";
goto APPEND;
case ELEMENT_TYPE_BOOLEAN:
str = "bool";
goto APPEND;
case ELEMENT_TYPE_CHAR:
str = "wchar";
goto APPEND;
case ELEMENT_TYPE_I1:
str = "int8";
goto APPEND;
case ELEMENT_TYPE_U1:
str = "unsigned int8";
goto APPEND;
case ELEMENT_TYPE_I2:
str = "int16";
goto APPEND;
case ELEMENT_TYPE_U2:
str = "unsigned int16";
goto APPEND;
case ELEMENT_TYPE_I4:
str = "int32";
goto APPEND;
case ELEMENT_TYPE_U4:
str = "unsigned int32";
goto APPEND;
case ELEMENT_TYPE_I8:
str = "int64";
goto APPEND;
case ELEMENT_TYPE_U8:
str = "unsigned int64";
goto APPEND;
case ELEMENT_TYPE_R4:
str = "float32";
goto APPEND;
case ELEMENT_TYPE_R8:
str = "float64";
goto APPEND;
case ELEMENT_TYPE_U:
str = "unsigned int";
goto APPEND;
case ELEMENT_TYPE_I:
str = "int";
goto APPEND;
case ELEMENT_TYPE_OBJECT:
str = "class System.Object";
goto APPEND;
case ELEMENT_TYPE_STRING:
str = "class System.String";
goto APPEND;
case ELEMENT_TYPE_CANON_ZAPSIG:
str = "class System.__Canon";
goto APPEND;
case ELEMENT_TYPE_TYPEDBYREF:
str = "refany";
goto APPEND;
APPEND:
IfFailGo(appendStrA(out, str));
break;
case ELEMENT_TYPE_INTERNAL:
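// ELEMENT_TYPE_INTERNAL embeds a raw runtime type handle (MethodTable pointer) directly in the signature.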
void* pMT;
pMT = *((void* UNALIGNED *)typePtr);
typePtr += sizeof(void*);
CHAR tempBuffer[64];
sprintf_s(tempBuffer, 64, "pMT: %p", pMT);
IfFailGo(appendStrA(out, tempBuffer));
break;
case ELEMENT_TYPE_VALUETYPE:
str = "value class ";
goto DO_CLASS;
case ELEMENT_TYPE_CLASS:
str = "class ";
goto DO_CLASS;
DO_CLASS:
IfFailGo(appendStrA(out, str));
IfFailGo(PrettyPrintClass(typePtr, typeEnd, out, pIMDI));
break;
case ELEMENT_TYPE_CMOD_REQD:
str = "required_modifier ";
goto CMOD;
case ELEMENT_TYPE_CMOD_OPT:
str = "optional_modifier ";
goto CMOD;
CMOD:
IfFailGo(appendStrA(out, str));
IfFailGo(PrettyPrintClass(typePtr, typeEnd, out, pIMDI));
IfFailGo(appendStrA(out, " "));
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
break;
case ELEMENT_TYPE_SZARRAY:
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
IfFailGo(appendStrA(out, "[]"));
break;
case ELEMENT_TYPE_ARRAY:
{
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
unsigned rank = CorSigUncompressData(typePtr);
PREFIX_ASSUME(rank <= 0xffffff);
// <TODO>TODO what is the syntax for the rank 0 case? </TODO>
if (rank == 0)
{
IfFailGo(appendStrA(out, "[??]"));
}
else
{
_ASSERTE(rank != 0);
int* lowerBounds = (int*) _alloca(sizeof(int)*2*rank);
int* sizes = &lowerBounds[rank];
memset(lowerBounds, 0, sizeof(int)*2*rank);
unsigned numSizes = CorSigUncompressData(typePtr);
_ASSERTE(numSizes <= rank);
unsigned int i;
for(i =0; i < numSizes; i++)
sizes[i] = CorSigUncompressData(typePtr);
unsigned numLowBounds = CorSigUncompressData(typePtr);
_ASSERTE(numLowBounds <= rank);
for(i = 0; i < numLowBounds; i++)
lowerBounds[i] = CorSigUncompressData(typePtr);
IfFailGo(appendStrA(out, "["));
for(i = 0; i < rank; i++)
{
if (sizes[i] != 0 && lowerBounds[i] != 0)
{
appendStrNumA(out, lowerBounds[i]);
IfFailGo(appendStrA(out, "..."));
appendStrNumA(out, lowerBounds[i] + sizes[i] + 1);
}
if (i < rank-1)
IfFailGo(appendStrA(out, ","));
}
IfFailGo(appendStrA(out, "]"));
}
}
break;
case ELEMENT_TYPE_MVAR:
IfFailGo(appendStrA(out, "!!"));
appendStrNumA(out, CorSigUncompressData(typePtr));
break;
case ELEMENT_TYPE_VAR:
IfFailGo(appendStrA(out, "!"));
appendStrNumA(out, CorSigUncompressData(typePtr));
break;
case ELEMENT_TYPE_GENERICINST:
{
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
unsigned ntypars = CorSigUncompressData(typePtr);
IfFailGo(appendStrA(out, "<"));
for (unsigned i = 0; i < ntypars; i++)
{
if (i > 0)
IfFailGo(appendStrA(out, ","));
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
}
IfFailGo(appendStrA(out, ">"));
}
break;
case ELEMENT_TYPE_MODULE_ZAPSIG:
IfFailGo(appendStrA(out, "[module#"));
appendStrNumA(out, CorSigUncompressData(typePtr));
IfFailGo(appendStrA(out, ", token#"));
typePtr += CorSigUncompressToken(typePtr, &tk);
IfFailGo(appendStrHexA(out, tk));
IfFailGo(appendStrA(out, "]"));
break;
case ELEMENT_TYPE_FNPTR:
IfFailGo(appendStrA(out, "fnptr "));
IfFailGo(PrettyPrintSigWorkerInternal(typePtr, (typeEnd - typePtr), "", out,pIMDI));
break;
case ELEMENT_TYPE_NATIVE_VALUETYPE_ZAPSIG:
IfFailGo(appendStrA(out, "native "));
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
break;
// Modifiers or dependent types
case ELEMENT_TYPE_PINNED:
str = " pinned";
goto MODIFIER;
case ELEMENT_TYPE_PTR:
str = "*";
goto MODIFIER;
case ELEMENT_TYPE_BYREF:
str = "&";
goto MODIFIER;
MODIFIER:
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
IfFailGo(appendStrA(out, str));
break;
default:
case ELEMENT_TYPE_SENTINEL:
case ELEMENT_TYPE_END:
hr = E_INVALIDARG;
break;
}
ErrExit:
return hr;
} // PrettyPrintTypeA
#ifdef _PREFAST_
#pragma warning(pop)
#endif
// pretty prints the class 'type' to the buffer 'out'
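// Accepts TypeDef, TypeRef and TypeSpec tokens; a TypeSpec is resolved to its signature and
// printed by recursing into PrettyPrintTypeA.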
static HRESULT PrettyPrintClass(
PCCOR_SIGNATURE &typePtr, // type to convert
PCCOR_SIGNATURE typeEnd, // end of the signature.
CQuickBytes *out, // where to put the pretty printed string
IMDInternalImport *pIMDI) // ptr to IMDInternal class with ComSig
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
mdToken tk;
const CHAR *str; // type's token.
LPCUTF8 pNS; // type's namespace.
LPCUTF8 pN; // type's name.
HRESULT hr; // result
IfFailGo(CorSigUncompressToken_EndPtr(typePtr, typeEnd, &tk));
str = "<UNKNOWN>";
if (TypeFromToken(tk) == mdtTypeSpec)
{
ULONG cSig;
PCCOR_SIGNATURE sig;
IfFailGo(pIMDI->GetSigFromToken(tk, &cSig, &sig));
IfFailGo(PrettyPrintTypeA(sig, cSig, out, pIMDI));
}
else
{
if (TypeFromToken(tk) == mdtTypeRef)
{
//<TODO>@consider: assembly name?</TODO>
if (FAILED(pIMDI->GetNameOfTypeRef(tk, &pNS, &pN)))
{
pNS = pN = "Invalid TypeRef record";
}
}
else
{
_ASSERTE(TypeFromToken(tk) == mdtTypeDef);
if (FAILED(pIMDI->GetNameOfTypeDef(tk, &pN, &pNS)))
{
pNS = pN = "Invalid TypeDef record";
}
}
if (pNS && *pNS)
{
IfFailGo(appendStrA(out, pNS));
IfFailGo(appendStrA(out, NAMESPACE_SEPARATOR_STR));
}
IfFailGo(appendStrA(out, pN));
}
return S_OK;
ErrExit:
return hr;
} // static HRESULT PrettyPrintClass()
//*****************************************************************************
// Converts a com signature to a text signature.
//
// Note that this function DOES NULL terminate the result signature string.
//*****************************************************************************
HRESULT PrettyPrintSigInternalLegacy(
PCCOR_SIGNATURE typePtr, // type to convert,
unsigned typeLen, // length of type
const CHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMDInternalImport * pIMDI) // Import api to use.
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
return PrettyPrintSigWorkerInternal(typePtr, typeLen, name, out, pIMDI);
} // HRESULT PrettyPrintSigInternalLegacy()
HRESULT PrettyPrintSigWorkerInternal(
PCCOR_SIGNATURE & typePtr, // type to convert,
size_t typeLen, // length of type
const CHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMDInternalImport * pIMDI) // Import api to use.
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
HRESULT hr = S_OK;
    unsigned numArgs; // Count of arguments to function, or count of local vars.
unsigned numTyArgs = 0;
PCCOR_SIGNATURE typeEnd = typePtr + typeLen;
bool needComma = false;
out->Shrink(0);
if (name != 0) // 0 means a local var sig
{
// get the calling convention out
unsigned callConv = CorSigUncompressData(typePtr);
if (isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_FIELD))
{
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
if (name != 0 && *name != 0)
{
IfFailGo(appendStrA(out, " "));
IfFailGo(appendStrA(out, name));
}
goto ErrExit;
}
if (callConv & IMAGE_CEE_CS_CALLCONV_HASTHIS)
IfFailGo(appendStrA(out, "instance "));
if (callConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
{
IfFailGo(appendStrA(out, "generic "));
numTyArgs = CorSigUncompressData(typePtr);
}
static const CHAR* const callConvNames[IMAGE_CEE_CS_CALLCONV_MAX] =
{
"",
"unmanaged cdecl ",
"unmanaged stdcall ",
"unmanaged thiscall ",
"unmanaged fastcall ",
"vararg ",
"<error> ",
"<error> ",
"",
"",
"",
"native vararg "
};
if ((callConv & IMAGE_CEE_CS_CALLCONV_MASK) < IMAGE_CEE_CS_CALLCONV_MAX)
{
appendStrA(out, callConvNames[callConv & IMAGE_CEE_CS_CALLCONV_MASK]);
}
numArgs = CorSigUncompressData(typePtr);
// do return type
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
}
else
{
numArgs = CorSigUncompressData(typePtr);
}
if (name != 0 && *name != 0)
{
IfFailGo(appendStrA(out, " "));
IfFailGo(appendStrA(out, name));
}
IfFailGo(appendStrA(out, "("));
while (numArgs)
{
if (typePtr >= typeEnd)
break;
if (*typePtr == ELEMENT_TYPE_SENTINEL)
{
if (needComma)
IfFailGo(appendStrA(out, ","));
IfFailGo(appendStrA(out, "..."));
++typePtr;
}
else
{
if (needComma)
IfFailGo(appendStrA(out, ","));
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
--numArgs;
}
needComma = true;
}
IfFailGo(appendStrA(out, ")"));
if (asStringA(out) == 0)
IfFailGo(E_OUTOFMEMORY);
ErrExit:
return hr;
} // HRESULT PrettyPrintSigWorkerInternal()
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// This code supports formatting a method and its signature in a friendly
// and consistent format.
//
//*****************************************************************************
#include "stdafx.h"
#include "prettyprintsig.h"
#include "utilcode.h"
#include "metadata.h"
#include "corpriv.h"
/***********************************************************************/
// Null-terminates the string held in "out"
static WCHAR* asStringW(CQuickBytes *out)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return NULL;);
}
CONTRACTL_END
SIZE_T oldSize = out->Size();
if (FAILED(out->ReSizeNoThrow(oldSize + 1)))
return 0;
WCHAR * cur = (WCHAR *) ((BYTE *) out->Ptr() + oldSize);
*cur = 0;
return((WCHAR*) out->Ptr());
} // static WCHAR* asStringW()
// Null-terminates the string held in "out"
static CHAR* asStringA(CQuickBytes *out)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return NULL;);
}
CONTRACTL_END
SIZE_T oldSize = out->Size();
if (FAILED(out->ReSizeNoThrow(oldSize + 1)))
return 0;
CHAR * cur = (CHAR *) ((BYTE *) out->Ptr() + oldSize);
*cur = 0;
return((CHAR*) out->Ptr());
} // static CHAR* asStringA()
/***********************************************************************/
// Appends the str to "out"
// The string held in "out" is not NULL-terminated. asStringW() needs to
// be called for the NULL-termination
static HRESULT appendStrW(CQuickBytes *out, const WCHAR* str)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
SIZE_T len = wcslen(str) * sizeof(WCHAR);
SIZE_T oldSize = out->Size();
if (FAILED(out->ReSizeNoThrow(oldSize + len)))
return E_OUTOFMEMORY;
WCHAR * cur = (WCHAR *) ((BYTE *) out->Ptr() + oldSize);
memcpy(cur, str, len);
// Note no trailing null!
return S_OK;
} // static HRESULT appendStrW()
// Appends the str to "out"
// The string held in "out" is not NULL-terminated. asStringA() needs to
// be called for the NULL-termination
static HRESULT appendStrA(CQuickBytes *out, const CHAR* str)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
SIZE_T len = strlen(str) * sizeof(CHAR);
SIZE_T oldSize = out->Size();
if (FAILED(out->ReSizeNoThrow(oldSize + len)))
return E_OUTOFMEMORY;
CHAR * cur = (CHAR *) ((BYTE *) out->Ptr() + oldSize);
memcpy(cur, str, len);
// Note no trailing null!
return S_OK;
} // static HRESULT appendStrA()
static HRESULT appendStrNumW(CQuickBytes *out, int num)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
WCHAR buff[32];
swprintf_s(buff, 32, W("%d"), num);
return appendStrW(out, buff);
} // static HRESULT appendStrNumW()
static HRESULT appendStrNumA(CQuickBytes *out, int num)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
CHAR buff[32];
sprintf_s(buff, 32, "%d", num);
return appendStrA(out, buff);
} // static HRESULT appendStrNumA()
static HRESULT appendStrHexW(CQuickBytes *out, int num)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
WCHAR buff[32];
swprintf_s(buff, 32, W("%08X"), num);
return appendStrW(out, buff);
} // static HRESULT appendStrHexW()
static HRESULT appendStrHexA(CQuickBytes *out, int num)
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
CHAR buff[32];
sprintf_s(buff, 32, "%08X", num);
return appendStrA(out, buff);
} // static HRESULT appendStrHexA()
/***********************************************************************/
LPCWSTR PrettyPrintSigWorker(
PCCOR_SIGNATURE & typePtr, // type to convert,
size_t typeLen, // length of type
const WCHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMetaDataImport * pIMDI); // Import api to use.
//*****************************************************************************
//*****************************************************************************
// pretty prints 'type' to the buffer 'out' returns a pointer to the next type,
// or 0 on a format failure
static PCCOR_SIGNATURE PrettyPrintType(
PCCOR_SIGNATURE typePtr, // type to convert,
size_t typeLen, // Maximum length of the type
CQuickBytes * out, // where to put the pretty printed string
IMetaDataImport * pIMDI) // ptr to IMDInternal class with ComSig
{
mdToken tk;
const WCHAR * str;
WCHAR rcname[MAX_CLASS_NAME];
HRESULT hr;
unsigned __int8 elt = *typePtr++;
PCCOR_SIGNATURE typeEnd = typePtr + typeLen;
switch(elt)
{
case ELEMENT_TYPE_VOID:
str = W("void");
goto APPEND;
case ELEMENT_TYPE_BOOLEAN:
str = W("bool");
goto APPEND;
case ELEMENT_TYPE_CHAR:
str = W("wchar");
goto APPEND;
case ELEMENT_TYPE_I1:
str = W("int8");
goto APPEND;
case ELEMENT_TYPE_U1:
str = W("unsigned int8");
goto APPEND;
case ELEMENT_TYPE_I2:
str = W("int16");
goto APPEND;
case ELEMENT_TYPE_U2:
str = W("unsigned int16");
goto APPEND;
case ELEMENT_TYPE_I4:
str = W("int32");
goto APPEND;
case ELEMENT_TYPE_U4:
str = W("unsigned int32");
goto APPEND;
case ELEMENT_TYPE_I8:
str = W("int64");
goto APPEND;
case ELEMENT_TYPE_U8:
str = W("unsigned int64");
goto APPEND;
case ELEMENT_TYPE_R4:
str = W("float32");
goto APPEND;
case ELEMENT_TYPE_R8:
str = W("float64");
goto APPEND;
case ELEMENT_TYPE_U:
str = W("unsigned int");
goto APPEND;
case ELEMENT_TYPE_I:
str = W("int");
goto APPEND;
case ELEMENT_TYPE_OBJECT:
str = W("class System.Object");
goto APPEND;
case ELEMENT_TYPE_STRING:
str = W("class System.String");
goto APPEND;
case ELEMENT_TYPE_CANON_ZAPSIG:
str = W("class System.__Canon");
goto APPEND;
case ELEMENT_TYPE_TYPEDBYREF:
str = W("refany");
goto APPEND;
APPEND:
appendStrW(out, str);
break;
case ELEMENT_TYPE_VALUETYPE:
str = W("value class ");
goto DO_CLASS;
case ELEMENT_TYPE_CLASS:
str = W("class ");
goto DO_CLASS;
DO_CLASS:
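        // Decode the class/value-type token and resolve its name through the IMetaDataImport API.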
typePtr += CorSigUncompressToken(typePtr, &tk);
appendStrW(out, str);
rcname[0] = 0;
str = rcname;
if (TypeFromToken(tk) == mdtTypeRef)
{
hr = pIMDI->GetTypeRefProps(tk, 0, rcname, ARRAY_SIZE(rcname), 0);
}
else if (TypeFromToken(tk) == mdtTypeDef)
{
hr = pIMDI->GetTypeDefProps(tk, rcname, ARRAY_SIZE(rcname), 0, 0, 0);
}
else
{
_ASSERTE(!"Unknown token type encountered in signature.");
str = W("<UNKNOWN>");
}
appendStrW(out, str);
break;
case ELEMENT_TYPE_SZARRAY:
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
appendStrW(out, W("[]"));
break;
case ELEMENT_TYPE_ARRAY:
{
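            // Array shape per ECMA-335 follows the element type: rank, then a count of sizes and
            // the sizes, then a count of lower bounds and the bounds.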
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
unsigned rank = CorSigUncompressData(typePtr);
PREFIX_ASSUME(rank <= 0xffffff);
// <TODO>TODO what is the syntax for the rank 0 case? </TODO>
if (rank == 0)
{
appendStrW(out, W("[??]"));
}
else
{
_ASSERTE(rank != 0);
int* lowerBounds = (int*) _alloca(sizeof(int)*2*rank);
int* sizes = &lowerBounds[rank];
memset(lowerBounds, 0, sizeof(int)*2*rank);
unsigned numSizes = CorSigUncompressData(typePtr);
_ASSERTE(numSizes <= rank);
unsigned int i;
for(i =0; i < numSizes; i++)
sizes[i] = CorSigUncompressData(typePtr);
unsigned numLowBounds = CorSigUncompressData(typePtr);
_ASSERTE(numLowBounds <= rank);
for(i = 0; i < numLowBounds; i++)
lowerBounds[i] = CorSigUncompressData(typePtr);
appendStrW(out, W("["));
for(i = 0; i < rank; i++)
{
if (sizes[i] != 0 && lowerBounds[i] != 0)
{
appendStrNumW(out, lowerBounds[i]);
appendStrW(out, W("..."));
appendStrNumW(out, lowerBounds[i] + sizes[i] + 1);
}
if (i < rank-1)
appendStrW(out, W(","));
}
appendStrW(out, W("]"));
}
}
break;
case ELEMENT_TYPE_MVAR:
appendStrW(out, W("!!"));
appendStrNumW(out, CorSigUncompressData(typePtr));
break;
case ELEMENT_TYPE_VAR:
appendStrW(out, W("!"));
appendStrNumW(out, CorSigUncompressData(typePtr));
break;
case ELEMENT_TYPE_GENERICINST:
{
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
unsigned ntypars = CorSigUncompressData(typePtr);
appendStrW(out, W("<"));
for (unsigned i = 0; i < ntypars; i++)
{
if (i > 0)
appendStrW(out, W(","));
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
}
appendStrW(out, W(">"));
}
break;
case ELEMENT_TYPE_MODULE_ZAPSIG:
appendStrW(out, W("[module#"));
appendStrNumW(out, CorSigUncompressData(typePtr));
appendStrW(out, W(", token#"));
typePtr += CorSigUncompressToken(typePtr, &tk);
appendStrHexW(out, tk);
appendStrW(out, W("]"));
break;
case ELEMENT_TYPE_FNPTR:
appendStrW(out, W("fnptr "));
PrettyPrintSigWorker(typePtr, (typeEnd - typePtr), W(""), out, pIMDI);
break;
case ELEMENT_TYPE_NATIVE_VALUETYPE_ZAPSIG:
appendStrW(out, W("native "));
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
break;
        // Modifiers or dependent types
case ELEMENT_TYPE_PINNED:
str = W(" pinned");
goto MODIFIER;
case ELEMENT_TYPE_PTR:
str = W("*");
goto MODIFIER;
case ELEMENT_TYPE_BYREF:
str = W("&");
goto MODIFIER;
MODIFIER:
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
appendStrW(out, str);
break;
default:
case ELEMENT_TYPE_SENTINEL:
case ELEMENT_TYPE_END:
_ASSERTE(!"Unknown Type");
return(typePtr);
break;
}
return(typePtr);
} // static PCCOR_SIGNATURE PrettyPrintType()
//*****************************************************************************
// Converts a com signature to a text signature.
//
// Note that this function DOES NULL terminate the result signature string.
//*****************************************************************************
LPCWSTR PrettyPrintSigLegacy(
PCCOR_SIGNATURE typePtr, // type to convert,
unsigned typeLen, // length of type
const WCHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMetaDataImport * pIMDI) // Import api to use.
{
return PrettyPrintSigWorker(typePtr, typeLen, name, out, pIMDI);
} // LPCWSTR PrettyPrintSigLegacy()
LPCWSTR PrettyPrintSigWorker(
PCCOR_SIGNATURE & typePtr, // type to convert,
size_t typeLen, // length of type
const WCHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMetaDataImport * pIMDI) // Import api to use.
{
out->Shrink(0);
unsigned numTyArgs = 0;
unsigned numArgs;
PCCOR_SIGNATURE typeEnd = typePtr + typeLen; // End of the signature.
if (name != 0) // 0 means a local var sig
{
// get the calling convention out
unsigned callConv = CorSigUncompressData(typePtr);
if (isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_FIELD))
{
PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
if (name != 0 && *name != 0)
{
appendStrW(out, W(" "));
appendStrW(out, name);
}
return(asStringW(out));
}
if (callConv & IMAGE_CEE_CS_CALLCONV_HASTHIS)
appendStrW(out, W("instance "));
if (callConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
{
appendStrW(out, W("generic "));
numTyArgs = CorSigUncompressData(typePtr);
}
static const WCHAR * const callConvNames[IMAGE_CEE_CS_CALLCONV_MAX] =
{
W(""),
W("unmanaged cdecl "),
W("unmanaged stdcall "),
W("unmanaged thiscall "),
W("unmanaged fastcall "),
W("vararg "),
W("<error> "),
W("<error> "),
W(""),
W(""),
W(""),
W("native vararg ")
};
if ((callConv & IMAGE_CEE_CS_CALLCONV_MASK) < IMAGE_CEE_CS_CALLCONV_MAX)
{
appendStrW(out, callConvNames[callConv & IMAGE_CEE_CS_CALLCONV_MASK]);
}
numArgs = CorSigUncompressData(typePtr);
// do return type
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
}
else
{
numArgs = CorSigUncompressData(typePtr);
}
if (name != 0 && *name != 0)
{
appendStrW(out, W(" "));
appendStrW(out, name);
}
appendStrW(out, W("("));
bool needComma = false;
while (numArgs)
{
if (typePtr >= typeEnd)
break;
if (*typePtr == ELEMENT_TYPE_SENTINEL)
{
if (needComma)
appendStrW(out, W(","));
appendStrW(out, W("..."));
typePtr++;
}
else
{
if (numArgs <= 0)
break;
if (needComma)
appendStrW(out, W(","));
typePtr = PrettyPrintType(typePtr, (typeEnd - typePtr), out, pIMDI);
--numArgs;
}
needComma = true;
}
appendStrW(out, W(")"));
return (asStringW(out));
} // LPCWSTR PrettyPrintSigWorker()
// Internal implementation of PrettyPrintSig().
HRESULT PrettyPrintSigWorkerInternal(
PCCOR_SIGNATURE & typePtr, // type to convert,
size_t typeLen, // length of type
const CHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMDInternalImport * pIMDI); // Import api to use.
static HRESULT PrettyPrintClass(
PCCOR_SIGNATURE &typePtr, // type to convert
PCCOR_SIGNATURE typeEnd, // end of the signature.
CQuickBytes *out, // where to put the pretty printed string
IMDInternalImport *pIMDI); // ptr to IMDInternal class with ComSig
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
#endif
//*****************************************************************************
//*****************************************************************************
// pretty prints 'type' to the buffer 'out', advancing typePtr past the type;
// returns an HRESULT (a failure code on a format failure)
__checkReturn
static HRESULT PrettyPrintTypeA(
PCCOR_SIGNATURE &typePtr, // type to convert,
size_t typeLen, // Maximum length of the type.
CQuickBytes *out, // where to put the pretty printed string
IMDInternalImport *pIMDI) // ptr to IMDInternal class with ComSig
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
mdToken tk; // A type's token.
const CHAR *str; // Temporary string.
HRESULT hr; // A result.
PCCOR_SIGNATURE typeEnd = typePtr + typeLen; // End of the signature.
unsigned __int8 elt = *typePtr++;
switch(elt) {
case ELEMENT_TYPE_VOID:
str = "void";
goto APPEND;
case ELEMENT_TYPE_BOOLEAN:
str = "bool";
goto APPEND;
case ELEMENT_TYPE_CHAR:
str = "wchar";
goto APPEND;
case ELEMENT_TYPE_I1:
str = "int8";
goto APPEND;
case ELEMENT_TYPE_U1:
str = "unsigned int8";
goto APPEND;
case ELEMENT_TYPE_I2:
str = "int16";
goto APPEND;
case ELEMENT_TYPE_U2:
str = "unsigned int16";
goto APPEND;
case ELEMENT_TYPE_I4:
str = "int32";
goto APPEND;
case ELEMENT_TYPE_U4:
str = "unsigned int32";
goto APPEND;
case ELEMENT_TYPE_I8:
str = "int64";
goto APPEND;
case ELEMENT_TYPE_U8:
str = "unsigned int64";
goto APPEND;
case ELEMENT_TYPE_R4:
str = "float32";
goto APPEND;
case ELEMENT_TYPE_R8:
str = "float64";
goto APPEND;
case ELEMENT_TYPE_U:
str = "unsigned int";
goto APPEND;
case ELEMENT_TYPE_I:
str = "int";
goto APPEND;
case ELEMENT_TYPE_OBJECT:
str = "class System.Object";
goto APPEND;
case ELEMENT_TYPE_STRING:
str = "class System.String";
goto APPEND;
case ELEMENT_TYPE_CANON_ZAPSIG:
str = "class System.__Canon";
goto APPEND;
case ELEMENT_TYPE_TYPEDBYREF:
str = "refany";
goto APPEND;
APPEND:
IfFailGo(appendStrA(out, str));
break;
case ELEMENT_TYPE_INTERNAL:
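        // ELEMENT_TYPE_INTERNAL embeds a raw type handle (a MethodTable pointer) directly in the
        // signature, so there is no metadata token to resolve; just print the pointer value.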
void* pMT;
pMT = *((void* UNALIGNED *)typePtr);
typePtr += sizeof(void*);
CHAR tempBuffer[64];
sprintf_s(tempBuffer, 64, "pMT: %p", pMT);
IfFailGo(appendStrA(out, tempBuffer));
break;
case ELEMENT_TYPE_VALUETYPE:
str = "value class ";
goto DO_CLASS;
case ELEMENT_TYPE_CLASS:
str = "class ";
goto DO_CLASS;
DO_CLASS:
IfFailGo(appendStrA(out, str));
IfFailGo(PrettyPrintClass(typePtr, typeEnd, out, pIMDI));
break;
case ELEMENT_TYPE_CMOD_REQD:
str = "required_modifier ";
goto CMOD;
case ELEMENT_TYPE_CMOD_OPT:
str = "optional_modifier ";
goto CMOD;
CMOD:
IfFailGo(appendStrA(out, str));
IfFailGo(PrettyPrintClass(typePtr, typeEnd, out, pIMDI));
IfFailGo(appendStrA(out, " "));
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
break;
case ELEMENT_TYPE_SZARRAY:
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
IfFailGo(appendStrA(out, "[]"));
break;
case ELEMENT_TYPE_ARRAY:
{
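            // Array shape per ECMA-335 follows the element type: rank, then a count of sizes and
            // the sizes, then a count of lower bounds and the bounds.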
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
unsigned rank = CorSigUncompressData(typePtr);
PREFIX_ASSUME(rank <= 0xffffff);
// <TODO>TODO what is the syntax for the rank 0 case? </TODO>
if (rank == 0)
{
IfFailGo(appendStrA(out, "[??]"));
}
else
{
_ASSERTE(rank != 0);
int* lowerBounds = (int*) _alloca(sizeof(int)*2*rank);
int* sizes = &lowerBounds[rank];
memset(lowerBounds, 0, sizeof(int)*2*rank);
unsigned numSizes = CorSigUncompressData(typePtr);
_ASSERTE(numSizes <= rank);
unsigned int i;
for(i =0; i < numSizes; i++)
sizes[i] = CorSigUncompressData(typePtr);
unsigned numLowBounds = CorSigUncompressData(typePtr);
_ASSERTE(numLowBounds <= rank);
for(i = 0; i < numLowBounds; i++)
lowerBounds[i] = CorSigUncompressData(typePtr);
IfFailGo(appendStrA(out, "["));
for(i = 0; i < rank; i++)
{
if (sizes[i] != 0 && lowerBounds[i] != 0)
{
appendStrNumA(out, lowerBounds[i]);
IfFailGo(appendStrA(out, "..."));
appendStrNumA(out, lowerBounds[i] + sizes[i] + 1);
}
if (i < rank-1)
IfFailGo(appendStrA(out, ","));
}
IfFailGo(appendStrA(out, "]"));
}
}
break;
case ELEMENT_TYPE_MVAR:
IfFailGo(appendStrA(out, "!!"));
appendStrNumA(out, CorSigUncompressData(typePtr));
break;
case ELEMENT_TYPE_VAR:
IfFailGo(appendStrA(out, "!"));
appendStrNumA(out, CorSigUncompressData(typePtr));
break;
case ELEMENT_TYPE_GENERICINST:
{
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
unsigned ntypars = CorSigUncompressData(typePtr);
IfFailGo(appendStrA(out, "<"));
for (unsigned i = 0; i < ntypars; i++)
{
if (i > 0)
IfFailGo(appendStrA(out, ","));
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
}
IfFailGo(appendStrA(out, ">"));
}
break;
case ELEMENT_TYPE_MODULE_ZAPSIG:
IfFailGo(appendStrA(out, "[module#"));
appendStrNumA(out, CorSigUncompressData(typePtr));
IfFailGo(appendStrA(out, ", token#"));
typePtr += CorSigUncompressToken(typePtr, &tk);
IfFailGo(appendStrHexA(out, tk));
IfFailGo(appendStrA(out, "]"));
break;
case ELEMENT_TYPE_FNPTR:
IfFailGo(appendStrA(out, "fnptr "));
IfFailGo(PrettyPrintSigWorkerInternal(typePtr, (typeEnd - typePtr), "", out,pIMDI));
break;
case ELEMENT_TYPE_NATIVE_VALUETYPE_ZAPSIG:
IfFailGo(appendStrA(out, "native "));
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
break;
// Modifiers or dependent types
case ELEMENT_TYPE_PINNED:
str = " pinned";
goto MODIFIER;
case ELEMENT_TYPE_PTR:
str = "*";
goto MODIFIER;
case ELEMENT_TYPE_BYREF:
str = "&";
goto MODIFIER;
MODIFIER:
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
IfFailGo(appendStrA(out, str));
break;
default:
case ELEMENT_TYPE_SENTINEL:
case ELEMENT_TYPE_END:
hr = E_INVALIDARG;
break;
}
ErrExit:
return hr;
} // PrettyPrintTypeA
#ifdef _PREFAST_
#pragma warning(pop)
#endif
// pretty prints the class 'type' to the buffer 'out'
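// Accepts TypeDef, TypeRef and TypeSpec tokens; a TypeSpec is resolved to its signature and
// printed by recursing into PrettyPrintTypeA.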
static HRESULT PrettyPrintClass(
PCCOR_SIGNATURE &typePtr, // type to convert
PCCOR_SIGNATURE typeEnd, // end of the signature.
CQuickBytes *out, // where to put the pretty printed string
IMDInternalImport *pIMDI) // ptr to IMDInternal class with ComSig
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
mdToken tk;
const CHAR *str; // type's token.
LPCUTF8 pNS; // type's namespace.
LPCUTF8 pN; // type's name.
HRESULT hr; // result
IfFailGo(CorSigUncompressToken_EndPtr(typePtr, typeEnd, &tk));
str = "<UNKNOWN>";
if (TypeFromToken(tk) == mdtTypeSpec)
{
ULONG cSig;
PCCOR_SIGNATURE sig;
IfFailGo(pIMDI->GetSigFromToken(tk, &cSig, &sig));
IfFailGo(PrettyPrintTypeA(sig, cSig, out, pIMDI));
}
else
{
if (TypeFromToken(tk) == mdtTypeRef)
{
//<TODO>@consider: assembly name?</TODO>
if (FAILED(pIMDI->GetNameOfTypeRef(tk, &pNS, &pN)))
{
pNS = pN = "Invalid TypeRef record";
}
}
else
{
_ASSERTE(TypeFromToken(tk) == mdtTypeDef);
if (FAILED(pIMDI->GetNameOfTypeDef(tk, &pN, &pNS)))
{
pNS = pN = "Invalid TypeDef record";
}
}
if (pNS && *pNS)
{
IfFailGo(appendStrA(out, pNS));
IfFailGo(appendStrA(out, NAMESPACE_SEPARATOR_STR));
}
IfFailGo(appendStrA(out, pN));
}
return S_OK;
ErrExit:
return hr;
} // static HRESULT PrettyPrintClass()
//*****************************************************************************
// Converts a com signature to a text signature.
//
// Note that this function DOES NULL terminate the result signature string.
//*****************************************************************************
HRESULT PrettyPrintSigInternalLegacy(
PCCOR_SIGNATURE typePtr, // type to convert,
unsigned typeLen, // length of type
const CHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMDInternalImport * pIMDI) // Import api to use.
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
return PrettyPrintSigWorkerInternal(typePtr, typeLen, name, out, pIMDI);
} // HRESULT PrettyPrintSigInternalLegacy()
HRESULT PrettyPrintSigWorkerInternal(
PCCOR_SIGNATURE & typePtr, // type to convert,
size_t typeLen, // length of type
const CHAR * name, // can be "", the name of the method for this sig
CQuickBytes * out, // where to put the pretty printed string
IMDInternalImport * pIMDI) // Import api to use.
{
CONTRACTL
{
NOTHROW;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END
HRESULT hr = S_OK;
    unsigned numArgs; // Count of arguments to function, or count of local vars.
unsigned numTyArgs = 0;
PCCOR_SIGNATURE typeEnd = typePtr + typeLen;
bool needComma = false;
out->Shrink(0);
if (name != 0) // 0 means a local var sig
{
// get the calling convention out
unsigned callConv = CorSigUncompressData(typePtr);
if (isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_FIELD))
{
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
if (name != 0 && *name != 0)
{
IfFailGo(appendStrA(out, " "));
IfFailGo(appendStrA(out, name));
}
goto ErrExit;
}
if (callConv & IMAGE_CEE_CS_CALLCONV_HASTHIS)
IfFailGo(appendStrA(out, "instance "));
if (callConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
{
IfFailGo(appendStrA(out, "generic "));
numTyArgs = CorSigUncompressData(typePtr);
}
static const CHAR* const callConvNames[IMAGE_CEE_CS_CALLCONV_MAX] =
{
"",
"unmanaged cdecl ",
"unmanaged stdcall ",
"unmanaged thiscall ",
"unmanaged fastcall ",
"vararg ",
"<error> ",
"<error> ",
"",
"",
"",
"native vararg "
};
if ((callConv & IMAGE_CEE_CS_CALLCONV_MASK) < IMAGE_CEE_CS_CALLCONV_MAX)
{
appendStrA(out, callConvNames[callConv & IMAGE_CEE_CS_CALLCONV_MASK]);
}
numArgs = CorSigUncompressData(typePtr);
// do return type
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
}
else
{
numArgs = CorSigUncompressData(typePtr);
}
if (name != 0 && *name != 0)
{
IfFailGo(appendStrA(out, " "));
IfFailGo(appendStrA(out, name));
}
IfFailGo(appendStrA(out, "("));
while (numArgs)
{
if (typePtr >= typeEnd)
break;
if (*typePtr == ELEMENT_TYPE_SENTINEL)
{
if (needComma)
IfFailGo(appendStrA(out, ","));
IfFailGo(appendStrA(out, "..."));
++typePtr;
}
else
{
if (needComma)
IfFailGo(appendStrA(out, ","));
IfFailGo(PrettyPrintTypeA(typePtr, (typeEnd - typePtr), out, pIMDI));
--numArgs;
}
needComma = true;
}
IfFailGo(appendStrA(out, ")"));
if (asStringA(out) == 0)
IfFailGo(E_OUTOFMEMORY);
ErrExit:
return hr;
} // HRESULT PrettyPrintSigWorkerInternal()
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/c_runtime/strncpy/test1/test1.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose:
** Test to see that you can copy a portion of a string into a new buffer.
** Also check that the strncpy function doesn't overflow when it is used.
** Finally check that if the number of characters given is greater than the
** amount to copy, the destination buffer is padded with NULLs.
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(c_runtime_strncpy_test1_paltest_strncpy_test1, "c_runtime/strncpy/test1/paltest_strncpy_test1")
{
char dest[80];
char *result = "foobar";
char *str = "foobar\0baz";
char *ret;
int i;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
for (i=0; i<80; i++)
{
dest[i] = 'x';
}
ret = strncpy(dest, str, 3);
if (ret != dest)
{
Fail("Expected strncpy to return %p, got %p!\n", dest, ret);
}
if (strncmp(dest, result, 3) != 0)
{
Fail("Expected strncpy to give \"%s\", got \"%s\"!\n", result, dest);
}
if (dest[3] != 'x')
{
Fail("strncpy overflowed!\n");
}
ret = strncpy(dest, str, 40);
if (ret != dest)
{
Fail("Expected strncpy to return %p, got %p!\n", dest, ret);
}
if (strcmp(dest, result) != 0)
{
Fail("Expected strncpy to give \"%s\", got \"%s\"!\n", result, dest);
}
for (i=strlen(str); i<40; i++)
{
if (dest[i] != 0)
{
Fail("strncpy failed to pad the destination with NULLs!\n");
}
}
if (dest[40] != 'x')
{
Fail("strncpy overflowed!\n");
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose:
** Test to see that you can copy a portion of a string into a new buffer.
** Also check that the strncpy function doesn't overflow when it is used.
** Finally check that if the number of characters given is greater than the
** amount to copy, the destination buffer is padded with NULLs.
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(c_runtime_strncpy_test1_paltest_strncpy_test1, "c_runtime/strncpy/test1/paltest_strncpy_test1")
{
char dest[80];
char *result = "foobar";
char *str = "foobar\0baz";
char *ret;
int i;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
for (i=0; i<80; i++)
{
dest[i] = 'x';
}
ret = strncpy(dest, str, 3);
if (ret != dest)
{
Fail("Expected strncpy to return %p, got %p!\n", dest, ret);
}
if (strncmp(dest, result, 3) != 0)
{
Fail("Expected strncpy to give \"%s\", got \"%s\"!\n", result, dest);
}
if (dest[3] != 'x')
{
Fail("strncpy overflowed!\n");
}
ret = strncpy(dest, str, 40);
if (ret != dest)
{
Fail("Expected strncpy to return %p, got %p!\n", dest, ret);
}
if (strcmp(dest, result) != 0)
{
Fail("Expected strncpy to give \"%s\", got \"%s\"!\n", result, dest);
}
for (i=strlen(str); i<40; i++)
{
if (dest[i] != 0)
{
Fail("strncpy failed to pad the destination with NULLs!\n");
}
}
if (dest[40] != 'x')
{
Fail("strncpy overflowed!\n");
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/mono/mono/mini/debugger-agent-external.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#ifndef __MONO_DEBUGGER_AGENT_H__
#define __MONO_DEBUGGER_AGENT_H__
#include "mini.h"
#include "mono/component/debugger.h"
MONO_API void
mono_debugger_agent_init (void);
MONO_API void
mono_debugger_agent_parse_options (char *options);
MONO_API MONO_RT_EXTERNAL_ONLY gboolean
mono_debugger_agent_transport_handshake (void);
MONO_API void
mono_debugger_agent_register_transport (DebuggerTransport *trans);
MONO_COMPONENT_API DebuggerTransport *
mono_debugger_agent_get_transports (int *ntrans);
MONO_COMPONENT_API char *
mono_debugger_agent_get_sdb_options (void);
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#ifndef __MONO_DEBUGGER_AGENT_H__
#define __MONO_DEBUGGER_AGENT_H__
#include "mini.h"
#include "mono/component/debugger.h"
MONO_API void
mono_debugger_agent_init (void);
MONO_API void
mono_debugger_agent_parse_options (char *options);
MONO_API MONO_RT_EXTERNAL_ONLY gboolean
mono_debugger_agent_transport_handshake (void);
MONO_API void
mono_debugger_agent_register_transport (DebuggerTransport *trans);
MONO_COMPONENT_API DebuggerTransport *
mono_debugger_agent_get_transports (int *ntrans);
MONO_COMPONENT_API char *
mono_debugger_agent_get_sdb_options (void);
#endif
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/locale_info/GetLocaleInfoW/test2/test2.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test2.c
**
** Purpose: Tests that GetLocaleInfoW will correctly return the amount of
** buffer space required. Also tests that it correctly handles a
** buffer of insufficient space.
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(locale_info_GetLocaleInfoW_test2_paltest_getlocaleinfow_test2, "locale_info/GetLocaleInfoW/test2/paltest_getlocaleinfow_test2")
{
WCHAR buffer[256] = { 0 };
int ret;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
ret = GetLocaleInfoW(LOCALE_NEUTRAL, LOCALE_SDECIMAL, buffer, 0);
if (ret != 2)
{
Fail("GetLocaleInfoW gave incorrect desired length for buffer.\n"
"Expected 2, got %d.\n", ret);
}
ret = GetLocaleInfoW(LOCALE_NEUTRAL, LOCALE_SDECIMAL, buffer, 1);
if (ret != 0)
{
Fail("GetLocaleInfoW expected to return 0, returned %d", ret);
}
if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
{
Fail("GetLocaleInfoW failed to set last error to "
"ERROR_INSUFFICIENT_BUFFER!\n");
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test2.c
**
** Purpose: Tests that GetLocaleInfoW will correctly return the amount of
** buffer space required. Also tests that it correctly handles a
** buffer of insufficient space.
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(locale_info_GetLocaleInfoW_test2_paltest_getlocaleinfow_test2, "locale_info/GetLocaleInfoW/test2/paltest_getlocaleinfow_test2")
{
WCHAR buffer[256] = { 0 };
int ret;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
ret = GetLocaleInfoW(LOCALE_NEUTRAL, LOCALE_SDECIMAL, buffer, 0);
if (ret != 2)
{
Fail("GetLocaleInfoW gave incorrect desired length for buffer.\n"
"Expected 2, got %d.\n", ret);
}
ret = GetLocaleInfoW(LOCALE_NEUTRAL, LOCALE_SDECIMAL, buffer, 1);
if (ret != 0)
{
Fail("GetLocaleInfoW expected to return 0, returned %d", ret);
}
if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
{
Fail("GetLocaleInfoW failed to set last error to "
"ERROR_INSUFFICIENT_BUFFER!\n");
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/hosts/coreshim/CoreShim.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "CoreShim.h"
#include <set>
#include <sstream>
#include <vector>
#include <mutex>
namespace
{
template<typename CT>
struct PathBuffer
{
PathBuffer()
: DefBuffer{}
, Buf{ DefBuffer }
, Len{ ARRAY_SIZE(DefBuffer) }
{ }
void SetLength(_In_ DWORD len)
{
if (len > Len)
{
Buf = BigBuffer.data();
Len = static_cast<DWORD>(BigBuffer.size());
}
}
void ExpandBuffer(_In_ DWORD factor = 2)
{
SetLength(Len * factor);
}
operator DWORD()
{
return Len;
}
operator CT *()
{
return Buf;
}
CT DefBuffer[MAX_PATH];
std::vector<CT> BigBuffer;
CT *Buf;
DWORD Len;
};
std::wstring GetExePath()
{
PathBuffer<WCHAR> buffer;
DWORD len = ::GetModuleFileNameW(nullptr, buffer, buffer);
while (::GetLastError() == ERROR_INSUFFICIENT_BUFFER)
{
buffer.ExpandBuffer();
len = ::GetModuleFileNameW(nullptr, buffer, buffer);
}
return std::wstring{ buffer.Buf, buffer.Buf + len };
}
bool TryGetEnvVar(_In_z_ const WCHAR* env, _Inout_ std::wstring& value)
{
DWORD len = ::GetEnvironmentVariableW(env, nullptr, 0);
if (len == 0)
return false;
PathBuffer<WCHAR> buffer;
buffer.SetLength(len);
(void)::GetEnvironmentVariableW(env, buffer, buffer);
value = static_cast<WCHAR *>(buffer.Buf);
return true;
}
std::wstring GetEnvVar(_In_z_ const WCHAR *env)
{
std::wstring value;
if (!TryGetEnvVar(env, value))
{
throw __HRESULT_FROM_WIN32(ERROR_ENVVAR_NOT_FOUND);
}
return value;
}
std::string ConvertWideToUtf8(_In_ const std::wstring &wide)
{
// [TODO] Properly convert to UTF-8
std::string narrow;
for (WCHAR p : wide)
narrow.push_back(static_cast<CHAR>(p));
return narrow;
}
coreclr *s_CoreClrInstance;
}
namespace Utility
{
HRESULT TryGetEnvVar(_In_z_ const WCHAR *env, _Inout_ std::wstring &envVar)
{
try
{
envVar = GetEnvVar(env);
}
catch (HRESULT hr)
{
return hr;
}
return S_OK;
}
HRESULT GetCoreShimDirectory(_Inout_ std::wstring &dir)
{
HMODULE hModule;
BOOL res = ::GetModuleHandleExW(
GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
reinterpret_cast<LPCWSTR>(&TryGetEnvVar),
&hModule);
if (res == FALSE)
return HRESULT_FROM_WIN32(::GetLastError());
std::wstring path;
size_t dwModuleFileName = MAX_PATH / 2;
do
{
path.resize(dwModuleFileName * 2);
dwModuleFileName = GetModuleFileNameW(hModule, (LPWSTR)path.data(), static_cast<DWORD>(path.size()));
} while (dwModuleFileName == path.size());
if (dwModuleFileName == 0)
return HRESULT_FROM_WIN32(::GetLastError());
size_t idx = path.find_last_of(W('\\'));
if (idx == std::wstring::npos)
return E_UNEXPECTED;
path.resize(idx + 1);
dir = std::move(path);
return S_OK;
}
HRESULT GetCoreShimDirectory(_Inout_ std::string &dir)
{
HRESULT hr;
std::wstring dir_wide;
RETURN_IF_FAILED(GetCoreShimDirectory(dir_wide));
dir = ConvertWideToUtf8(dir_wide);
return S_OK;
}
}
bool TryLoadHostPolicy(const WCHAR* hostPolicyPath)
{
const WCHAR *hostpolicyName = W("hostpolicy.dll");
HMODULE hMod = ::GetModuleHandleW(hostpolicyName);
if (hMod != nullptr)
{
return true;
}
// Check if a hostpolicy exists and if it does, load it.
if (INVALID_FILE_ATTRIBUTES != ::GetFileAttributesW(hostPolicyPath))
{
hMod = ::LoadLibraryExW(hostPolicyPath, nullptr, LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR | LOAD_LIBRARY_SEARCH_DEFAULT_DIRS);
if (hMod == nullptr)
return false;
// Initialize the hostpolicy mock to a default state
using Set_corehost_resolve_component_dependencies_Values_fn = void(__cdecl *)(
int returnValue,
const WCHAR *assemblyPaths,
const WCHAR *nativeSearchPaths,
const WCHAR *resourceSearchPaths);
auto set_comp_depend_values = (Set_corehost_resolve_component_dependencies_Values_fn)
::GetProcAddress(hMod, "Set_corehost_resolve_component_dependencies_Values");
assert(set_comp_depend_values != nullptr);
set_comp_depend_values(0, W(""), W(""), W(""));
}
return true;
}
HRESULT coreclr::GetCoreClrInstance(_Outptr_ coreclr **instance, _In_opt_z_ const WCHAR *path)
{
if (s_CoreClrInstance != nullptr)
{
*instance = s_CoreClrInstance;
return S_FALSE;
}
const wchar_t* mockHostPolicyEnvVar = W("MOCK_HOSTPOLICY");
std::wstring hostPolicyPath;
if (TryGetEnvVar(mockHostPolicyEnvVar, hostPolicyPath))
{
if (!TryLoadHostPolicy(hostPolicyPath.c_str()))
{
return E_UNEXPECTED;
}
}
try
{
std::wstring pathLocal;
if (path == nullptr)
{
pathLocal = GetEnvVar(W("CORE_ROOT"));
}
else
{
pathLocal = { path };
}
pathLocal.append(W("\\coreclr.dll"));
AutoModule hmod = ::LoadLibraryExW(pathLocal.c_str() , nullptr, LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR | LOAD_LIBRARY_SEARCH_DEFAULT_DIRS);
if (hmod == nullptr)
return HRESULT_FROM_WIN32(::GetLastError());
s_CoreClrInstance = new coreclr{ std::move(hmod) };
}
catch (HRESULT hr)
{
return hr;
}
catch (const std::bad_alloc&)
{
return E_OUTOFMEMORY;
}
*instance = s_CoreClrInstance;
return S_OK;
}
HRESULT coreclr::CreateTpaList(_Inout_ std::string &tpaList, _In_opt_z_ const WCHAR *dir)
{
assert(tpaList.empty());
// Represents priority order
static const WCHAR * const tpaExtensions[] =
{
W(".ni.dll"),
W(".dll"),
W(".ni.exe"),
W(".exe"),
};
try
{
std::wstring w_dirLocal;
if (dir == nullptr)
{
w_dirLocal = GetEnvVar(W("CORE_ROOT"));
}
else
{
w_dirLocal = { dir };
}
std::string dirLocal = ConvertWideToUtf8(w_dirLocal);
w_dirLocal.append(W("\\*"));
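        // The trailing wildcard makes FindFirstFileW/FindNextFileW below enumerate every entry in the directory.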
std::set<std::wstring> addedAssemblies;
std::stringstream tpaStream;
// Walk the directory for each extension separately so assembly types
// are discovered in priority order - see above.
for (int extIndex = 0; extIndex < ARRAY_SIZE(tpaExtensions); extIndex++)
{
const WCHAR* ext = tpaExtensions[extIndex];
size_t extLength = ::wcslen(ext);
WIN32_FIND_DATAW ffd;
AutoFindFile sh = ::FindFirstFileW(w_dirLocal.c_str(), &ffd);
if (sh == nullptr)
break;
// For all entries in the directory
do
{
// Only examine non-directory entries
if (!(ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY))
{
std::wstring filename{ ffd.cFileName };
// Check if the extension matches
int extPos = static_cast<int>(filename.length() - extLength);
if ((extPos <= 0) || (filename.compare(extPos, extLength, ext) != 0))
{
continue;
}
std::wstring filenameWithoutExt{ filename.substr(0, extPos) };
// Only one type of a particular assembly instance should be inserted
// See extension list above.
if (addedAssemblies.find(filenameWithoutExt) == std::end(addedAssemblies))
{
addedAssemblies.insert(std::move(filenameWithoutExt));
std::string filename_utf8 = ConvertWideToUtf8(filename);
tpaStream << dirLocal << "\\" << filename_utf8 << ";";
}
}
} while (::FindNextFileW(sh, &ffd) != FALSE);
}
tpaList = tpaStream.str();
}
catch (HRESULT hr)
{
return hr;
}
catch (const std::bad_alloc&)
{
return E_OUTOFMEMORY;
}
return S_OK;
}
coreclr::coreclr(_Inout_ AutoModule hmod)
: _hmod{ std::move(hmod) }
, _attached{ false }
, _clrInst{ nullptr }
, _appDomainId{ std::numeric_limits<uint32_t>::max() }
{
_initialize = (decltype(_initialize))::GetProcAddress(_hmod, "coreclr_initialize");
assert(_initialize != nullptr);
_create_delegate = (decltype(_create_delegate))::GetProcAddress(_hmod, "coreclr_create_delegate");
assert(_create_delegate != nullptr);
_shutdown = (decltype(_shutdown))::GetProcAddress(_hmod, "coreclr_shutdown");
assert(_shutdown != nullptr);
}
coreclr::~coreclr()
{
if (_clrInst != nullptr && !_attached)
{
HRESULT hr = _shutdown(_clrInst, _appDomainId);
assert(SUCCEEDED(hr));
(void)hr;
}
}
HRESULT coreclr::Initialize(
_In_ int propertyCount,
    _In_reads_(propertyCount) const char **keys,
    _In_reads_(propertyCount) const char **values,
_In_opt_z_ const char *appDomainName)
{
if (_clrInst != nullptr)
return __HRESULT_FROM_WIN32(ERROR_ALREADY_EXISTS);
if (appDomainName == nullptr)
appDomainName = "CoreShim";
HRESULT hr;
// Check if this is hosted scenario - launched via CoreRun.exe
HMODULE mod = ::GetModuleHandleW(W("CoreRun.exe"));
if (mod != NULL)
{
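        // Hosted under CoreRun: reuse its existing CLR instance and AppDomain instead of
        // initializing a second runtime in this process.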
using GetCurrentClrDetailsFunc = HRESULT(__cdecl *)(void **clrInstance, unsigned int *appDomainId);
auto getCurrentClrDetails = (GetCurrentClrDetailsFunc)::GetProcAddress(mod, "GetCurrentClrDetails");
RETURN_IF_FAILED(getCurrentClrDetails(&_clrInst, &_appDomainId));
if (_clrInst != nullptr)
{
_attached = true;
return S_OK;
}
}
try
{
const std::wstring exePathW = GetExePath();
const std::string exePath = ConvertWideToUtf8(exePathW);
RETURN_IF_FAILED(_initialize(exePath.c_str(), appDomainName, propertyCount, keys, values, &_clrInst, &_appDomainId));
}
catch (const std::bad_alloc&)
{
return E_OUTOFMEMORY;
}
return S_OK;
}
HRESULT coreclr::CreateDelegate(
_In_z_ const char *assembly,
_In_z_ const char *type,
_In_z_ const char *method,
_Out_ void **del)
{
if (_clrInst == nullptr)
return E_NOT_VALID_STATE;
HRESULT hr;
RETURN_IF_FAILED(_create_delegate(_clrInst, _appDomainId, assembly, type, method, del));
return S_OK;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "CoreShim.h"
#include <set>
#include <sstream>
#include <vector>
#include <mutex>
namespace
{
template<typename CT>
struct PathBuffer
{
PathBuffer()
: DefBuffer{}
, Buf{ DefBuffer }
, Len{ ARRAY_SIZE(DefBuffer) }
{ }
void SetLength(_In_ DWORD len)
{
if (len > Len)
{
                // Grow the heap-backed buffer before switching to it.
                BigBuffer.resize(len);
                Buf = BigBuffer.data();
Len = static_cast<DWORD>(BigBuffer.size());
}
}
void ExpandBuffer(_In_ DWORD factor = 2)
{
SetLength(Len * factor);
}
operator DWORD()
{
return Len;
}
operator CT *()
{
return Buf;
}
CT DefBuffer[MAX_PATH];
std::vector<CT> BigBuffer;
CT *Buf;
DWORD Len;
};
std::wstring GetExePath()
{
PathBuffer<WCHAR> buffer;
DWORD len = ::GetModuleFileNameW(nullptr, buffer, buffer);
while (::GetLastError() == ERROR_INSUFFICIENT_BUFFER)
{
buffer.ExpandBuffer();
len = ::GetModuleFileNameW(nullptr, buffer, buffer);
}
return std::wstring{ buffer.Buf, buffer.Buf + len };
}
bool TryGetEnvVar(_In_z_ const WCHAR* env, _Inout_ std::wstring& value)
{
DWORD len = ::GetEnvironmentVariableW(env, nullptr, 0);
if (len == 0)
return false;
PathBuffer<WCHAR> buffer;
buffer.SetLength(len);
(void)::GetEnvironmentVariableW(env, buffer, buffer);
value = static_cast<WCHAR *>(buffer.Buf);
return true;
}
std::wstring GetEnvVar(_In_z_ const WCHAR *env)
{
std::wstring value;
if (!TryGetEnvVar(env, value))
{
throw __HRESULT_FROM_WIN32(ERROR_ENVVAR_NOT_FOUND);
}
return value;
}
std::string ConvertWideToUtf8(_In_ const std::wstring &wide)
{
// [TODO] Properly convert to UTF-8
std::string narrow;
for (WCHAR p : wide)
narrow.push_back(static_cast<CHAR>(p));
return narrow;
}
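    // Illustrative note (not part of the original shim): the TODO above could be
    // addressed with the Win32 WideCharToMultiByte API, along these lines:
    //
    //   std::string ConvertWideToUtf8Proper(_In_ const std::wstring &wide)
    //   {
    //       int needed = ::WideCharToMultiByte(CP_UTF8, 0, wide.c_str(), (int)wide.length(), nullptr, 0, nullptr, nullptr);
    //       std::string narrow(needed > 0 ? needed : 0, '\0');
    //       if (needed > 0)
    //           ::WideCharToMultiByte(CP_UTF8, 0, wide.c_str(), (int)wide.length(), &narrow[0], needed, nullptr, nullptr);
    //       return narrow;
    //   }
    //
    // The name ConvertWideToUtf8Proper is hypothetical; only the API call is real.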
coreclr *s_CoreClrInstance;
}
namespace Utility
{
HRESULT TryGetEnvVar(_In_z_ const WCHAR *env, _Inout_ std::wstring &envVar)
{
try
{
envVar = GetEnvVar(env);
}
catch (HRESULT hr)
{
return hr;
}
return S_OK;
}
HRESULT GetCoreShimDirectory(_Inout_ std::wstring &dir)
{
HMODULE hModule;
BOOL res = ::GetModuleHandleExW(
GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
reinterpret_cast<LPCWSTR>(&TryGetEnvVar),
&hModule);
if (res == FALSE)
return HRESULT_FROM_WIN32(::GetLastError());
std::wstring path;
size_t dwModuleFileName = MAX_PATH / 2;
do
{
path.resize(dwModuleFileName * 2);
dwModuleFileName = GetModuleFileNameW(hModule, (LPWSTR)path.data(), static_cast<DWORD>(path.size()));
} while (dwModuleFileName == path.size());
if (dwModuleFileName == 0)
return HRESULT_FROM_WIN32(::GetLastError());
size_t idx = path.find_last_of(W('\\'));
if (idx == std::wstring::npos)
return E_UNEXPECTED;
path.resize(idx + 1);
dir = std::move(path);
return S_OK;
}
HRESULT GetCoreShimDirectory(_Inout_ std::string &dir)
{
HRESULT hr;
std::wstring dir_wide;
RETURN_IF_FAILED(GetCoreShimDirectory(dir_wide));
dir = ConvertWideToUtf8(dir_wide);
return S_OK;
}
}
bool TryLoadHostPolicy(const WCHAR* hostPolicyPath)
{
const WCHAR *hostpolicyName = W("hostpolicy.dll");
HMODULE hMod = ::GetModuleHandleW(hostpolicyName);
if (hMod != nullptr)
{
return true;
}
// Check if a hostpolicy exists and if it does, load it.
if (INVALID_FILE_ATTRIBUTES != ::GetFileAttributesW(hostPolicyPath))
{
hMod = ::LoadLibraryExW(hostPolicyPath, nullptr, LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR | LOAD_LIBRARY_SEARCH_DEFAULT_DIRS);
if (hMod == nullptr)
return false;
// Initialize the hostpolicy mock to a default state
using Set_corehost_resolve_component_dependencies_Values_fn = void(__cdecl *)(
int returnValue,
const WCHAR *assemblyPaths,
const WCHAR *nativeSearchPaths,
const WCHAR *resourceSearchPaths);
auto set_comp_depend_values = (Set_corehost_resolve_component_dependencies_Values_fn)
::GetProcAddress(hMod, "Set_corehost_resolve_component_dependencies_Values");
assert(set_comp_depend_values != nullptr);
set_comp_depend_values(0, W(""), W(""), W(""));
}
return true;
}
HRESULT coreclr::GetCoreClrInstance(_Outptr_ coreclr **instance, _In_opt_z_ const WCHAR *path)
{
if (s_CoreClrInstance != nullptr)
{
*instance = s_CoreClrInstance;
return S_FALSE;
}
const wchar_t* mockHostPolicyEnvVar = W("MOCK_HOSTPOLICY");
std::wstring hostPolicyPath;
if (TryGetEnvVar(mockHostPolicyEnvVar, hostPolicyPath))
{
if (!TryLoadHostPolicy(hostPolicyPath.c_str()))
{
return E_UNEXPECTED;
}
}
try
{
std::wstring pathLocal;
if (path == nullptr)
{
pathLocal = GetEnvVar(W("CORE_ROOT"));
}
else
{
pathLocal = { path };
}
pathLocal.append(W("\\coreclr.dll"));
AutoModule hmod = ::LoadLibraryExW(pathLocal.c_str() , nullptr, LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR | LOAD_LIBRARY_SEARCH_DEFAULT_DIRS);
if (hmod == nullptr)
return HRESULT_FROM_WIN32(::GetLastError());
s_CoreClrInstance = new coreclr{ std::move(hmod) };
}
catch (HRESULT hr)
{
return hr;
}
catch (const std::bad_alloc&)
{
return E_OUTOFMEMORY;
}
*instance = s_CoreClrInstance;
return S_OK;
}
HRESULT coreclr::CreateTpaList(_Inout_ std::string &tpaList, _In_opt_z_ const WCHAR *dir)
{
assert(tpaList.empty());
// Represents priority order
static const WCHAR * const tpaExtensions[] =
{
W(".ni.dll"),
W(".dll"),
W(".ni.exe"),
W(".exe"),
};
try
{
std::wstring w_dirLocal;
if (dir == nullptr)
{
w_dirLocal = GetEnvVar(W("CORE_ROOT"));
}
else
{
w_dirLocal = { dir };
}
std::string dirLocal = ConvertWideToUtf8(w_dirLocal);
w_dirLocal.append(W("\\*"));
std::set<std::wstring> addedAssemblies;
std::stringstream tpaStream;
// Walk the directory for each extension separately so assembly types
// are discovered in priority order - see above.
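        // For example (illustrative): if both "Foo.ni.dll" and "Foo.dll" exist in the
        // directory, only "Foo.ni.dll" is appended to the TPA list, because the base
        // name "Foo" is recorded in addedAssemblies on the earlier, higher-priority
        // ".ni.dll" pass and later matches for the same base name are skipped.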
for (int extIndex = 0; extIndex < ARRAY_SIZE(tpaExtensions); extIndex++)
{
const WCHAR* ext = tpaExtensions[extIndex];
size_t extLength = ::wcslen(ext);
WIN32_FIND_DATAW ffd;
AutoFindFile sh = ::FindFirstFileW(w_dirLocal.c_str(), &ffd);
if (sh == nullptr)
break;
// For all entries in the directory
do
{
// Only examine non-directory entries
if (!(ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY))
{
std::wstring filename{ ffd.cFileName };
// Check if the extension matches
int extPos = static_cast<int>(filename.length() - extLength);
if ((extPos <= 0) || (filename.compare(extPos, extLength, ext) != 0))
{
continue;
}
std::wstring filenameWithoutExt{ filename.substr(0, extPos) };
// Only one type of a particular assembly instance should be inserted
// See extension list above.
if (addedAssemblies.find(filenameWithoutExt) == std::end(addedAssemblies))
{
addedAssemblies.insert(std::move(filenameWithoutExt));
std::string filename_utf8 = ConvertWideToUtf8(filename);
tpaStream << dirLocal << "\\" << filename_utf8 << ";";
}
}
} while (::FindNextFileW(sh, &ffd) != FALSE);
}
tpaList = tpaStream.str();
}
catch (HRESULT hr)
{
return hr;
}
catch (const std::bad_alloc&)
{
return E_OUTOFMEMORY;
}
return S_OK;
}
coreclr::coreclr(_Inout_ AutoModule hmod)
: _hmod{ std::move(hmod) }
, _attached{ false }
, _clrInst{ nullptr }
, _appDomainId{ std::numeric_limits<uint32_t>::max() }
{
_initialize = (decltype(_initialize))::GetProcAddress(_hmod, "coreclr_initialize");
assert(_initialize != nullptr);
_create_delegate = (decltype(_create_delegate))::GetProcAddress(_hmod, "coreclr_create_delegate");
assert(_create_delegate != nullptr);
_shutdown = (decltype(_shutdown))::GetProcAddress(_hmod, "coreclr_shutdown");
assert(_shutdown != nullptr);
}
coreclr::~coreclr()
{
if (_clrInst != nullptr && !_attached)
{
HRESULT hr = _shutdown(_clrInst, _appDomainId);
assert(SUCCEEDED(hr));
(void)hr;
}
}
HRESULT coreclr::Initialize(
_In_ int propertyCount,
    _In_reads_(propertyCount) const char **keys,
    _In_reads_(propertyCount) const char **values,
_In_opt_z_ const char *appDomainName)
{
if (_clrInst != nullptr)
return __HRESULT_FROM_WIN32(ERROR_ALREADY_EXISTS);
if (appDomainName == nullptr)
appDomainName = "CoreShim";
HRESULT hr;
    // Check if this is a hosted scenario - launched via CoreRun.exe
HMODULE mod = ::GetModuleHandleW(W("CoreRun.exe"));
if (mod != NULL)
{
using GetCurrentClrDetailsFunc = HRESULT(__cdecl *)(void **clrInstance, unsigned int *appDomainId);
auto getCurrentClrDetails = (GetCurrentClrDetailsFunc)::GetProcAddress(mod, "GetCurrentClrDetails");
RETURN_IF_FAILED(getCurrentClrDetails(&_clrInst, &_appDomainId));
if (_clrInst != nullptr)
{
_attached = true;
return S_OK;
}
}
try
{
const std::wstring exePathW = GetExePath();
const std::string exePath = ConvertWideToUtf8(exePathW);
RETURN_IF_FAILED(_initialize(exePath.c_str(), appDomainName, propertyCount, keys, values, &_clrInst, &_appDomainId));
}
catch (const std::bad_alloc&)
{
return E_OUTOFMEMORY;
}
return S_OK;
}
HRESULT coreclr::CreateDelegate(
_In_z_ const char *assembly,
_In_z_ const char *type,
_In_z_ const char *method,
_Out_ void **del)
{
if (_clrInst == nullptr)
return E_NOT_VALID_STATE;
HRESULT hr;
RETURN_IF_FAILED(_create_delegate(_clrInst, _appDomainId, assembly, type, method, del));
return S_OK;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
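To make the first point concrete, here is a minimal illustrative sketch of why every register slot of a multi-reg local has to be consulted, not just the first one. This is not the actual RyuJIT `gtHasReg()` code; the `MultiRegNode` type and its fields are invented for the example, and only `gtHasReg()`/`GetMultiRegCount()` named above are the real JIT entry points.

```cpp
#include <vector>

// Hypothetical stand-in for a multi-reg GT_STORE_LCL_VAR node; illustrative only.
struct MultiRegNode
{
    static constexpr int REG_NA = -1;   // slot not enregistered
    std::vector<int> slotRegs;          // assigned register per slot, REG_NA if on the stack
    bool isMultiReg = false;

    // Sketch of the idea behind the fix: report "has a register" (and therefore
    // GC-track the registers) if ANY slot is enregistered, even when slot 0 is not.
    bool HasAnyRegister() const
    {
        if (isMultiReg)
        {
            for (int reg : slotRegs)
            {
                if (reg != REG_NA)
                    return true;
            }
            return false;
        }
        return !slotRegs.empty() && slotRegs[0] != REG_NA;
    }
};
```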
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/tests/Interop/PInvoke/BestFitMapping/LPStr/BestFitMappingNative.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <xplatform.h>
typedef struct TLPStr_Test_Struct
{
LPSTR pStr;
} LPStr_Test_Struct;
typedef struct TLPStr_Test_Class
{
LPSTR pStr;
} LPStr_Test_Class;
typedef struct TLPStrTestStructOfArrays
{
LPSTR pStr1;
LPSTR pStr2;
} LPStrTestStructOfArrays;
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_String(LPSTR pStr)
{
printf ("xx %s \n", pStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_String(LPSTR* ppStr)
{
printf ("yy %s \n", *ppStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_String(LPSTR* ppStr)
{
printf ("zz %s \n", *ppStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_StringBuilder(LPSTR pStr)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_StringBuilder(LPSTR* ppStr)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_StringBuilder(LPSTR* ppStr)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_Struct_String (LPStr_Test_Struct strStruct)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_Struct_String (LPStr_Test_Struct* pSstrStruct)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_Struct_String (LPStr_Test_Struct* pStrStruct)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_Array_String (LPSTR str[])
{
printf ("%s \n", str[0]);
printf ("%s \n", str[1]);
printf ("%s \n", str[2]);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_Array_String (LPSTR* str[])
{
printf ("%s \n", (*str)[0]);
printf ("%s \n", (*str)[1]);
printf ("%s \n", (*str)[2]);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_Array_String (LPSTR* str[])
{
printf ("%s \n", (*str)[0]);
printf ("%s \n", (*str)[1]);
printf ("%s \n", (*str)[2]);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_Class_String (LPStr_Test_Class strClass)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_Class_String (LPStr_Test_Class* pSstrClass)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_Class_String (LPStr_Test_Class* pStrClass)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_Array_Struct (LPStr_Test_Struct str[])
{
printf ("** %s \n", str[0].pStr);
printf ("** %s \n", str[1].pStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_Array_Struct (LPStr_Test_Struct* str[])
{
printf ("++ %s \n", (*str)[0].pStr);
printf ("++ %s \n", (*str)[1].pStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_Array_Struct (LPStr_Test_Struct* str[])
{
printf ("-- %s \n", (*str)[0].pStr);
printf ("-- %s \n", (*str)[1].pStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_Struct_String_nothrow (LPStr_Test_Struct strStruct)
{
return TRUE;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <xplatform.h>
typedef struct TLPStr_Test_Struct
{
LPSTR pStr;
} LPStr_Test_Struct;
typedef struct TLPStr_Test_Class
{
LPSTR pStr;
} LPStr_Test_Class;
typedef struct TLPStrTestStructOfArrays
{
LPSTR pStr1;
LPSTR pStr2;
} LPStrTestStructOfArrays;
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_String(LPSTR pStr)
{
printf ("xx %s \n", pStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_String(LPSTR* ppStr)
{
printf ("yy %s \n", *ppStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_String(LPSTR* ppStr)
{
printf ("zz %s \n", *ppStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_StringBuilder(LPSTR pStr)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_StringBuilder(LPSTR* ppStr)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_StringBuilder(LPSTR* ppStr)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_Struct_String (LPStr_Test_Struct strStruct)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_Struct_String (LPStr_Test_Struct* pSstrStruct)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_Struct_String (LPStr_Test_Struct* pStrStruct)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_Array_String (LPSTR str[])
{
printf ("%s \n", str[0]);
printf ("%s \n", str[1]);
printf ("%s \n", str[2]);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_Array_String (LPSTR* str[])
{
printf ("%s \n", (*str)[0]);
printf ("%s \n", (*str)[1]);
printf ("%s \n", (*str)[2]);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_Array_String (LPSTR* str[])
{
printf ("%s \n", (*str)[0]);
printf ("%s \n", (*str)[1]);
printf ("%s \n", (*str)[2]);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_Class_String (LPStr_Test_Class strClass)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_Class_String (LPStr_Test_Class* pSstrClass)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_Class_String (LPStr_Test_Class* pStrClass)
{
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_Array_Struct (LPStr_Test_Struct str[])
{
printf ("** %s \n", str[0].pStr);
printf ("** %s \n", str[1].pStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InByRef_Array_Struct (LPStr_Test_Struct* str[])
{
printf ("++ %s \n", (*str)[0].pStr);
printf ("++ %s \n", (*str)[1].pStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_InOutByRef_Array_Struct (LPStr_Test_Struct* str[])
{
printf ("-- %s \n", (*str)[0].pStr);
printf ("-- %s \n", (*str)[1].pStr);
return TRUE;
}
extern "C" bool DLL_EXPORT STDMETHODCALLTYPE LPStrBuffer_In_Struct_String_nothrow (LPStr_Test_Struct strStruct)
{
return TRUE;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/src/libunwind/include/tdep-tilegx/dwarf-config.h | /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2014 Tilera Corp.
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef dwarf_config_h
#define dwarf_config_h
/* This is FIRST_PSEUDO_REGISTER in GCC, since DWARF_FRAME_REGISTERS is not
explicitly defined. */
#define DWARF_NUM_PRESERVED_REGS 188
#define DWARF_REGNUM_MAP_LENGTH (56 + 2)
/* Return TRUE if the ADDR_SPACE uses big-endian byte-order. */
#define dwarf_is_big_endian(addr_space) ((addr_space)->big_endian)
/* Convert a pointer to a dwarf_cursor structure to a pointer to
unw_cursor_t. */
#define dwarf_to_cursor(c) ((unw_cursor_t *) (c))
typedef struct dwarf_loc
{
unw_word_t val;
#ifndef UNW_LOCAL_ONLY
unw_word_t type; /* see DWARF_LOC_TYPE_* macros. */
#endif
} dwarf_loc_t;
#endif /* dwarf_config_h */
| /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2014 Tilera Corp.
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef dwarf_config_h
#define dwarf_config_h
/* This is FIRST_PSEUDO_REGISTER in GCC, since DWARF_FRAME_REGISTERS is not
explicitly defined. */
#define DWARF_NUM_PRESERVED_REGS 188
#define DWARF_REGNUM_MAP_LENGTH (56 + 2)
/* Return TRUE if the ADDR_SPACE uses big-endian byte-order. */
#define dwarf_is_big_endian(addr_space) ((addr_space)->big_endian)
/* Convert a pointer to a dwarf_cursor structure to a pointer to
unw_cursor_t. */
#define dwarf_to_cursor(c) ((unw_cursor_t *) (c))
typedef struct dwarf_loc
{
unw_word_t val;
#ifndef UNW_LOCAL_ONLY
unw_word_t type; /* see DWARF_LOC_TYPE_* macros. */
#endif
} dwarf_loc_t;
#endif /* dwarf_config_h */
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/src/libunwind/include/tdep-tilegx/libunwind_i.h | /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2014 Tilera Corp.
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef TILEGX_LIBUNWIND_I_H
#define TILEGX_LIBUNWIND_I_H
/* Target-dependent definitions that are internal to libunwind but need
to be shared with target-independent code. */
#include <stdlib.h>
#include <libunwind.h>
#include <stdatomic.h>
# include "elf64.h"
#include "mempool.h"
#include "dwarf.h"
typedef struct
{
/* no Tilegx-specific fast trace */
} unw_tdep_frame_t;
struct unw_addr_space
{
struct unw_accessors acc;
int big_endian;
tilegx_abi_t abi;
unsigned int addr_size;
unw_caching_policy_t caching_policy;
_Atomic uint32_t cache_generation;
unw_word_t dyn_generation; /* see dyn-common.h */
unw_word_t dyn_info_list_addr; /* (cached) dyn_info_list_addr */
struct dwarf_rs_cache global_cache;
struct unw_debug_frame_list *debug_frames;
};
#define tdep_big_endian(as) ((as)->big_endian)
struct cursor
{
struct dwarf_cursor dwarf; /* must be first */
unw_word_t sigcontext_addr;
unw_word_t sigcontext_sp;
unw_word_t sigcontext_pc;
};
#define DWARF_GET_LOC(l) ((l).val)
#ifndef UNW_REMOTE_ONLY
typedef long tilegx_reg_t;
#endif
#ifdef UNW_LOCAL_ONLY
#define DWARF_NULL_LOC DWARF_LOC (0, 0)
#define DWARF_IS_NULL_LOC(l) (DWARF_GET_LOC (l) == 0)
#define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r) })
#define DWARF_IS_REG_LOC(l) 0
#define DWARF_REG_LOC(c,r) (DWARF_LOC((unw_word_t) (intptr_t) \
tdep_uc_addr((c)->as_arg, (r)), 0))
#define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0)
#define DWARF_FPREG_LOC(c,r) (DWARF_LOC((unw_word_t) (intptr_t) \
tdep_uc_addr((c)->as_arg, (r)), 0))
/* Tilegx has no FP. */
static inline int
dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val)
{
Debug (1, "Tielgx has no fp!\n");
abort();
return 0;
}
static inline int
dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val)
{
Debug (1, "Tielgx has no fp!\n");
abort();
return 0;
}
static inline int
dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val)
{
if (!DWARF_GET_LOC (loc))
return -1;
*val = *(tilegx_reg_t *) (intptr_t) DWARF_GET_LOC (loc);
return 0;
}
static inline int
dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val)
{
if (!DWARF_GET_LOC (loc))
return -1;
*(tilegx_reg_t *) (intptr_t) DWARF_GET_LOC (loc) = val;
return 0;
}
#else /* !UNW_LOCAL_ONLY */
#define DWARF_LOC_TYPE_FP (1 << 0)
#define DWARF_LOC_TYPE_REG (1 << 1)
#define DWARF_NULL_LOC DWARF_LOC (0, 0)
#define DWARF_IS_NULL_LOC(l) \
({ dwarf_loc_t _l = (l); _l.val == 0 && _l.type == 0; })
#define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r), .type = (t) })
#define DWARF_IS_REG_LOC(l) (((l).type & DWARF_LOC_TYPE_REG) != 0)
#define DWARF_IS_FP_LOC(l) (((l).type & DWARF_LOC_TYPE_FP) != 0)
#define DWARF_REG_LOC(c,r) DWARF_LOC((r), DWARF_LOC_TYPE_REG)
#define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0)
#define DWARF_FPREG_LOC(c,r) DWARF_LOC((r), (DWARF_LOC_TYPE_REG \
| DWARF_LOC_TYPE_FP))
/* TILEGX has no fp. */
static inline int
dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val)
{
Debug (1, "Tielgx has no fp!\n");
abort();
return 0;
}
static inline int
dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val)
{
Debug (1, "Tielgx has no fp!\n");
abort();
return 0;
}
static inline int
dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val)
{
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
/* If a code-generator were to save a value of type unw_word_t in a
floating-point register, we would have to support this case. I
suppose it could happen with MMX registers, but does it really
happen? */
assert (!DWARF_IS_FP_LOC (loc));
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
}
static inline int
dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val)
{
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
/* If a code-generator were to save a value of type unw_word_t in a
floating-point register, we would have to support this case. I
suppose it could happen with MMX registers, but does it really
happen? */
assert (!DWARF_IS_FP_LOC (loc));
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val,
1, c->as_arg);
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val,
1, c->as_arg);
}
#endif /* !UNW_LOCAL_ONLY */
#define tdep_getcontext_trace unw_getcontext
#define tdep_init_done UNW_OBJ(init_done)
#define tdep_needs_initialization UNW_OBJ(needs_initialization)
#define tdep_init UNW_OBJ(init)
/* Platforms that support UNW_INFO_FORMAT_TABLE need to define
tdep_search_unwind_table. */
#define tdep_search_unwind_table dwarf_search_unwind_table
#define tdep_find_unwind_table dwarf_find_unwind_table
#define tdep_uc_addr UNW_ARCH_OBJ(uc_addr)
#define tdep_get_elf_image UNW_ARCH_OBJ(get_elf_image)
#define tdep_get_exe_image_path UNW_ARCH_OBJ(get_exe_image_path)
#define tdep_access_reg UNW_OBJ(access_reg)
#define tdep_access_fpreg UNW_OBJ(access_fpreg)
#define tdep_fetch_frame(c,ip,n) do {} while(0)
#define tdep_cache_frame(c) 0
#define tdep_reuse_frame(c,frame) do {} while(0)
#define tdep_stash_frame(c,rs) do {} while(0)
#define tdep_trace(cur,addr,n) (-UNW_ENOINFO)
#ifdef UNW_LOCAL_ONLY
#define tdep_find_proc_info(c,ip,n) \
dwarf_find_proc_info((c)->as, (ip), &(c)->pi, (n), \
(c)->as_arg)
#define tdep_put_unwind_info(as,pi,arg) \
dwarf_put_unwind_info((as), (pi), (arg))
#else
#define tdep_find_proc_info(c,ip,n) \
(*(c)->as->acc.find_proc_info)((c)->as, (ip), &(c)->pi, (n), \
(c)->as_arg)
#define tdep_put_unwind_info(as,pi,arg) \
(*(as)->acc.put_unwind_info)((as), (pi), (arg))
#endif
#define tdep_get_as(c) ((c)->dwarf.as)
#define tdep_get_as_arg(c) ((c)->dwarf.as_arg)
#define tdep_get_ip(c) ((c)->dwarf.ip)
extern atomic_bool tdep_init_done;
extern void tdep_init (void);
extern int tdep_search_unwind_table (unw_addr_space_t as,
unw_word_t ip,
unw_dyn_info_t *di,
unw_proc_info_t *pi,
int need_unwind_info,
void *arg);
extern void *tdep_uc_addr (ucontext_t *uc, int reg);
extern int tdep_get_elf_image (struct elf_image *ei,
pid_t pid, unw_word_t ip,
unsigned long *segbase,
unsigned long *mapoff,
char *path, size_t pathlen);
extern void tdep_get_exe_image_path (char *path);
extern int tdep_access_reg (struct cursor *c,
unw_regnum_t reg,
unw_word_t *valp,
int write);
extern int tdep_access_fpreg (struct cursor *c,
unw_regnum_t reg,
unw_fpreg_t *valp,
int write);
#endif /* TILEGX_LIBUNWIND_I_H */
| /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2014 Tilera Corp.
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef TILEGX_LIBUNWIND_I_H
#define TILEGX_LIBUNWIND_I_H
/* Target-dependent definitions that are internal to libunwind but need
to be shared with target-independent code. */
#include <stdlib.h>
#include <libunwind.h>
#include <stdatomic.h>
# include "elf64.h"
#include "mempool.h"
#include "dwarf.h"
typedef struct
{
/* no Tilegx-specific fast trace */
} unw_tdep_frame_t;
struct unw_addr_space
{
struct unw_accessors acc;
int big_endian;
tilegx_abi_t abi;
unsigned int addr_size;
unw_caching_policy_t caching_policy;
_Atomic uint32_t cache_generation;
unw_word_t dyn_generation; /* see dyn-common.h */
unw_word_t dyn_info_list_addr; /* (cached) dyn_info_list_addr */
struct dwarf_rs_cache global_cache;
struct unw_debug_frame_list *debug_frames;
};
#define tdep_big_endian(as) ((as)->big_endian)
struct cursor
{
struct dwarf_cursor dwarf; /* must be first */
unw_word_t sigcontext_addr;
unw_word_t sigcontext_sp;
unw_word_t sigcontext_pc;
};
#define DWARF_GET_LOC(l) ((l).val)
#ifndef UNW_REMOTE_ONLY
typedef long tilegx_reg_t;
#endif
#ifdef UNW_LOCAL_ONLY
#define DWARF_NULL_LOC DWARF_LOC (0, 0)
#define DWARF_IS_NULL_LOC(l) (DWARF_GET_LOC (l) == 0)
#define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r) })
#define DWARF_IS_REG_LOC(l) 0
#define DWARF_REG_LOC(c,r) (DWARF_LOC((unw_word_t) (intptr_t) \
tdep_uc_addr((c)->as_arg, (r)), 0))
#define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0)
#define DWARF_FPREG_LOC(c,r) (DWARF_LOC((unw_word_t) (intptr_t) \
tdep_uc_addr((c)->as_arg, (r)), 0))
/* Tilegx has no FP. */
static inline int
dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val)
{
Debug (1, "Tielgx has no fp!\n");
abort();
return 0;
}
static inline int
dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val)
{
Debug (1, "Tielgx has no fp!\n");
abort();
return 0;
}
static inline int
dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val)
{
if (!DWARF_GET_LOC (loc))
return -1;
*val = *(tilegx_reg_t *) (intptr_t) DWARF_GET_LOC (loc);
return 0;
}
static inline int
dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val)
{
if (!DWARF_GET_LOC (loc))
return -1;
*(tilegx_reg_t *) (intptr_t) DWARF_GET_LOC (loc) = val;
return 0;
}
#else /* !UNW_LOCAL_ONLY */
#define DWARF_LOC_TYPE_FP (1 << 0)
#define DWARF_LOC_TYPE_REG (1 << 1)
#define DWARF_NULL_LOC DWARF_LOC (0, 0)
#define DWARF_IS_NULL_LOC(l) \
({ dwarf_loc_t _l = (l); _l.val == 0 && _l.type == 0; })
#define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r), .type = (t) })
#define DWARF_IS_REG_LOC(l) (((l).type & DWARF_LOC_TYPE_REG) != 0)
#define DWARF_IS_FP_LOC(l) (((l).type & DWARF_LOC_TYPE_FP) != 0)
#define DWARF_REG_LOC(c,r) DWARF_LOC((r), DWARF_LOC_TYPE_REG)
#define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0)
#define DWARF_FPREG_LOC(c,r) DWARF_LOC((r), (DWARF_LOC_TYPE_REG \
| DWARF_LOC_TYPE_FP))
/* TILEGX has no fp. */
static inline int
dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val)
{
Debug (1, "Tielgx has no fp!\n");
abort();
return 0;
}
static inline int
dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val)
{
Debug (1, "Tielgx has no fp!\n");
abort();
return 0;
}
static inline int
dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val)
{
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
/* If a code-generator were to save a value of type unw_word_t in a
floating-point register, we would have to support this case. I
suppose it could happen with MMX registers, but does it really
happen? */
assert (!DWARF_IS_FP_LOC (loc));
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
}
static inline int
dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val)
{
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
/* If a code-generator were to save a value of type unw_word_t in a
floating-point register, we would have to support this case. I
suppose it could happen with MMX registers, but does it really
happen? */
assert (!DWARF_IS_FP_LOC (loc));
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val,
1, c->as_arg);
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val,
1, c->as_arg);
}
#endif /* !UNW_LOCAL_ONLY */
#define tdep_getcontext_trace unw_getcontext
#define tdep_init_done UNW_OBJ(init_done)
#define tdep_needs_initialization UNW_OBJ(needs_initialization)
#define tdep_init UNW_OBJ(init)
/* Platforms that support UNW_INFO_FORMAT_TABLE need to define
tdep_search_unwind_table. */
#define tdep_search_unwind_table dwarf_search_unwind_table
#define tdep_find_unwind_table dwarf_find_unwind_table
#define tdep_uc_addr UNW_ARCH_OBJ(uc_addr)
#define tdep_get_elf_image UNW_ARCH_OBJ(get_elf_image)
#define tdep_get_exe_image_path UNW_ARCH_OBJ(get_exe_image_path)
#define tdep_access_reg UNW_OBJ(access_reg)
#define tdep_access_fpreg UNW_OBJ(access_fpreg)
#define tdep_fetch_frame(c,ip,n) do {} while(0)
#define tdep_cache_frame(c) 0
#define tdep_reuse_frame(c,frame) do {} while(0)
#define tdep_stash_frame(c,rs) do {} while(0)
#define tdep_trace(cur,addr,n) (-UNW_ENOINFO)
#ifdef UNW_LOCAL_ONLY
#define tdep_find_proc_info(c,ip,n) \
dwarf_find_proc_info((c)->as, (ip), &(c)->pi, (n), \
(c)->as_arg)
#define tdep_put_unwind_info(as,pi,arg) \
dwarf_put_unwind_info((as), (pi), (arg))
#else
#define tdep_find_proc_info(c,ip,n) \
(*(c)->as->acc.find_proc_info)((c)->as, (ip), &(c)->pi, (n), \
(c)->as_arg)
#define tdep_put_unwind_info(as,pi,arg) \
(*(as)->acc.put_unwind_info)((as), (pi), (arg))
#endif
#define tdep_get_as(c) ((c)->dwarf.as)
#define tdep_get_as_arg(c) ((c)->dwarf.as_arg)
#define tdep_get_ip(c) ((c)->dwarf.ip)
extern atomic_bool tdep_init_done;
extern void tdep_init (void);
extern int tdep_search_unwind_table (unw_addr_space_t as,
unw_word_t ip,
unw_dyn_info_t *di,
unw_proc_info_t *pi,
int need_unwind_info,
void *arg);
extern void *tdep_uc_addr (ucontext_t *uc, int reg);
extern int tdep_get_elf_image (struct elf_image *ei,
pid_t pid, unw_word_t ip,
unsigned long *segbase,
unsigned long *mapoff,
char *path, size_t pathlen);
extern void tdep_get_exe_image_path (char *path);
extern int tdep_access_reg (struct cursor *c,
unw_regnum_t reg,
unw_word_t *valp,
int write);
extern int tdep_access_fpreg (struct cursor *c,
unw_regnum_t reg,
unw_fpreg_t *valp,
int write);
#endif /* TILEGX_LIBUNWIND_I_H */
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/tools/superpmi/superpmi-shared/lwmlist.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// lwmlist.h - List of all LightWeightMap in MethodContext.
// To use, #define LWM(map, key, value) to something.
// If you need to distinguish DenseLightWeightMap, #define DENSELWM(map, value) as well.
//----------------------------------------------------------
#ifndef LWM
#error Define LWM before including this file.
#endif
// If the key is needed, then DENSELWM must be defined.
#ifndef DENSELWM
#define DENSELWM(map, value) LWM(map, this_is_an_error, value)
#endif
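// Example usage (illustrative sketch; the exact MethodContext declarations may differ):
//
//   #define LWM(map, key, value) LightWeightMap<key, value>* map;
//   #define DENSELWM(map, value) DenseLightWeightMap<value>* map;
//   #include "lwmlist.h"
//
// Each LWM/DENSELWM entry below then expands to one member declaration.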
LWM(AllocPgoInstrumentationBySchema, DWORDLONG, Agnostic_AllocPgoInstrumentationBySchema)
LWM(GetPgoInstrumentationResults, DWORDLONG, Agnostic_GetPgoInstrumentationResults)
LWM(AppendClassName, Agnostic_AppendClassName, DWORD)
LWM(AreTypesEquivalent, DLDL, DWORD)
LWM(AsCorInfoType, DWORDLONG, DWORD)
LWM(CanAccessClass, Agnostic_CanAccessClassIn, Agnostic_CanAccessClassOut)
LWM(CanAccessFamily, DLDL, DWORD)
LWM(CanCast, DLDL, DWORD)
LWM(CanGetCookieForPInvokeCalliSig, CanGetCookieForPInvokeCalliSigValue, DWORD)
LWM(CanGetVarArgsHandle, CanGetVarArgsHandleValue, DWORD)
LWM(CanInline, DLDL, Agnostic_CanInline)
LWM(CanInlineTypeCheck, DLD, DWORD)
LWM(CanTailCall, Agnostic_CanTailCall, DWORD)
LWM(CheckMethodModifier, Agnostic_CheckMethodModifier, DWORD)
LWM(CompareTypesForCast, DLDL, DWORD)
LWM(CompareTypesForEquality, DLDL, DWORD)
LWM(CompileMethod, DWORD, Agnostic_CompileMethod)
LWM(ConstructStringLiteral, DLD, DLD)
LWM(ConvertPInvokeCalliToCall, DLD, DWORDLONG)
LWM(EmbedClassHandle, DWORDLONG, DLDL)
LWM(EmbedFieldHandle, DWORDLONG, DLDL)
LWM(EmbedGenericHandle, Agnostic_EmbedGenericHandle, Agnostic_CORINFO_GENERICHANDLE_RESULT)
LWM(EmbedMethodHandle, DWORDLONG, DLDL)
LWM(EmbedModuleHandle, DWORDLONG, DLDL)
DENSELWM(EmptyStringLiteral, DLD)
DENSELWM(ErrorList, DWORD)
LWM(FilterException, DWORD, DWORD)
LWM(FindCallSiteSig, Agnostic_FindCallSiteSig, Agnostic_CORINFO_SIG_INFO)
LWM(FindNameOfToken, DLD, DLD)
LWM(FindSig, Agnostic_FindSig, Agnostic_CORINFO_SIG_INFO)
LWM(GetAddressOfPInvokeTarget, DWORDLONG, DLD)
LWM(GetAddrOfCaptureThreadGlobal, DWORD, DLDL)
LWM(GetArgClass, Agnostic_GetArgClass_Key, Agnostic_GetArgClass_Value)
LWM(GetArgNext, DWORDLONG, DWORDLONG)
LWM(GetArgType, Agnostic_GetArgType_Key, Agnostic_GetArgType_Value)
LWM(GetArrayInitializationData, DLD, DWORDLONG)
LWM(GetArrayRank, DWORDLONG, DWORD)
LWM(GetArrayIntrinsicID, DWORDLONG, DWORD)
LWM(GetBoundaries, DWORDLONG, Agnostic_GetBoundaries)
LWM(GetBoxHelper, DWORDLONG, DWORD)
LWM(GetBuiltinClass, DWORD, DWORDLONG)
LWM(GetCallInfo, Agnostic_GetCallInfo, Agnostic_CORINFO_CALL_INFO)
LWM(GetCastingHelper, Agnostic_GetCastingHelper, DWORD)
LWM(GetChildType, DWORDLONG, DLD)
LWM(GetClassAlignmentRequirement, DLD, DWORD)
LWM(GetClassAttribs, DWORDLONG, DWORD)
LWM(GetClassDomainID, DWORDLONG, DLD)
LWM(GetClassGClayout, DWORDLONG, Agnostic_GetClassGClayout)
LWM(GetClassModuleIdForStatics, DWORDLONG, Agnostic_GetClassModuleIdForStatics)
LWM(GetClassName, DWORDLONG, DWORD)
LWM(GetClassNameFromMetadata, DLD, DD)
LWM(GetTypeInstantiationArgument, DWORDLONG, DWORDLONG)
LWM(GetClassNumInstanceFields, DWORDLONG, DWORD)
LWM(GetClassSize, DWORDLONG, DWORD)
LWM(GetHeapClassSize, DWORDLONG, DWORD)
LWM(CanAllocateOnStack, DWORDLONG, DWORD)
LWM(GetCookieForPInvokeCalliSig, GetCookieForPInvokeCalliSigValue, DLDL)
LWM(GetDefaultComparerClass, DWORDLONG, DWORDLONG)
LWM(GetDefaultEqualityComparerClass, DWORDLONG, DWORDLONG)
LWM(GetDelegateCtor, Agnostic_GetDelegateCtorIn, Agnostic_GetDelegateCtorOut)
LWM(GetEEInfo, DWORD, Agnostic_CORINFO_EE_INFO)
LWM(GetEHinfo, DLD, Agnostic_CORINFO_EH_CLAUSE)
LWM(GetFieldAddress, DWORDLONG, Agnostic_GetFieldAddress)
LWM(GetStaticFieldCurrentClass, DWORDLONG, Agnostic_GetStaticFieldCurrentClass)
LWM(GetFieldClass, DWORDLONG, DWORDLONG)
LWM(GetFieldInClass, DLD, DWORDLONG)
LWM(GetFieldInfo, Agnostic_GetFieldInfo, Agnostic_CORINFO_FIELD_INFO)
LWM(GetFieldName, DWORDLONG, DD)
LWM(GetFieldOffset, DWORDLONG, DWORD)
LWM(GetFieldThreadLocalStoreID, DWORDLONG, DLD)
LWM(GetFieldType, DLDL, DLD)
LWM(GetFunctionEntryPoint, DLD, DLD)
LWM(GetFunctionFixedEntryPoint, DWORDLONG, Agnostic_CORINFO_CONST_LOOKUP)
LWM(GetGSCookie, DWORD, DLDL)
LWM(GetHelperFtn, DWORD, DLDL)
LWM(GetHelperName, DWORD, DWORD)
LWM(GetHFAType, DWORDLONG, DWORD)
LWM(GetInlinedCallFrameVptr, DWORD, DLDL)
LWM(GetIntConfigValue, Agnostic_ConfigIntInfo, DWORD)
LWM(GetJitFlags, DWORD, DD)
LWM(GetJitTimeLogFilename, DWORD, DWORD)
LWM(GetJustMyCodeHandle, DWORDLONG, DLDL)
LWM(GetLazyStringLiteralHelper, DWORDLONG, DWORD)
LWM(GetLocationOfThisType, DWORDLONG, Agnostic_CORINFO_LOOKUP_KIND)
LWM(IsIntrinsic, DWORDLONG, DWORD)
LWM(GetMethodAttribs, DWORDLONG, DWORD)
LWM(GetClassModule, DWORDLONG, DWORDLONG)
LWM(GetModuleAssembly, DWORDLONG, DWORDLONG)
LWM(GetAssemblyName, DWORDLONG, DWORD)
LWM(GetMethodClass, DWORDLONG, DWORDLONG)
LWM(GetMethodModule, DWORDLONG, DWORDLONG)
LWM(GetMethodDefFromMethod, DWORDLONG, DWORD)
LWM(GetMethodHash, DWORDLONG, DWORD)
LWM(GetMethodInfo, DWORDLONG, Agnostic_GetMethodInfo)
LWM(GetMethodName, DLD, DD)
LWM(GetMethodNameFromMetadata, Agnostic_CORINFO_METHODNAME_TOKENin, Agnostic_CORINFO_METHODNAME_TOKENout)
LWM(GetMethodSig, DLDL, Agnostic_CORINFO_SIG_INFO)
LWM(GetMethodSync, DWORDLONG, DLDL)
LWM(GetMethodVTableOffset, DWORDLONG, DDD)
LWM(GetNewArrHelper, DWORDLONG, DWORD)
LWM(GetNewHelper, Agnostic_GetNewHelper, DD)
LWM(GetOSRInfo, DWORD, Agnostic_GetOSRInfo)
LWM(GetParentType, DWORDLONG, DWORDLONG)
LWM(GetProfilingHandle, DWORD, Agnostic_GetProfilingHandle)
LWM(GetReadyToRunHelper, GetReadyToRunHelper_TOKENin, GetReadyToRunHelper_TOKENout)
LWM(GetReadyToRunDelegateCtorHelper, GetReadyToRunDelegateCtorHelper_TOKENIn, Agnostic_CORINFO_LOOKUP)
LWM(GetRelocTypeHint, DWORDLONG, DWORD)
LWM(GetExpectedTargetArchitecture, DWORD, DWORD)
LWM(GetSharedCCtorHelper, DWORDLONG, DWORD)
LWM(GetStringConfigValue, DWORD, DWORD)
LWM(GetSystemVAmd64PassStructInRegisterDescriptor, DWORDLONG, Agnostic_GetSystemVAmd64PassStructInRegisterDescriptor)
LWM(GetLoongArch64PassStructInRegisterFlags, DWORDLONG, DWORD)
LWM(GetTailCallHelpers, Agnostic_GetTailCallHelpers, Agnostic_CORINFO_TAILCALL_HELPERS)
LWM(UpdateEntryPointForTailCall, Agnostic_CORINFO_CONST_LOOKUP, Agnostic_CORINFO_CONST_LOOKUP)
LWM(GetThreadTLSIndex, DWORD, DLD)
LWM(GetTokenTypeAsHandle, GetTokenTypeAsHandleValue, DWORDLONG)
LWM(GetTypeForBox, DWORDLONG, DWORDLONG)
LWM(GetTypeForPrimitiveValueClass, DWORDLONG, DWORD)
LWM(GetTypeForPrimitiveNumericClass, DWORDLONG, DWORD)
LWM(GetUnboxedEntry, DWORDLONG, DLD)
LWM(GetUnBoxHelper, DWORDLONG, DWORD)
LWM(GetVarArgsHandle, GetVarArgsHandleValue, DLDL)
LWM(GetVars, DWORDLONG, Agnostic_GetVars)
LWM(InitClass, Agnostic_InitClass, DWORD)
LWM(IsCompatibleDelegate, Agnostic_IsCompatibleDelegate, DD)
LWM(IsDelegateCreationAllowed, DLDL, DWORD)
LWM(IsFieldStatic, DWORDLONG, DWORD)
LWM(IsIntrinsicType, DWORDLONG, DWORD)
LWM(IsSDArray, DWORDLONG, DWORD)
LWM(IsValidStringRef, DLD, DWORD)
LWM(GetStringLiteral, DLD, DD)
LWM(IsValidToken, DLD, DWORD)
LWM(IsValueClass, DWORDLONG, DWORD)
LWM(MergeClasses, DLDL, DWORDLONG)
LWM(IsMoreSpecificType, DLDL, DWORD)
LWM(PInvokeMarshalingRequired, MethodOrSigInfoValue, DWORD)
LWM(ResolveToken, Agnostic_CORINFO_RESOLVED_TOKENin, ResolveTokenValue)
LWM(ResolveVirtualMethod, Agnostic_ResolveVirtualMethodKey, Agnostic_ResolveVirtualMethodResult)
LWM(TryResolveToken, Agnostic_CORINFO_RESOLVED_TOKENin, TryResolveTokenValue)
LWM(SatisfiesClassConstraints, DWORDLONG, DWORD)
LWM(SatisfiesMethodConstraints, DLDL, DWORD)
LWM(GetUnmanagedCallConv, MethodOrSigInfoValue, DD)
LWM(DoesFieldBelongToClass, DLDL, DWORD)
DENSELWM(SigInstHandleMap, DWORDLONG)
#undef LWM
#undef DENSELWM
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// lwmlist.h - List of all LightWeightMap in MethodContext.
// To use, #define LWM(map, key, value) to something.
// If you need to distinguish DenseLightWeightMap, #define DENSELWM(map, value) as well.
//----------------------------------------------------------
#ifndef LWM
#error Define LWM before including this file.
#endif
// If the key is needed, then DENSELWM must be defined.
#ifndef DENSELWM
#define DENSELWM(map, value) LWM(map, this_is_an_error, value)
#endif
LWM(AllocPgoInstrumentationBySchema, DWORDLONG, Agnostic_AllocPgoInstrumentationBySchema)
LWM(GetPgoInstrumentationResults, DWORDLONG, Agnostic_GetPgoInstrumentationResults)
LWM(AppendClassName, Agnostic_AppendClassName, DWORD)
LWM(AreTypesEquivalent, DLDL, DWORD)
LWM(AsCorInfoType, DWORDLONG, DWORD)
LWM(CanAccessClass, Agnostic_CanAccessClassIn, Agnostic_CanAccessClassOut)
LWM(CanAccessFamily, DLDL, DWORD)
LWM(CanCast, DLDL, DWORD)
LWM(CanGetCookieForPInvokeCalliSig, CanGetCookieForPInvokeCalliSigValue, DWORD)
LWM(CanGetVarArgsHandle, CanGetVarArgsHandleValue, DWORD)
LWM(CanInline, DLDL, Agnostic_CanInline)
LWM(CanInlineTypeCheck, DLD, DWORD)
LWM(CanTailCall, Agnostic_CanTailCall, DWORD)
LWM(CheckMethodModifier, Agnostic_CheckMethodModifier, DWORD)
LWM(CompareTypesForCast, DLDL, DWORD)
LWM(CompareTypesForEquality, DLDL, DWORD)
LWM(CompileMethod, DWORD, Agnostic_CompileMethod)
LWM(ConstructStringLiteral, DLD, DLD)
LWM(ConvertPInvokeCalliToCall, DLD, DWORDLONG)
LWM(EmbedClassHandle, DWORDLONG, DLDL)
LWM(EmbedFieldHandle, DWORDLONG, DLDL)
LWM(EmbedGenericHandle, Agnostic_EmbedGenericHandle, Agnostic_CORINFO_GENERICHANDLE_RESULT)
LWM(EmbedMethodHandle, DWORDLONG, DLDL)
LWM(EmbedModuleHandle, DWORDLONG, DLDL)
DENSELWM(EmptyStringLiteral, DLD)
DENSELWM(ErrorList, DWORD)
LWM(FilterException, DWORD, DWORD)
LWM(FindCallSiteSig, Agnostic_FindCallSiteSig, Agnostic_CORINFO_SIG_INFO)
LWM(FindNameOfToken, DLD, DLD)
LWM(FindSig, Agnostic_FindSig, Agnostic_CORINFO_SIG_INFO)
LWM(GetAddressOfPInvokeTarget, DWORDLONG, DLD)
LWM(GetAddrOfCaptureThreadGlobal, DWORD, DLDL)
LWM(GetArgClass, Agnostic_GetArgClass_Key, Agnostic_GetArgClass_Value)
LWM(GetArgNext, DWORDLONG, DWORDLONG)
LWM(GetArgType, Agnostic_GetArgType_Key, Agnostic_GetArgType_Value)
LWM(GetArrayInitializationData, DLD, DWORDLONG)
LWM(GetArrayRank, DWORDLONG, DWORD)
LWM(GetArrayIntrinsicID, DWORDLONG, DWORD)
LWM(GetBoundaries, DWORDLONG, Agnostic_GetBoundaries)
LWM(GetBoxHelper, DWORDLONG, DWORD)
LWM(GetBuiltinClass, DWORD, DWORDLONG)
LWM(GetCallInfo, Agnostic_GetCallInfo, Agnostic_CORINFO_CALL_INFO)
LWM(GetCastingHelper, Agnostic_GetCastingHelper, DWORD)
LWM(GetChildType, DWORDLONG, DLD)
LWM(GetClassAlignmentRequirement, DLD, DWORD)
LWM(GetClassAttribs, DWORDLONG, DWORD)
LWM(GetClassDomainID, DWORDLONG, DLD)
LWM(GetClassGClayout, DWORDLONG, Agnostic_GetClassGClayout)
LWM(GetClassModuleIdForStatics, DWORDLONG, Agnostic_GetClassModuleIdForStatics)
LWM(GetClassName, DWORDLONG, DWORD)
LWM(GetClassNameFromMetadata, DLD, DD)
LWM(GetTypeInstantiationArgument, DWORDLONG, DWORDLONG)
LWM(GetClassNumInstanceFields, DWORDLONG, DWORD)
LWM(GetClassSize, DWORDLONG, DWORD)
LWM(GetHeapClassSize, DWORDLONG, DWORD)
LWM(CanAllocateOnStack, DWORDLONG, DWORD)
LWM(GetCookieForPInvokeCalliSig, GetCookieForPInvokeCalliSigValue, DLDL)
LWM(GetDefaultComparerClass, DWORDLONG, DWORDLONG)
LWM(GetDefaultEqualityComparerClass, DWORDLONG, DWORDLONG)
LWM(GetDelegateCtor, Agnostic_GetDelegateCtorIn, Agnostic_GetDelegateCtorOut)
LWM(GetEEInfo, DWORD, Agnostic_CORINFO_EE_INFO)
LWM(GetEHinfo, DLD, Agnostic_CORINFO_EH_CLAUSE)
LWM(GetFieldAddress, DWORDLONG, Agnostic_GetFieldAddress)
LWM(GetStaticFieldCurrentClass, DWORDLONG, Agnostic_GetStaticFieldCurrentClass)
LWM(GetFieldClass, DWORDLONG, DWORDLONG)
LWM(GetFieldInClass, DLD, DWORDLONG)
LWM(GetFieldInfo, Agnostic_GetFieldInfo, Agnostic_CORINFO_FIELD_INFO)
LWM(GetFieldName, DWORDLONG, DD)
LWM(GetFieldOffset, DWORDLONG, DWORD)
LWM(GetFieldThreadLocalStoreID, DWORDLONG, DLD)
LWM(GetFieldType, DLDL, DLD)
LWM(GetFunctionEntryPoint, DLD, DLD)
LWM(GetFunctionFixedEntryPoint, DWORDLONG, Agnostic_CORINFO_CONST_LOOKUP)
LWM(GetGSCookie, DWORD, DLDL)
LWM(GetHelperFtn, DWORD, DLDL)
LWM(GetHelperName, DWORD, DWORD)
LWM(GetHFAType, DWORDLONG, DWORD)
LWM(GetInlinedCallFrameVptr, DWORD, DLDL)
LWM(GetIntConfigValue, Agnostic_ConfigIntInfo, DWORD)
LWM(GetJitFlags, DWORD, DD)
LWM(GetJitTimeLogFilename, DWORD, DWORD)
LWM(GetJustMyCodeHandle, DWORDLONG, DLDL)
LWM(GetLazyStringLiteralHelper, DWORDLONG, DWORD)
LWM(GetLocationOfThisType, DWORDLONG, Agnostic_CORINFO_LOOKUP_KIND)
LWM(IsIntrinsic, DWORDLONG, DWORD)
LWM(GetMethodAttribs, DWORDLONG, DWORD)
LWM(GetClassModule, DWORDLONG, DWORDLONG)
LWM(GetModuleAssembly, DWORDLONG, DWORDLONG)
LWM(GetAssemblyName, DWORDLONG, DWORD)
LWM(GetMethodClass, DWORDLONG, DWORDLONG)
LWM(GetMethodModule, DWORDLONG, DWORDLONG)
LWM(GetMethodDefFromMethod, DWORDLONG, DWORD)
LWM(GetMethodHash, DWORDLONG, DWORD)
LWM(GetMethodInfo, DWORDLONG, Agnostic_GetMethodInfo)
LWM(GetMethodName, DLD, DD)
LWM(GetMethodNameFromMetadata, Agnostic_CORINFO_METHODNAME_TOKENin, Agnostic_CORINFO_METHODNAME_TOKENout)
LWM(GetMethodSig, DLDL, Agnostic_CORINFO_SIG_INFO)
LWM(GetMethodSync, DWORDLONG, DLDL)
LWM(GetMethodVTableOffset, DWORDLONG, DDD)
LWM(GetNewArrHelper, DWORDLONG, DWORD)
LWM(GetNewHelper, Agnostic_GetNewHelper, DD)
LWM(GetOSRInfo, DWORD, Agnostic_GetOSRInfo)
LWM(GetParentType, DWORDLONG, DWORDLONG)
LWM(GetProfilingHandle, DWORD, Agnostic_GetProfilingHandle)
LWM(GetReadyToRunHelper, GetReadyToRunHelper_TOKENin, GetReadyToRunHelper_TOKENout)
LWM(GetReadyToRunDelegateCtorHelper, GetReadyToRunDelegateCtorHelper_TOKENIn, Agnostic_CORINFO_LOOKUP)
LWM(GetRelocTypeHint, DWORDLONG, DWORD)
LWM(GetExpectedTargetArchitecture, DWORD, DWORD)
LWM(GetSharedCCtorHelper, DWORDLONG, DWORD)
LWM(GetStringConfigValue, DWORD, DWORD)
LWM(GetSystemVAmd64PassStructInRegisterDescriptor, DWORDLONG, Agnostic_GetSystemVAmd64PassStructInRegisterDescriptor)
LWM(GetLoongArch64PassStructInRegisterFlags, DWORDLONG, DWORD)
LWM(GetTailCallHelpers, Agnostic_GetTailCallHelpers, Agnostic_CORINFO_TAILCALL_HELPERS)
LWM(UpdateEntryPointForTailCall, Agnostic_CORINFO_CONST_LOOKUP, Agnostic_CORINFO_CONST_LOOKUP)
LWM(GetThreadTLSIndex, DWORD, DLD)
LWM(GetTokenTypeAsHandle, GetTokenTypeAsHandleValue, DWORDLONG)
LWM(GetTypeForBox, DWORDLONG, DWORDLONG)
LWM(GetTypeForPrimitiveValueClass, DWORDLONG, DWORD)
LWM(GetTypeForPrimitiveNumericClass, DWORDLONG, DWORD)
LWM(GetUnboxedEntry, DWORDLONG, DLD);
LWM(GetUnBoxHelper, DWORDLONG, DWORD)
LWM(GetVarArgsHandle, GetVarArgsHandleValue, DLDL)
LWM(GetVars, DWORDLONG, Agnostic_GetVars)
LWM(InitClass, Agnostic_InitClass, DWORD)
LWM(IsCompatibleDelegate, Agnostic_IsCompatibleDelegate, DD)
LWM(IsDelegateCreationAllowed, DLDL, DWORD)
LWM(IsFieldStatic, DWORDLONG, DWORD)
LWM(IsIntrinsicType, DWORDLONG, DWORD)
LWM(IsSDArray, DWORDLONG, DWORD)
LWM(IsValidStringRef, DLD, DWORD)
LWM(GetStringLiteral, DLD, DD)
LWM(IsValidToken, DLD, DWORD)
LWM(IsValueClass, DWORDLONG, DWORD)
LWM(MergeClasses, DLDL, DWORDLONG)
LWM(IsMoreSpecificType, DLDL, DWORD)
LWM(PInvokeMarshalingRequired, MethodOrSigInfoValue, DWORD)
LWM(ResolveToken, Agnostic_CORINFO_RESOLVED_TOKENin, ResolveTokenValue)
LWM(ResolveVirtualMethod, Agnostic_ResolveVirtualMethodKey, Agnostic_ResolveVirtualMethodResult)
LWM(TryResolveToken, Agnostic_CORINFO_RESOLVED_TOKENin, TryResolveTokenValue)
LWM(SatisfiesClassConstraints, DWORDLONG, DWORD)
LWM(SatisfiesMethodConstraints, DLDL, DWORD)
LWM(GetUnmanagedCallConv, MethodOrSigInfoValue, DD)
LWM(DoesFieldBelongToClass, DLDL, DWORD)
DENSELWM(SigInstHandleMap, DWORDLONG)
#undef LWM
#undef DENSELWM
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
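A rough sketch of the pattern behind cleanup item 1 above (illustrative only, not the actual diff; `reg` stands for whatever regNumber the call site already has):
```cpp
// gcMarkRegSetNpt takes a register mask (regMaskTP); passing a raw regNumber still compiles
// because the enum converts to an integer, but it marks the wrong registers.
gcInfo.gcMarkRegSetNpt(reg); // before: regNumber passed where a mask is expected
gcInfo.gcMarkRegSetNpt(genRegMask(reg)); // after: convert the register to its mask first
```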
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/vm/stubhelpers.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: stubhelpers.h
//
#ifndef __STUBHELPERS_h__
#define __STUBHELPERS_h__
#include "fcall.h"
#include "clrvarargs.h"
// Maximum number of deferred byref validation entries - we will trigger gen0 GC if we reach this number.
#define BYREF_VALIDATION_LIST_MAX_SIZE (512 * 1024)
class StubHelpers
{
#ifdef VERIFY_HEAP
struct ByrefValidationEntry
{
void *pByref; // pointer to GC heap
MethodDesc *pMD; // interop MD this byref was passed to
};
static CQuickArray<ByrefValidationEntry> s_ByrefValidationEntries;
static SIZE_T s_ByrefValidationIndex;
static CrstStatic s_ByrefValidationLock;
static void ValidateObjectInternal(Object *pObjUNSAFE, BOOL fValidateNextObj);
static MethodDesc *ResolveInteropMethod(Object *pThisUNSAFE, MethodDesc *pMD);
static void FormatValidationMessage(MethodDesc *pMD, SString &ssErrorString);
public:
static void Init();
static void ProcessByrefValidationList();
#else // VERIFY_HEAP
public:
static void Init() { LIMITED_METHOD_CONTRACT; }
#endif // VERIFY_HEAP
//-------------------------------------------------------
// PInvoke stub helpers
//-------------------------------------------------------
#ifdef FEATURE_COMINTEROP
static FCDECL4(IUnknown*, GetCOMIPFromRCW, Object* pSrcUNSAFE, MethodDesc* pMD, void **ppTarget, CLR_BOOL* pfNeedsRelease);
static FCDECL2(void, ObjectMarshaler__ConvertToNative, Object* pSrcUNSAFE, VARIANT* pDest);
static FCDECL1(Object*, ObjectMarshaler__ConvertToManaged, VARIANT* pSrc);
static FCDECL1(void, ObjectMarshaler__ClearNative, VARIANT* pSrc);
static FCDECL4(IUnknown*, InterfaceMarshaler__ConvertToNative, Object* pObjUNSAFE, MethodTable* pItfMT, MethodTable* pClsMT, DWORD dwFlags);
static FCDECL4(Object*, InterfaceMarshaler__ConvertToManaged, IUnknown **ppUnk, MethodTable *pItfMT, MethodTable *pClsMT, DWORD dwFlags);
static FCDECL1(Object *, InterfaceMarshaler__ConvertToManagedWithoutUnboxing, IUnknown *pNative);
#endif // FEATURE_COMINTEROP
static FCDECL0(void, SetLastError );
static FCDECL0(void, ClearLastError );
static FCDECL1(void*, GetNDirectTarget, NDirectMethodDesc* pNMD);
static FCDECL1(void*, GetDelegateTarget, DelegateObject *pThisUNSAFE);
static FCDECL2(void, ThrowInteropParamException, UINT resID, UINT paramIdx);
static FCDECL1(Object*, GetHRExceptionObject, HRESULT hr);
#ifdef FEATURE_COMINTEROP
static FCDECL3(Object*, GetCOMHRExceptionObject, HRESULT hr, MethodDesc *pMD, Object *unsafe_pThis);
#endif // FEATURE_COMINTEROP
static FCDECL3(void*, CreateCustomMarshalerHelper, MethodDesc* pMD, mdToken paramToken, TypeHandle hndManagedType);
static FCDECL3(void, FmtClassUpdateNativeInternal, Object* pObjUNSAFE, BYTE* pbNative, OBJECTREF *ppCleanupWorkListOnStack);
static FCDECL2(void, FmtClassUpdateCLRInternal, Object* pObjUNSAFE, BYTE* pbNative);
static FCDECL2(void, LayoutDestroyNativeInternal, Object* pObjUNSAFE, BYTE* pbNative);
static FCDECL1(Object*, AllocateInternal, EnregisteredTypeHandle typeHnd);
static FCDECL3(void, MarshalToUnmanagedVaListInternal, va_list va, DWORD cbVaListSize, const VARARGS* pArgIterator);
static FCDECL2(void, MarshalToManagedVaListInternal, va_list va, VARARGS* pArgIterator);
static FCDECL0(void*, GetStubContext);
static FCDECL2(void, LogPinnedArgument, MethodDesc *localDesc, Object *nativeArg);
static FCDECL1(DWORD, CalcVaListSize, VARARGS *varargs);
static FCDECL3(void, ValidateObject, Object *pObjUNSAFE, MethodDesc *pMD, Object *pThisUNSAFE);
static FCDECL3(void, ValidateByref, void *pByref, MethodDesc *pMD, Object *pThisUNSAFE);
#ifdef PROFILING_SUPPORTED
//-------------------------------------------------------
// Profiler helper
//-------------------------------------------------------
static FCDECL3(SIZE_T, ProfilerBeginTransitionCallback, SIZE_T pSecretParam, Thread* pThread, Object* unsafe_pThis);
static FCDECL2(void, ProfilerEndTransitionCallback, MethodDesc* pRealMD, Thread* pThread);
#endif
#ifdef FEATURE_ARRAYSTUB_AS_IL
static FCDECL2(void, ArrayTypeCheck, Object*, PtrArray*);
#endif
#ifdef FEATURE_MULTICASTSTUB_AS_IL
static FCDECL2(void, MulticastDebuggerTraceHelper, Object*, INT32);
#endif
static FCDECL0(void*, NextCallReturnAddress);
};
#ifdef FEATURE_COMINTEROP
extern "C" void QCALLTYPE InterfaceMarshaler__ClearNative(IUnknown * pUnk);
#endif
#endif // __STUBHELPERS_h__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: stubhelpers.h
//
#ifndef __STUBHELPERS_h__
#define __STUBHELPERS_h__
#include "fcall.h"
#include "clrvarargs.h"
// Maximum number of deferred byref validation entries - we will trigger gen0 GC if we reach this number.
#define BYREF_VALIDATION_LIST_MAX_SIZE (512 * 1024)
class StubHelpers
{
#ifdef VERIFY_HEAP
struct ByrefValidationEntry
{
void *pByref; // pointer to GC heap
MethodDesc *pMD; // interop MD this byref was passed to
};
static CQuickArray<ByrefValidationEntry> s_ByrefValidationEntries;
static SIZE_T s_ByrefValidationIndex;
static CrstStatic s_ByrefValidationLock;
static void ValidateObjectInternal(Object *pObjUNSAFE, BOOL fValidateNextObj);
static MethodDesc *ResolveInteropMethod(Object *pThisUNSAFE, MethodDesc *pMD);
static void FormatValidationMessage(MethodDesc *pMD, SString &ssErrorString);
public:
static void Init();
static void ProcessByrefValidationList();
#else // VERIFY_HEAP
public:
static void Init() { LIMITED_METHOD_CONTRACT; }
#endif // VERIFY_HEAP
//-------------------------------------------------------
// PInvoke stub helpers
//-------------------------------------------------------
#ifdef FEATURE_COMINTEROP
static FCDECL4(IUnknown*, GetCOMIPFromRCW, Object* pSrcUNSAFE, MethodDesc* pMD, void **ppTarget, CLR_BOOL* pfNeedsRelease);
static FCDECL2(void, ObjectMarshaler__ConvertToNative, Object* pSrcUNSAFE, VARIANT* pDest);
static FCDECL1(Object*, ObjectMarshaler__ConvertToManaged, VARIANT* pSrc);
static FCDECL1(void, ObjectMarshaler__ClearNative, VARIANT* pSrc);
static FCDECL4(IUnknown*, InterfaceMarshaler__ConvertToNative, Object* pObjUNSAFE, MethodTable* pItfMT, MethodTable* pClsMT, DWORD dwFlags);
static FCDECL4(Object*, InterfaceMarshaler__ConvertToManaged, IUnknown **ppUnk, MethodTable *pItfMT, MethodTable *pClsMT, DWORD dwFlags);
static FCDECL1(Object *, InterfaceMarshaler__ConvertToManagedWithoutUnboxing, IUnknown *pNative);
#endif // FEATURE_COMINTEROP
static FCDECL0(void, SetLastError );
static FCDECL0(void, ClearLastError );
static FCDECL1(void*, GetNDirectTarget, NDirectMethodDesc* pNMD);
static FCDECL1(void*, GetDelegateTarget, DelegateObject *pThisUNSAFE);
static FCDECL2(void, ThrowInteropParamException, UINT resID, UINT paramIdx);
static FCDECL1(Object*, GetHRExceptionObject, HRESULT hr);
#ifdef FEATURE_COMINTEROP
static FCDECL3(Object*, GetCOMHRExceptionObject, HRESULT hr, MethodDesc *pMD, Object *unsafe_pThis);
#endif // FEATURE_COMINTEROP
static FCDECL3(void*, CreateCustomMarshalerHelper, MethodDesc* pMD, mdToken paramToken, TypeHandle hndManagedType);
static FCDECL3(void, FmtClassUpdateNativeInternal, Object* pObjUNSAFE, BYTE* pbNative, OBJECTREF *ppCleanupWorkListOnStack);
static FCDECL2(void, FmtClassUpdateCLRInternal, Object* pObjUNSAFE, BYTE* pbNative);
static FCDECL2(void, LayoutDestroyNativeInternal, Object* pObjUNSAFE, BYTE* pbNative);
static FCDECL1(Object*, AllocateInternal, EnregisteredTypeHandle typeHnd);
static FCDECL3(void, MarshalToUnmanagedVaListInternal, va_list va, DWORD cbVaListSize, const VARARGS* pArgIterator);
static FCDECL2(void, MarshalToManagedVaListInternal, va_list va, VARARGS* pArgIterator);
static FCDECL0(void*, GetStubContext);
static FCDECL2(void, LogPinnedArgument, MethodDesc *localDesc, Object *nativeArg);
static FCDECL1(DWORD, CalcVaListSize, VARARGS *varargs);
static FCDECL3(void, ValidateObject, Object *pObjUNSAFE, MethodDesc *pMD, Object *pThisUNSAFE);
static FCDECL3(void, ValidateByref, void *pByref, MethodDesc *pMD, Object *pThisUNSAFE);
#ifdef PROFILING_SUPPORTED
//-------------------------------------------------------
// Profiler helper
//-------------------------------------------------------
static FCDECL3(SIZE_T, ProfilerBeginTransitionCallback, SIZE_T pSecretParam, Thread* pThread, Object* unsafe_pThis);
static FCDECL2(void, ProfilerEndTransitionCallback, MethodDesc* pRealMD, Thread* pThread);
#endif
#ifdef FEATURE_ARRAYSTUB_AS_IL
static FCDECL2(void, ArrayTypeCheck, Object*, PtrArray*);
#endif
#ifdef FEATURE_MULTICASTSTUB_AS_IL
static FCDECL2(void, MulticastDebuggerTraceHelper, Object*, INT32);
#endif
static FCDECL0(void*, NextCallReturnAddress);
};
#ifdef FEATURE_COMINTEROP
extern "C" void QCALLTYPE InterfaceMarshaler__ClearNative(IUnknown * pUnk);
#endif
#endif // __STUBHELPERS_h__
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/hwintrinsic.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "jitpch.h"
#include "hwintrinsic.h"
#ifdef FEATURE_HW_INTRINSICS
static const HWIntrinsicInfo hwIntrinsicInfoArray[] = {
// clang-format off
#if defined(TARGET_XARCH)
#define HARDWARE_INTRINSIC(isa, name, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \
{NI_##isa##_##name, #name, InstructionSet_##isa, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, static_cast<HWIntrinsicFlag>(flag)},
#include "hwintrinsiclistxarch.h"
#elif defined (TARGET_ARM64)
#define HARDWARE_INTRINSIC(isa, name, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \
{NI_##isa##_##name, #name, InstructionSet_##isa, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, static_cast<HWIntrinsicFlag>(flag)},
#include "hwintrinsiclistarm64.h"
#else
#error Unsupported platform
#endif
// clang-format on
};
//------------------------------------------------------------------------
// lookup: Gets the HWIntrinsicInfo associated with a given NamedIntrinsic
//
// Arguments:
// id -- The NamedIntrinsic associated with the HWIntrinsic to lookup
//
// Return Value:
// The HWIntrinsicInfo associated with id
const HWIntrinsicInfo& HWIntrinsicInfo::lookup(NamedIntrinsic id)
{
assert(id != NI_Illegal);
assert(id > NI_HW_INTRINSIC_START);
assert(id < NI_HW_INTRINSIC_END);
return hwIntrinsicInfoArray[id - NI_HW_INTRINSIC_START - 1];
}
//------------------------------------------------------------------------
// getBaseJitTypeFromArgIfNeeded: Get simdBaseJitType of intrinsic from 1st or 2nd argument depending on the flag
//
// Arguments:
// intrinsic -- id of the intrinsic function.
// clsHnd -- class handle containing the intrinsic function.
// method -- method handle of the intrinsic function.
// sig -- signature of the intrinsic call.
// simdBaseJitType -- Predetermined simdBaseJitType, could be CORINFO_TYPE_UNDEF
//
// Return Value:
// The basetype of the intrinsic if it can be fetched from the 1st or 2nd argument; else return simdBaseJitType unmodified.
//
CorInfoType Compiler::getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType)
{
if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic) || HWIntrinsicInfo::BaseTypeFromFirstArg(intrinsic))
{
CORINFO_ARG_LIST_HANDLE arg = sig->args;
if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic))
{
arg = info.compCompHnd->getArgNext(arg);
}
CORINFO_CLASS_HANDLE argClass = info.compCompHnd->getArgClass(sig, arg);
simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(argClass);
if (simdBaseJitType == CORINFO_TYPE_UNDEF) // the argument is not a vector
{
CORINFO_CLASS_HANDLE tmpClass;
simdBaseJitType = strip(info.compCompHnd->getArgType(sig, arg, &tmpClass));
if (simdBaseJitType == CORINFO_TYPE_PTR)
{
simdBaseJitType = info.compCompHnd->getChildType(argClass, &tmpClass);
}
}
assert(simdBaseJitType != CORINFO_TYPE_UNDEF);
}
return simdBaseJitType;
}
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType)
{
if (m_simdHandleCache == nullptr)
{
return NO_CLASS_HANDLE;
}
if (simdType == TYP_SIMD16)
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->Vector128FloatHandle;
case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->Vector128DoubleHandle;
case CORINFO_TYPE_INT:
return m_simdHandleCache->Vector128IntHandle;
case CORINFO_TYPE_USHORT:
return m_simdHandleCache->Vector128UShortHandle;
case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->Vector128UByteHandle;
case CORINFO_TYPE_SHORT:
return m_simdHandleCache->Vector128ShortHandle;
case CORINFO_TYPE_BYTE:
return m_simdHandleCache->Vector128ByteHandle;
case CORINFO_TYPE_LONG:
return m_simdHandleCache->Vector128LongHandle;
case CORINFO_TYPE_UINT:
return m_simdHandleCache->Vector128UIntHandle;
case CORINFO_TYPE_ULONG:
return m_simdHandleCache->Vector128ULongHandle;
case CORINFO_TYPE_NATIVEINT:
return m_simdHandleCache->Vector128NIntHandle;
case CORINFO_TYPE_NATIVEUINT:
return m_simdHandleCache->Vector128NUIntHandle;
default:
assert(!"Didn't find a class handle for simdType");
}
}
#ifdef TARGET_XARCH
else if (simdType == TYP_SIMD32)
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->Vector256FloatHandle;
case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->Vector256DoubleHandle;
case CORINFO_TYPE_INT:
return m_simdHandleCache->Vector256IntHandle;
case CORINFO_TYPE_USHORT:
return m_simdHandleCache->Vector256UShortHandle;
case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->Vector256UByteHandle;
case CORINFO_TYPE_SHORT:
return m_simdHandleCache->Vector256ShortHandle;
case CORINFO_TYPE_BYTE:
return m_simdHandleCache->Vector256ByteHandle;
case CORINFO_TYPE_LONG:
return m_simdHandleCache->Vector256LongHandle;
case CORINFO_TYPE_UINT:
return m_simdHandleCache->Vector256UIntHandle;
case CORINFO_TYPE_ULONG:
return m_simdHandleCache->Vector256ULongHandle;
case CORINFO_TYPE_NATIVEINT:
return m_simdHandleCache->Vector256NIntHandle;
case CORINFO_TYPE_NATIVEUINT:
return m_simdHandleCache->Vector256NUIntHandle;
default:
assert(!"Didn't find a class handle for simdType");
}
}
#endif // TARGET_XARCH
#ifdef TARGET_ARM64
else if (simdType == TYP_SIMD8)
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->Vector64FloatHandle;
case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->Vector64DoubleHandle;
case CORINFO_TYPE_INT:
return m_simdHandleCache->Vector64IntHandle;
case CORINFO_TYPE_USHORT:
return m_simdHandleCache->Vector64UShortHandle;
case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->Vector64UByteHandle;
case CORINFO_TYPE_SHORT:
return m_simdHandleCache->Vector64ShortHandle;
case CORINFO_TYPE_BYTE:
return m_simdHandleCache->Vector64ByteHandle;
case CORINFO_TYPE_UINT:
return m_simdHandleCache->Vector64UIntHandle;
case CORINFO_TYPE_LONG:
return m_simdHandleCache->Vector64LongHandle;
case CORINFO_TYPE_ULONG:
return m_simdHandleCache->Vector64ULongHandle;
case CORINFO_TYPE_NATIVEINT:
return m_simdHandleCache->Vector64NIntHandle;
case CORINFO_TYPE_NATIVEUINT:
return m_simdHandleCache->Vector64NUIntHandle;
default:
assert(!"Didn't find a class handle for simdType");
}
}
#endif // TARGET_ARM64
return NO_CLASS_HANDLE;
}
//------------------------------------------------------------------------
// vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID):
//
// Arguments:
// hwIntrinsicID -- The id for the HW intrinsic
//
// Return Value:
// Returns true if this intrinsic requires value numbering to add an
// extra SimdType argument that encodes the resulting type.
// If we don't do this, overloaded versions can return the same VN,
// leading to incorrect CSE substitutions.
//
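// Illustrative example (an assumption, not from the original comment): the same Add intrinsic id
// instantiated over Int32 versus Int16 executes different instructions; if the operand VNs happen
// to coincide (for instance via bit-preserving reinterpret casts of the same vectors), the two
// nodes would otherwise receive the same value number, so the extra VNF_SimdType argument keeps
// them distinct.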
/* static */ bool Compiler::vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID)
{
int numArgs = HWIntrinsicInfo::lookupNumArgs(hwIntrinsicID);
// HW Intrinsics with -1 for numArgs have a varying number of args, so we currently
// give them a unique value number, and don't add an extra argument.
//
if (numArgs == -1)
{
return false;
}
// We iterate over all of the different baseTypes for this intrinsic in the HWIntrinsicInfo table
// We set diffInsCount to the number of instructions that can execute differently.
//
unsigned diffInsCount = 0;
#ifdef TARGET_XARCH
instruction lastIns = INS_invalid;
#endif
for (var_types baseType = TYP_BYTE; (baseType <= TYP_DOUBLE); baseType = (var_types)(baseType + 1))
{
instruction curIns = HWIntrinsicInfo::lookupIns(hwIntrinsicID, baseType);
if (curIns != INS_invalid)
{
#ifdef TARGET_XARCH
if (curIns != lastIns)
{
diffInsCount++;
// remember the last valid instruction that we saw
lastIns = curIns;
}
#elif defined(TARGET_ARM64)
// On ARM64 we use the same instruction and specify an insOpt arrangement
// so we always consider the instruction operation to be different
//
diffInsCount++;
#endif // TARGET
if (diffInsCount >= 2)
{
// We can early exit the loop now
break;
}
}
}
// If we see two (or more) different instructions we need the extra VNF_SimdType arg
return (diffInsCount >= 2);
}
//------------------------------------------------------------------------
// lookupId: Gets the NamedIntrinsic for a given method name and InstructionSet
//
// Arguments:
// comp -- The compiler
// sig -- The signature of the intrinsic
// className -- The name of the class associated with the HWIntrinsic to lookup
// methodName -- The name of the method associated with the HWIntrinsic to lookup
// enclosingClassName -- The name of the enclosing class of X64 classes
//
// Return Value:
// The NamedIntrinsic associated with methodName and isa
NamedIntrinsic HWIntrinsicInfo::lookupId(Compiler* comp,
CORINFO_SIG_INFO* sig,
const char* className,
const char* methodName,
const char* enclosingClassName)
{
// TODO-Throughput: replace sequential search by binary search
CORINFO_InstructionSet isa = lookupIsa(className, enclosingClassName);
if (isa == InstructionSet_ILLEGAL)
{
return NI_Illegal;
}
bool isIsaSupported = comp->compSupportsHWIntrinsic(isa);
bool isHardwareAcceleratedProp = (strcmp(methodName, "get_IsHardwareAccelerated") == 0);
#ifdef TARGET_XARCH
if (isHardwareAcceleratedProp)
{
// Special case: Some of Vector128/256 APIs are hardware accelerated with Sse1 and Avx1,
// but we want IsHardwareAccelerated to return true only when all of them are (there
// can still be cases where e.g. Sse41 might give an additional boost for Vector128, but it's
// not important enough to bump the minimal Sse version here)
if (strcmp(className, "Vector128") == 0)
{
isa = InstructionSet_SSE2;
}
else if (strcmp(className, "Vector256") == 0)
{
isa = InstructionSet_AVX2;
}
}
#endif
if ((strcmp(methodName, "get_IsSupported") == 0) || isHardwareAcceleratedProp)
{
return isIsaSupported ? (comp->compExactlyDependsOn(isa) ? NI_IsSupported_True : NI_IsSupported_Dynamic)
: NI_IsSupported_False;
}
else if (!isIsaSupported)
{
return NI_Throw_PlatformNotSupportedException;
}
for (int i = 0; i < (NI_HW_INTRINSIC_END - NI_HW_INTRINSIC_START - 1); i++)
{
const HWIntrinsicInfo& intrinsicInfo = hwIntrinsicInfoArray[i];
if (isa != hwIntrinsicInfoArray[i].isa)
{
continue;
}
int numArgs = static_cast<unsigned>(intrinsicInfo.numArgs);
if ((numArgs != -1) && (sig->numArgs != static_cast<unsigned>(intrinsicInfo.numArgs)))
{
continue;
}
if (strcmp(methodName, intrinsicInfo.name) == 0)
{
return intrinsicInfo.id;
}
}
// There are several helper intrinsics that are implemented in managed code
// Those intrinsics will hit this code path and need to return NI_Illegal
return NI_Illegal;
}
//------------------------------------------------------------------------
// lookupSimdSize: Gets the SimdSize for a given HWIntrinsic and signature
//
// Arguments:
// id -- The ID associated with the HWIntrinsic to lookup
// sig -- The signature of the HWIntrinsic to lookup
//
// Return Value:
// The SIMD size for the HWIntrinsic associated with id and sig
//
// Remarks:
// This function is only used by the importer. After importation, we can
// get the SIMD size from the GenTreeHWIntrinsic node.
unsigned HWIntrinsicInfo::lookupSimdSize(Compiler* comp, NamedIntrinsic id, CORINFO_SIG_INFO* sig)
{
unsigned simdSize = 0;
if (tryLookupSimdSize(id, &simdSize))
{
return simdSize;
}
CORINFO_CLASS_HANDLE typeHnd = nullptr;
if (HWIntrinsicInfo::BaseTypeFromFirstArg(id))
{
typeHnd = comp->info.compCompHnd->getArgClass(sig, sig->args);
}
else if (HWIntrinsicInfo::BaseTypeFromSecondArg(id))
{
CORINFO_ARG_LIST_HANDLE secondArg = comp->info.compCompHnd->getArgNext(sig->args);
typeHnd = comp->info.compCompHnd->getArgClass(sig, secondArg);
}
else
{
assert(JITtype2varType(sig->retType) == TYP_STRUCT);
typeHnd = sig->retTypeSigClass;
}
CorInfoType simdBaseJitType = comp->getBaseJitTypeAndSizeOfSIMDType(typeHnd, &simdSize);
assert((simdSize > 0) && (simdBaseJitType != CORINFO_TYPE_UNDEF));
return simdSize;
}
//------------------------------------------------------------------------
// isImmOp: Checks whether the HWIntrinsic node has an imm operand
//
// Arguments:
// id -- The NamedIntrinsic associated with the HWIntrinsic to lookup
// op -- The operand to check
//
// Return Value:
// true if the node has an imm operand; otherwise, false
bool HWIntrinsicInfo::isImmOp(NamedIntrinsic id, const GenTree* op)
{
#ifdef TARGET_XARCH
if (HWIntrinsicInfo::lookupCategory(id) != HW_Category_IMM)
{
return false;
}
if (!HWIntrinsicInfo::MaybeImm(id))
{
return true;
}
#elif defined(TARGET_ARM64)
if (!HWIntrinsicInfo::HasImmediateOperand(id))
{
return false;
}
#else
#error Unsupported platform
#endif
if (genActualType(op->TypeGet()) != TYP_INT)
{
return false;
}
return true;
}
//------------------------------------------------------------------------
// getArgForHWIntrinsic: pop an argument from the stack and validate its type
//
// Arguments:
// argType -- the required type of argument
// argClass -- the class handle of argType
// expectAddr -- if true indicates we are expecting type stack entry to be a TYP_BYREF.
// newobjThis -- For CEE_NEWOBJ, this is the temp grabbed for the allocated uninitialized object.
//
// Return Value:
// the validated argument
//
GenTree* Compiler::getArgForHWIntrinsic(var_types argType,
CORINFO_CLASS_HANDLE argClass,
bool expectAddr,
GenTree* newobjThis)
{
GenTree* arg = nullptr;
if (varTypeIsStruct(argType))
{
if (!varTypeIsSIMD(argType))
{
unsigned int argSizeBytes;
(void)getBaseJitTypeAndSizeOfSIMDType(argClass, &argSizeBytes);
argType = getSIMDTypeForSize(argSizeBytes);
}
assert(varTypeIsSIMD(argType));
if (newobjThis == nullptr)
{
arg = impSIMDPopStack(argType, expectAddr);
assert(varTypeIsSIMD(arg->TypeGet()));
}
else
{
assert((newobjThis->gtOper == GT_ADDR) && (newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR));
arg = newobjThis;
// push newobj result on type stack
unsigned tmp = arg->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum();
impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(argClass).NormaliseForStack());
}
}
else
{
assert(varTypeIsArithmetic(argType));
arg = impPopStack().val;
assert(varTypeIsArithmetic(arg->TypeGet()));
assert(genActualType(arg->gtType) == genActualType(argType));
}
return arg;
}
//------------------------------------------------------------------------
// addRangeCheckIfNeeded: add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic
//
// Arguments:
// intrinsic -- intrinsic ID
// immOp -- the immediate operand of the intrinsic
// mustExpand -- true if the compiler is compiling the fallback(GT_CALL) of this intrinsics
// immLowerBound -- lower incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic)
// immUpperBound -- upper incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic)
//
// Return Value:
// add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic, which would throw ArgumentOutOfRangeException
// when the imm-argument is not in the valid range
//
GenTree* Compiler::addRangeCheckIfNeeded(
NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound)
{
assert(immOp != nullptr);
// Full-range imm-intrinsics do not need the range-check
// because the imm-parameter of the intrinsic method is a byte.
// AVX2 Gather intrinsics do not need the range-check
// because their imm-parameter has discrete valid values that are handled by managed code
if (mustExpand && HWIntrinsicInfo::isImmOp(intrinsic, immOp)
#ifdef TARGET_XARCH
&& !HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic) && !HWIntrinsicInfo::HasFullRangeImm(intrinsic)
#endif
)
{
assert(!immOp->IsCnsIntOrI());
assert(varTypeIsUnsigned(immOp));
return addRangeCheckForHWIntrinsic(immOp, immLowerBound, immUpperBound);
}
else
{
return immOp;
}
}
//------------------------------------------------------------------------
// addRangeCheckForHWIntrinsic: add a GT_BOUNDS_CHECK node for an intrinsic
//
// Arguments:
// immOp -- the immediate operand of the intrinsic
// immLowerBound -- lower incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic)
// immUpperBound -- upper incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic)
//
// Return Value:
// add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic, which would throw ArgumentOutOfRangeException
// when the imm-argument is not in the valid range
//
GenTree* Compiler::addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound)
{
// Bounds check for value of an immediate operand
// (immLowerBound <= immOp) && (immOp <= immUpperBound)
//
// implemented as a single comparison in the form of
//
// if ((immOp - immLowerBound) >= (immUpperBound - immLowerBound + 1))
// {
// throw new ArgumentOutOfRangeException();
// }
//
// The value of (immUpperBound - immLowerBound + 1) is denoted as adjustedUpperBound.
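// Worked example (illustrative): with immLowerBound = 1 and immUpperBound = 8, adjustedUpperBound
// is 8. immOp = 5 yields (5 - 1) = 4, which is < 8, so the check passes; immOp = 0 yields (0 - 1),
// which the unsigned bounds check sees as a huge value >= 8, so it throws.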
const ssize_t adjustedUpperBound = (ssize_t)immUpperBound - immLowerBound + 1;
GenTree* adjustedUpperBoundNode = gtNewIconNode(adjustedUpperBound, TYP_INT);
GenTree* immOpDup = nullptr;
immOp = impCloneExpr(immOp, &immOpDup, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone an immediate operand for immediate value bounds check"));
if (immLowerBound != 0)
{
immOpDup = gtNewOperNode(GT_SUB, TYP_INT, immOpDup, gtNewIconNode(immLowerBound, TYP_INT));
}
GenTreeBoundsChk* hwIntrinsicChk =
new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(immOpDup, adjustedUpperBoundNode, SCK_ARG_RNG_EXCPN);
return gtNewOperNode(GT_COMMA, immOp->TypeGet(), hwIntrinsicChk, immOp);
}
//------------------------------------------------------------------------
// compSupportsHWIntrinsic: check whether a given instruction is enabled via configuration
//
// Arguments:
// isa - Instruction set
//
// Return Value:
// true iff the given instruction set is enabled via configuration (environment variables, etc.).
bool Compiler::compSupportsHWIntrinsic(CORINFO_InstructionSet isa)
{
return compHWIntrinsicDependsOn(isa) && (featureSIMD || HWIntrinsicInfo::isScalarIsa(isa)) &&
(
#ifdef DEBUG
JitConfig.EnableIncompleteISAClass() ||
#endif
HWIntrinsicInfo::isFullyImplementedIsa(isa));
}
//------------------------------------------------------------------------
// impIsTableDrivenHWIntrinsic:
//
// Arguments:
// intrinsicId - HW intrinsic id
// category - category of a HW intrinsic
//
// Return Value:
// returns true if this category can be table-driven in the importer
//
static bool impIsTableDrivenHWIntrinsic(NamedIntrinsic intrinsicId, HWIntrinsicCategory category)
{
return (category != HW_Category_Special) && HWIntrinsicInfo::RequiresCodegen(intrinsicId) &&
!HWIntrinsicInfo::HasSpecialImport(intrinsicId);
}
//------------------------------------------------------------------------
// isSupportedBaseType
//
// Arguments:
// intrinsicId - HW intrinsic id
// baseJitType - Base JIT type of the intrinsic.
//
// Return Value:
// returns true if the baseType is supported for given intrinsic.
//
static bool isSupportedBaseType(NamedIntrinsic intrinsic, CorInfoType baseJitType)
{
if (baseJitType == CORINFO_TYPE_UNDEF)
{
return false;
}
var_types baseType = JitType2PreciseVarType(baseJitType);
// We don't actually check the intrinsic outside of the false case as we expect
// the exposed managed signatures are either generic and support all types
// or they are explicit and support the type indicated.
if (varTypeIsArithmetic(baseType))
{
return true;
}
#ifdef TARGET_XARCH
assert((intrinsic == NI_Vector128_As) || (intrinsic == NI_Vector128_AsByte) ||
(intrinsic == NI_Vector128_AsDouble) || (intrinsic == NI_Vector128_AsInt16) ||
(intrinsic == NI_Vector128_AsInt32) || (intrinsic == NI_Vector128_AsInt64) ||
(intrinsic == NI_Vector128_AsSByte) || (intrinsic == NI_Vector128_AsSingle) ||
(intrinsic == NI_Vector128_AsUInt16) || (intrinsic == NI_Vector128_AsUInt32) ||
(intrinsic == NI_Vector128_AsUInt64) || (intrinsic == NI_Vector128_get_AllBitsSet) ||
(intrinsic == NI_Vector128_get_Count) || (intrinsic == NI_Vector128_get_Zero) ||
(intrinsic == NI_Vector128_GetElement) || (intrinsic == NI_Vector128_WithElement) ||
(intrinsic == NI_Vector128_ToScalar) || (intrinsic == NI_Vector128_ToVector256) ||
(intrinsic == NI_Vector128_ToVector256Unsafe) || (intrinsic == NI_Vector256_As) ||
(intrinsic == NI_Vector256_AsByte) || (intrinsic == NI_Vector256_AsDouble) ||
(intrinsic == NI_Vector256_AsInt16) || (intrinsic == NI_Vector256_AsInt32) ||
(intrinsic == NI_Vector256_AsInt64) || (intrinsic == NI_Vector256_AsSByte) ||
(intrinsic == NI_Vector256_AsSingle) || (intrinsic == NI_Vector256_AsUInt16) ||
(intrinsic == NI_Vector256_AsUInt32) || (intrinsic == NI_Vector256_AsUInt64) ||
(intrinsic == NI_Vector256_get_AllBitsSet) || (intrinsic == NI_Vector256_get_Count) ||
(intrinsic == NI_Vector256_get_Zero) || (intrinsic == NI_Vector256_GetElement) ||
(intrinsic == NI_Vector256_WithElement) || (intrinsic == NI_Vector256_GetLower) ||
(intrinsic == NI_Vector256_ToScalar));
#endif // TARGET_XARCH
#ifdef TARGET_ARM64
assert((intrinsic == NI_Vector64_As) || (intrinsic == NI_Vector64_AsByte) || (intrinsic == NI_Vector64_AsDouble) ||
(intrinsic == NI_Vector64_AsInt16) || (intrinsic == NI_Vector64_AsInt32) ||
(intrinsic == NI_Vector64_AsInt64) || (intrinsic == NI_Vector64_AsSByte) ||
(intrinsic == NI_Vector64_AsSingle) || (intrinsic == NI_Vector64_AsUInt16) ||
(intrinsic == NI_Vector64_AsUInt32) || (intrinsic == NI_Vector64_AsUInt64) ||
(intrinsic == NI_Vector64_get_AllBitsSet) || (intrinsic == NI_Vector64_get_Count) ||
(intrinsic == NI_Vector64_get_Zero) || (intrinsic == NI_Vector64_GetElement) ||
(intrinsic == NI_Vector64_ToScalar) || (intrinsic == NI_Vector64_ToVector128) ||
(intrinsic == NI_Vector64_ToVector128Unsafe) || (intrinsic == NI_Vector64_WithElement) ||
(intrinsic == NI_Vector128_As) || (intrinsic == NI_Vector128_AsByte) ||
(intrinsic == NI_Vector128_AsDouble) || (intrinsic == NI_Vector128_AsInt16) ||
(intrinsic == NI_Vector128_AsInt32) || (intrinsic == NI_Vector128_AsInt64) ||
(intrinsic == NI_Vector128_AsSByte) || (intrinsic == NI_Vector128_AsSingle) ||
(intrinsic == NI_Vector128_AsUInt16) || (intrinsic == NI_Vector128_AsUInt32) ||
(intrinsic == NI_Vector128_AsUInt64) || (intrinsic == NI_Vector128_get_AllBitsSet) ||
(intrinsic == NI_Vector128_get_Count) || (intrinsic == NI_Vector128_get_Zero) ||
(intrinsic == NI_Vector128_GetElement) || (intrinsic == NI_Vector128_GetLower) ||
(intrinsic == NI_Vector128_GetUpper) || (intrinsic == NI_Vector128_ToScalar) ||
(intrinsic == NI_Vector128_WithElement));
#endif // TARGET_ARM64
return false;
}
// HWIntrinsicSignatureReader: a helper class that "reads" a list of hardware intrinsic arguments and stores
// the corresponding argument type descriptors as the fields of the class instance.
//
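// Typical usage (as in impHWIntrinsic below):
// HWIntrinsicSignatureReader sigReader;
// sigReader.Read(info.compCompHnd, sig);
// var_types op1Type = sigReader.GetOp1Type(); // class handle available as sigReader.op1ClsHnd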
struct HWIntrinsicSignatureReader final
{
// Read: enumerates the list of arguments of a hardware intrinsic and stores the CORINFO_CLASS_HANDLE
// and var_types values of each operand into the corresponding fields of the class instance.
//
// Arguments:
// compHnd -- an instance of COMP_HANDLE class.
// sig -- a hardware intrinsic signature.
//
void Read(COMP_HANDLE compHnd, CORINFO_SIG_INFO* sig)
{
CORINFO_ARG_LIST_HANDLE args = sig->args;
if (sig->numArgs > 0)
{
op1JitType = strip(compHnd->getArgType(sig, args, &op1ClsHnd));
if (sig->numArgs > 1)
{
args = compHnd->getArgNext(args);
op2JitType = strip(compHnd->getArgType(sig, args, &op2ClsHnd));
}
if (sig->numArgs > 2)
{
args = compHnd->getArgNext(args);
op3JitType = strip(compHnd->getArgType(sig, args, &op3ClsHnd));
}
if (sig->numArgs > 3)
{
args = compHnd->getArgNext(args);
op4JitType = strip(compHnd->getArgType(sig, args, &op4ClsHnd));
}
}
}
CORINFO_CLASS_HANDLE op1ClsHnd;
CORINFO_CLASS_HANDLE op2ClsHnd;
CORINFO_CLASS_HANDLE op3ClsHnd;
CORINFO_CLASS_HANDLE op4ClsHnd;
CorInfoType op1JitType;
CorInfoType op2JitType;
CorInfoType op3JitType;
CorInfoType op4JitType;
var_types GetOp1Type() const
{
return JITtype2varType(op1JitType);
}
var_types GetOp2Type() const
{
return JITtype2varType(op2JitType);
}
var_types GetOp3Type() const
{
return JITtype2varType(op3JitType);
}
var_types GetOp4Type() const
{
return JITtype2varType(op4JitType);
}
};
//------------------------------------------------------------------------
// impHWIntrinsic: Import a hardware intrinsic as a GT_HWINTRINSIC node if possible
//
// Arguments:
// intrinsic -- id of the intrinsic function.
// clsHnd -- class handle containing the intrinsic function.
// method -- method handle of the intrinsic function.
// sig -- signature of the intrinsic call
// mustExpand -- true if the intrinsic must return a GenTree*; otherwise, false
// Return Value:
// The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic
//
GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand)
{
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic);
CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic);
int numArgs = sig->numArgs;
var_types retType = JITtype2varType(sig->retType);
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
if ((retType == TYP_STRUCT) && featureSIMD)
{
unsigned int sizeBytes;
simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes);
if (HWIntrinsicInfo::IsMultiReg(intrinsic))
{
assert(sizeBytes == 0);
}
else
{
assert(sizeBytes != 0);
// We want to return early here for cases where retType was TYP_STRUCT as per the method signature,
// rather than deferring the decision until after getting the simdBaseJitType of the arg.
if (!isSupportedBaseType(intrinsic, simdBaseJitType))
{
return nullptr;
}
retType = getSIMDTypeForSize(sizeBytes);
}
}
simdBaseJitType = getBaseJitTypeFromArgIfNeeded(intrinsic, clsHnd, sig, simdBaseJitType);
if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
if ((category == HW_Category_Scalar) || HWIntrinsicInfo::isScalarIsa(isa))
{
simdBaseJitType = sig->retType;
if (simdBaseJitType == CORINFO_TYPE_VOID)
{
simdBaseJitType = CORINFO_TYPE_UNDEF;
}
}
else
{
assert(featureSIMD);
unsigned int sizeBytes;
simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &sizeBytes);
assert((category == HW_Category_Special) || (category == HW_Category_Helper) || (sizeBytes != 0));
}
}
// Immediately return if the category is other than scalar/special and this is not a supported base type.
if ((category != HW_Category_Special) && (category != HW_Category_Scalar) && !HWIntrinsicInfo::isScalarIsa(isa) &&
!isSupportedBaseType(intrinsic, simdBaseJitType))
{
return nullptr;
}
var_types simdBaseType = TYP_UNKNOWN;
GenTree* immOp = nullptr;
if (simdBaseJitType != CORINFO_TYPE_UNDEF)
{
simdBaseType = JitType2PreciseVarType(simdBaseJitType);
}
HWIntrinsicSignatureReader sigReader;
sigReader.Read(info.compCompHnd, sig);
#ifdef TARGET_ARM64
if ((intrinsic == NI_AdvSimd_Insert) || (intrinsic == NI_AdvSimd_InsertScalar) ||
(intrinsic == NI_AdvSimd_LoadAndInsertScalar))
{
assert(sig->numArgs == 3);
immOp = impStackTop(1).val;
assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp));
}
else if (intrinsic == NI_AdvSimd_Arm64_InsertSelectedScalar)
{
// InsertSelectedScalar intrinsic has two immediate operands.
// Since all the remaining intrinsics on both platforms have only one immediate
// operand, in order to not complicate the shared logic even further we ensure here that
// 1) The second immediate operand immOp2 is constant and
// 2) its value belongs to [0, sizeof(op3) / sizeof(op3.BaseType)).
// If either is false, we should fallback to the managed implementation Insert(dst, dstIdx, Extract(src,
// srcIdx)).
// The check for the first immediate operand immOp will use the same logic as other intrinsics that have an
// immediate operand.
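// For instance (illustrative): with a Vector128<int> source vector, immOp2 must be a constant in
// [0, 4); a Vector64<short> source likewise allows [0, 4), while Vector128<short> allows [0, 8).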
GenTree* immOp2 = nullptr;
assert(sig->numArgs == 4);
immOp = impStackTop(2).val;
immOp2 = impStackTop().val;
assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp));
assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp2));
if (!immOp2->IsCnsIntOrI())
{
assert(HWIntrinsicInfo::NoJmpTableImm(intrinsic));
return impNonConstFallback(intrinsic, retType, simdBaseJitType);
}
unsigned int otherSimdSize = 0;
CorInfoType otherBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &otherSimdSize);
var_types otherBaseType = JitType2PreciseVarType(otherBaseJitType);
assert(otherBaseJitType == simdBaseJitType);
int immLowerBound2 = 0;
int immUpperBound2 = 0;
HWIntrinsicInfo::lookupImmBounds(intrinsic, otherSimdSize, otherBaseType, &immLowerBound2, &immUpperBound2);
const int immVal2 = (int)immOp2->AsIntCon()->IconValue();
if ((immVal2 < immLowerBound2) || (immVal2 > immUpperBound2))
{
assert(!mustExpand);
return nullptr;
}
}
else
#endif
if ((sig->numArgs > 0) && HWIntrinsicInfo::isImmOp(intrinsic, impStackTop().val))
{
// NOTE: The following code assumes that for all intrinsics
// taking an immediate operand, that operand will be last.
immOp = impStackTop().val;
}
const unsigned simdSize = HWIntrinsicInfo::lookupSimdSize(this, intrinsic, sig);
int immLowerBound = 0;
int immUpperBound = 0;
bool hasFullRangeImm = false;
if (immOp != nullptr)
{
#ifdef TARGET_XARCH
immUpperBound = HWIntrinsicInfo::lookupImmUpperBound(intrinsic);
hasFullRangeImm = HWIntrinsicInfo::HasFullRangeImm(intrinsic);
#elif defined(TARGET_ARM64)
if (category == HW_Category_SIMDByIndexedElement)
{
CorInfoType indexedElementBaseJitType;
var_types indexedElementBaseType;
unsigned int indexedElementSimdSize = 0;
if (numArgs == 3)
{
indexedElementBaseJitType =
getBaseJitTypeAndSizeOfSIMDType(sigReader.op2ClsHnd, &indexedElementSimdSize);
indexedElementBaseType = JitType2PreciseVarType(indexedElementBaseJitType);
}
else
{
assert(numArgs == 4);
indexedElementBaseJitType =
getBaseJitTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &indexedElementSimdSize);
indexedElementBaseType = JitType2PreciseVarType(indexedElementBaseJitType);
if (intrinsic == NI_Dp_DotProductBySelectedQuadruplet)
{
assert(((simdBaseType == TYP_INT) && (indexedElementBaseType == TYP_BYTE)) ||
((simdBaseType == TYP_UINT) && (indexedElementBaseType == TYP_UBYTE)));
// The second source operand of sdot, udot instructions is an indexed 32-bit element.
indexedElementBaseJitType = simdBaseJitType;
indexedElementBaseType = simdBaseType;
}
}
assert(indexedElementBaseType == simdBaseType);
HWIntrinsicInfo::lookupImmBounds(intrinsic, indexedElementSimdSize, simdBaseType, &immLowerBound,
&immUpperBound);
}
else
{
HWIntrinsicInfo::lookupImmBounds(intrinsic, simdSize, simdBaseType, &immLowerBound, &immUpperBound);
}
#endif
if (!hasFullRangeImm && immOp->IsCnsIntOrI())
{
const int ival = (int)immOp->AsIntCon()->IconValue();
bool immOutOfRange;
#ifdef TARGET_XARCH
if (HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic))
{
immOutOfRange = (ival != 1) && (ival != 2) && (ival != 4) && (ival != 8);
}
else
#endif
{
immOutOfRange = (ival < immLowerBound) || (ival > immUpperBound);
}
if (immOutOfRange)
{
assert(!mustExpand);
// The imm-HWintrinsics that do not accept all imm8 values may throw
// ArgumentOutOfRangeException when the imm argument is not in the valid range
return nullptr;
}
}
else if (!immOp->IsCnsIntOrI())
{
if (HWIntrinsicInfo::NoJmpTableImm(intrinsic))
{
return impNonConstFallback(intrinsic, retType, simdBaseJitType);
}
else if (!mustExpand)
{
// When the imm-argument is not a constant and we are not being forced to expand, we need to
// return nullptr so a GT_CALL to the intrinsic method is emitted instead. The
// intrinsic method is recursive and will be forced to expand, at which point
// we emit some less efficient fallback code.
return nullptr;
}
}
}
if (HWIntrinsicInfo::IsFloatingPointUsed(intrinsic))
{
// Set `compFloatingPointUsed` to cover the scenario where an intrinsic is operating on SIMD fields, but
// where no SIMD local vars are in use. This is the same logic as is used for FEATURE_SIMD.
compFloatingPointUsed = true;
}
// table-driven importer of simple intrinsics
if (impIsTableDrivenHWIntrinsic(intrinsic, category))
{
const bool isScalar = (category == HW_Category_Scalar);
assert(numArgs >= 0);
if (!isScalar && ((HWIntrinsicInfo::lookupIns(intrinsic, simdBaseType) == INS_invalid) ||
((simdSize != 8) && (simdSize != 16) && (simdSize != 32))))
{
assert(!"Unexpected HW Intrinsic");
return nullptr;
}
GenTree* op1 = nullptr;
GenTree* op2 = nullptr;
GenTree* op3 = nullptr;
GenTree* op4 = nullptr;
GenTreeHWIntrinsic* retNode = nullptr;
switch (numArgs)
{
case 0:
assert(!isScalar);
retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, simdBaseJitType, simdSize);
break;
case 1:
op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
if ((category == HW_Category_MemoryLoad) && op1->OperIs(GT_CAST))
{
// Although the API specifies a pointer, if what we have is a BYREF, that's what
// we really want, so throw away the cast.
if (op1->gtGetOp1()->TypeGet() == TYP_BYREF)
{
op1 = op1->gtGetOp1();
}
}
retNode = isScalar ? gtNewScalarHWIntrinsicNode(retType, op1, intrinsic)
: gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
#if defined(TARGET_XARCH)
switch (intrinsic)
{
case NI_SSE41_ConvertToVector128Int16:
case NI_SSE41_ConvertToVector128Int32:
case NI_SSE41_ConvertToVector128Int64:
case NI_AVX2_BroadcastScalarToVector128:
case NI_AVX2_BroadcastScalarToVector256:
case NI_AVX2_ConvertToVector256Int16:
case NI_AVX2_ConvertToVector256Int32:
case NI_AVX2_ConvertToVector256Int64:
{
// These intrinsics have both pointer and vector overloads
// We want to be able to differentiate between them, so let's
// just track the aux type as a ptr or undefined, depending
CorInfoType auxiliaryType = CORINFO_TYPE_UNDEF;
if (!varTypeIsSIMD(op1->TypeGet()))
{
auxiliaryType = CORINFO_TYPE_PTR;
}
retNode->AsHWIntrinsic()->SetAuxiliaryJitType(auxiliaryType);
break;
}
default:
{
break;
}
}
#endif // TARGET_XARCH
break;
case 2:
op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd);
op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound);
op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
retNode = isScalar ? gtNewScalarHWIntrinsicNode(retType, op1, op2, intrinsic)
: gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize);
#ifdef TARGET_XARCH
if ((intrinsic == NI_SSE42_Crc32) || (intrinsic == NI_SSE42_X64_Crc32))
{
// TODO-XArch-Cleanup: currently we use the simdBaseJitType to bring the type of the second argument
// to the code generator. May encode the overload info in another way.
retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType);
}
#elif defined(TARGET_ARM64)
switch (intrinsic)
{
case NI_Crc32_ComputeCrc32:
case NI_Crc32_ComputeCrc32C:
case NI_Crc32_Arm64_ComputeCrc32:
case NI_Crc32_Arm64_ComputeCrc32C:
retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType);
break;
case NI_AdvSimd_AddWideningUpper:
case NI_AdvSimd_SubtractWideningUpper:
assert(varTypeIsSIMD(op1->TypeGet()));
retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op1ClsHnd));
break;
case NI_AdvSimd_Arm64_AddSaturateScalar:
assert(varTypeIsSIMD(op2->TypeGet()));
retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op2ClsHnd));
break;
case NI_ArmBase_Arm64_MultiplyHigh:
if (sig->retType == CORINFO_TYPE_ULONG)
{
retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_ULONG);
}
else
{
assert(sig->retType == CORINFO_TYPE_LONG);
retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_LONG);
}
break;
default:
break;
}
#endif
break;
case 3:
op3 = getArgForHWIntrinsic(sigReader.GetOp3Type(), sigReader.op3ClsHnd);
op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd);
op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
#ifdef TARGET_ARM64
if (intrinsic == NI_AdvSimd_LoadAndInsertScalar)
{
op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound);
if (op1->OperIs(GT_CAST))
{
// Although the API specifies a pointer, if what we have is a BYREF, that's what
// we really want, so throw away the cast.
if (op1->gtGetOp1()->TypeGet() == TYP_BYREF)
{
op1 = op1->gtGetOp1();
}
}
}
else if ((intrinsic == NI_AdvSimd_Insert) || (intrinsic == NI_AdvSimd_InsertScalar))
{
op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound);
}
else
#endif
{
op3 = addRangeCheckIfNeeded(intrinsic, op3, mustExpand, immLowerBound, immUpperBound);
}
retNode = isScalar
? gtNewScalarHWIntrinsicNode(retType, op1, op2, op3, intrinsic)
: gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize);
#ifdef TARGET_XARCH
if ((intrinsic == NI_AVX2_GatherVector128) || (intrinsic == NI_AVX2_GatherVector256))
{
assert(varTypeIsSIMD(op2->TypeGet()));
retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op2ClsHnd));
}
#endif
break;
#ifdef TARGET_ARM64
case 4:
op4 = getArgForHWIntrinsic(sigReader.GetOp4Type(), sigReader.op4ClsHnd);
op4 = addRangeCheckIfNeeded(intrinsic, op4, mustExpand, immLowerBound, immUpperBound);
op3 = getArgForHWIntrinsic(sigReader.GetOp3Type(), sigReader.op3ClsHnd);
op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd);
op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
assert(!isScalar);
retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, op4, intrinsic, simdBaseJitType, simdSize);
break;
#endif
default:
return nullptr;
}
const bool isMemoryStore = retNode->OperIsMemoryStore();
if (isMemoryStore || retNode->OperIsMemoryLoad())
{
if (isMemoryStore)
{
// A MemoryStore operation is an assignment
retNode->gtFlags |= GTF_ASG;
}
// This operation contains an implicit indirection
// it could point into the global heap or
// it could throw a null reference exception.
//
retNode->gtFlags |= (GTF_GLOB_REF | GTF_EXCEPT);
}
return retNode;
}
return impSpecialIntrinsic(intrinsic, clsHnd, method, sig, simdBaseJitType, retType, simdSize);
}
#endif // FEATURE_HW_INTRINSICS
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "jitpch.h"
#include "hwintrinsic.h"
#ifdef FEATURE_HW_INTRINSICS
static const HWIntrinsicInfo hwIntrinsicInfoArray[] = {
// clang-format off
#if defined(TARGET_XARCH)
#define HARDWARE_INTRINSIC(isa, name, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \
{NI_##isa##_##name, #name, InstructionSet_##isa, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, static_cast<HWIntrinsicFlag>(flag)},
#include "hwintrinsiclistxarch.h"
#elif defined (TARGET_ARM64)
#define HARDWARE_INTRINSIC(isa, name, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \
{NI_##isa##_##name, #name, InstructionSet_##isa, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, static_cast<HWIntrinsicFlag>(flag)},
#include "hwintrinsiclistarm64.h"
#else
#error Unsupported platform
#endif
// clang-format on
};
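// Each HARDWARE_INTRINSIC entry in the per-target list expands to one row of this table, so a
// NamedIntrinsic id can be used to index it directly (see HWIntrinsicInfo::lookup below).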
//------------------------------------------------------------------------
// lookup: Gets the HWIntrinsicInfo associated with a given NamedIntrinsic
//
// Arguments:
// id -- The NamedIntrinsic associated with the HWIntrinsic to lookup
//
// Return Value:
// The HWIntrinsicInfo associated with id
const HWIntrinsicInfo& HWIntrinsicInfo::lookup(NamedIntrinsic id)
{
assert(id != NI_Illegal);
assert(id > NI_HW_INTRINSIC_START);
assert(id < NI_HW_INTRINSIC_END);
return hwIntrinsicInfoArray[id - NI_HW_INTRINSIC_START - 1];
}
//------------------------------------------------------------------------
// getBaseJitTypeFromArgIfNeeded: Get simdBaseJitType of intrinsic from 1st or 2nd argument depending on the flag
//
// Arguments:
// intrinsic -- id of the intrinsic function.
// clsHnd -- class handle containing the intrinsic function.
// method -- method handle of the intrinsic function.
// sig -- signature of the intrinsic call.
// simdBaseJitType -- Predetermined simdBaseJitType, could be CORINFO_TYPE_UNDEF
//
// Return Value:
// The basetype of the intrinsic if it can be fetched from the 1st or 2nd argument, else return simdBaseJitType unmodified.
//
CorInfoType Compiler::getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType)
{
if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic) || HWIntrinsicInfo::BaseTypeFromFirstArg(intrinsic))
{
CORINFO_ARG_LIST_HANDLE arg = sig->args;
if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic))
{
arg = info.compCompHnd->getArgNext(arg);
}
CORINFO_CLASS_HANDLE argClass = info.compCompHnd->getArgClass(sig, arg);
simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(argClass);
if (simdBaseJitType == CORINFO_TYPE_UNDEF) // the argument is not a vector
{
CORINFO_CLASS_HANDLE tmpClass;
simdBaseJitType = strip(info.compCompHnd->getArgType(sig, arg, &tmpClass));
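// For pointer arguments, fall back to the pointee type reported by getChildType.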
if (simdBaseJitType == CORINFO_TYPE_PTR)
{
simdBaseJitType = info.compCompHnd->getChildType(argClass, &tmpClass);
}
}
assert(simdBaseJitType != CORINFO_TYPE_UNDEF);
}
return simdBaseJitType;
}
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType)
{
if (m_simdHandleCache == nullptr)
{
return NO_CLASS_HANDLE;
}
if (simdType == TYP_SIMD16)
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->Vector128FloatHandle;
case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->Vector128DoubleHandle;
case CORINFO_TYPE_INT:
return m_simdHandleCache->Vector128IntHandle;
case CORINFO_TYPE_USHORT:
return m_simdHandleCache->Vector128UShortHandle;
case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->Vector128UByteHandle;
case CORINFO_TYPE_SHORT:
return m_simdHandleCache->Vector128ShortHandle;
case CORINFO_TYPE_BYTE:
return m_simdHandleCache->Vector128ByteHandle;
case CORINFO_TYPE_LONG:
return m_simdHandleCache->Vector128LongHandle;
case CORINFO_TYPE_UINT:
return m_simdHandleCache->Vector128UIntHandle;
case CORINFO_TYPE_ULONG:
return m_simdHandleCache->Vector128ULongHandle;
case CORINFO_TYPE_NATIVEINT:
return m_simdHandleCache->Vector128NIntHandle;
case CORINFO_TYPE_NATIVEUINT:
return m_simdHandleCache->Vector128NUIntHandle;
default:
assert(!"Didn't find a class handle for simdType");
}
}
#ifdef TARGET_XARCH
else if (simdType == TYP_SIMD32)
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->Vector256FloatHandle;
case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->Vector256DoubleHandle;
case CORINFO_TYPE_INT:
return m_simdHandleCache->Vector256IntHandle;
case CORINFO_TYPE_USHORT:
return m_simdHandleCache->Vector256UShortHandle;
case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->Vector256UByteHandle;
case CORINFO_TYPE_SHORT:
return m_simdHandleCache->Vector256ShortHandle;
case CORINFO_TYPE_BYTE:
return m_simdHandleCache->Vector256ByteHandle;
case CORINFO_TYPE_LONG:
return m_simdHandleCache->Vector256LongHandle;
case CORINFO_TYPE_UINT:
return m_simdHandleCache->Vector256UIntHandle;
case CORINFO_TYPE_ULONG:
return m_simdHandleCache->Vector256ULongHandle;
case CORINFO_TYPE_NATIVEINT:
return m_simdHandleCache->Vector256NIntHandle;
case CORINFO_TYPE_NATIVEUINT:
return m_simdHandleCache->Vector256NUIntHandle;
default:
assert(!"Didn't find a class handle for simdType");
}
}
#endif // TARGET_XARCH
#ifdef TARGET_ARM64
else if (simdType == TYP_SIMD8)
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->Vector64FloatHandle;
case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->Vector64DoubleHandle;
case CORINFO_TYPE_INT:
return m_simdHandleCache->Vector64IntHandle;
case CORINFO_TYPE_USHORT:
return m_simdHandleCache->Vector64UShortHandle;
case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->Vector64UByteHandle;
case CORINFO_TYPE_SHORT:
return m_simdHandleCache->Vector64ShortHandle;
case CORINFO_TYPE_BYTE:
return m_simdHandleCache->Vector64ByteHandle;
case CORINFO_TYPE_UINT:
return m_simdHandleCache->Vector64UIntHandle;
case CORINFO_TYPE_LONG:
return m_simdHandleCache->Vector64LongHandle;
case CORINFO_TYPE_ULONG:
return m_simdHandleCache->Vector64ULongHandle;
case CORINFO_TYPE_NATIVEINT:
return m_simdHandleCache->Vector64NIntHandle;
case CORINFO_TYPE_NATIVEUINT:
return m_simdHandleCache->Vector64NUIntHandle;
default:
assert(!"Didn't find a class handle for simdType");
}
}
#endif // TARGET_ARM64
return NO_CLASS_HANDLE;
}
//------------------------------------------------------------------------
// vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID):
//
// Arguments:
// hwIntrinsicID -- The id for the HW intrinsic
//
// Return Value:
// Returns true if this intrinsic requires value numbering to add an
// extra SimdType argument that encodes the resulting type.
// If we don't do this, overloaded versions can return the same VN,
// leading to incorrect CSE substitutions.
//
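// For example (a hypothetical case): an intrinsic whose float and integer overloads map to
// different instructions would otherwise produce identical value numbers for both overloads;
// the extra SimdType argument keeps those VNs distinct.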
/* static */ bool Compiler::vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID)
{
int numArgs = HWIntrinsicInfo::lookupNumArgs(hwIntrinsicID);
// HW intrinsics with -1 for numArgs have a varying number of args, so we currently
// give them a unique value number and don't add an extra argument.
//
if (numArgs == -1)
{
return false;
}
// We iterate over all of the different baseType's for this intrinsic in the HWIntrinsicInfo table
// We set diffInsCount to the number of instructions that can execute differently.
//
unsigned diffInsCount = 0;
#ifdef TARGET_XARCH
instruction lastIns = INS_invalid;
#endif
for (var_types baseType = TYP_BYTE; (baseType <= TYP_DOUBLE); baseType = (var_types)(baseType + 1))
{
instruction curIns = HWIntrinsicInfo::lookupIns(hwIntrinsicID, baseType);
if (curIns != INS_invalid)
{
#ifdef TARGET_XARCH
if (curIns != lastIns)
{
diffInsCount++;
// remember the last valid instruction that we saw
lastIns = curIns;
}
#elif defined(TARGET_ARM64)
// On ARM64 we use the same instruction and specify an insOpt arrangement
// so we always consider the instruction operation to be different
//
diffInsCount++;
#endif // TARGET
if (diffInsCount >= 2)
{
// We can early exit the loop now
break;
}
}
}
// If we see two (or more) different instructions we need the extra VNF_SimdType arg
return (diffInsCount >= 2);
}
//------------------------------------------------------------------------
// lookupId: Gets the NamedIntrinsic for a given method name and InstructionSet
//
// Arguments:
// comp -- The compiler
// sig -- The signature of the intrinsic
// className -- The name of the class associated with the HWIntrinsic to lookup
// methodName -- The name of the method associated with the HWIntrinsic to lookup
// enclosingClassName -- The name of the enclosing class of X64 classes
//
// Return Value:
// The NamedIntrinsic associated with methodName and isa
NamedIntrinsic HWIntrinsicInfo::lookupId(Compiler* comp,
CORINFO_SIG_INFO* sig,
const char* className,
const char* methodName,
const char* enclosingClassName)
{
// TODO-Throughput: replace sequential search by binary search
CORINFO_InstructionSet isa = lookupIsa(className, enclosingClassName);
if (isa == InstructionSet_ILLEGAL)
{
return NI_Illegal;
}
bool isIsaSupported = comp->compSupportsHWIntrinsic(isa);
bool isHardwareAcceleratedProp = (strcmp(methodName, "get_IsHardwareAccelerated") == 0);
#ifdef TARGET_XARCH
if (isHardwareAcceleratedProp)
{
// Special case: Some of the Vector128/256 APIs are hardware accelerated with Sse1 and Avx1,
// but we want IsHardwareAccelerated to return true only when all of them are (there can
// still be cases where e.g. Sse41 might give an additional boost for Vector128, but it's
// not important enough to bump the minimal Sse version here)
if (strcmp(className, "Vector128") == 0)
{
isa = InstructionSet_SSE2;
}
else if (strcmp(className, "Vector256") == 0)
{
isa = InstructionSet_AVX2;
}
}
#endif
if ((strcmp(methodName, "get_IsSupported") == 0) || isHardwareAcceleratedProp)
{
return isIsaSupported ? (comp->compExactlyDependsOn(isa) ? NI_IsSupported_True : NI_IsSupported_Dynamic)
: NI_IsSupported_False;
}
else if (!isIsaSupported)
{
return NI_Throw_PlatformNotSupportedException;
}
for (int i = 0; i < (NI_HW_INTRINSIC_END - NI_HW_INTRINSIC_START - 1); i++)
{
const HWIntrinsicInfo& intrinsicInfo = hwIntrinsicInfoArray[i];
if (isa != hwIntrinsicInfoArray[i].isa)
{
continue;
}
int numArgs = static_cast<unsigned>(intrinsicInfo.numArgs);
if ((numArgs != -1) && (sig->numArgs != static_cast<unsigned>(intrinsicInfo.numArgs)))
{
continue;
}
if (strcmp(methodName, intrinsicInfo.name) == 0)
{
return intrinsicInfo.id;
}
}
// There are several helper intrinsics that are implemented in managed code
// Those intrinsics will hit this code path and need to return NI_Illegal
return NI_Illegal;
}
//------------------------------------------------------------------------
// lookupSimdSize: Gets the SimdSize for a given HWIntrinsic and signature
//
// Arguments:
// id -- The ID associated with the HWIntrinsic to lookup
// sig -- The signature of the HWIntrinsic to lookup
//
// Return Value:
// The SIMD size for the HWIntrinsic associated with id and sig
//
// Remarks:
// This function is only used by the importer. After importation, we can
// get the SIMD size from the GenTreeHWIntrinsic node.
unsigned HWIntrinsicInfo::lookupSimdSize(Compiler* comp, NamedIntrinsic id, CORINFO_SIG_INFO* sig)
{
unsigned simdSize = 0;
if (tryLookupSimdSize(id, &simdSize))
{
return simdSize;
}
CORINFO_CLASS_HANDLE typeHnd = nullptr;
if (HWIntrinsicInfo::BaseTypeFromFirstArg(id))
{
typeHnd = comp->info.compCompHnd->getArgClass(sig, sig->args);
}
else if (HWIntrinsicInfo::BaseTypeFromSecondArg(id))
{
CORINFO_ARG_LIST_HANDLE secondArg = comp->info.compCompHnd->getArgNext(sig->args);
typeHnd = comp->info.compCompHnd->getArgClass(sig, secondArg);
}
else
{
assert(JITtype2varType(sig->retType) == TYP_STRUCT);
typeHnd = sig->retTypeSigClass;
}
CorInfoType simdBaseJitType = comp->getBaseJitTypeAndSizeOfSIMDType(typeHnd, &simdSize);
assert((simdSize > 0) && (simdBaseJitType != CORINFO_TYPE_UNDEF));
return simdSize;
}
//------------------------------------------------------------------------
// isImmOp: Checks whether the HWIntrinsic node has an imm operand
//
// Arguments:
// id -- The NamedIntrinsic associated with the HWIntrinsic to lookup
// op -- The operand to check
//
// Return Value:
// true if the node has an imm operand; otherwise, false
bool HWIntrinsicInfo::isImmOp(NamedIntrinsic id, const GenTree* op)
{
#ifdef TARGET_XARCH
if (HWIntrinsicInfo::lookupCategory(id) != HW_Category_IMM)
{
return false;
}
if (!HWIntrinsicInfo::MaybeImm(id))
{
return true;
}
#elif defined(TARGET_ARM64)
if (!HWIntrinsicInfo::HasImmediateOperand(id))
{
return false;
}
#else
#error Unsupported platform
#endif
if (genActualType(op->TypeGet()) != TYP_INT)
{
return false;
}
return true;
}
//------------------------------------------------------------------------
// getArgForHWIntrinsic: pop an argument from the stack and validate its type
//
// Arguments:
// argType -- the required type of argument
// argClass -- the class handle of argType
// expectAddr -- if true indicates we are expecting type stack entry to be a TYP_BYREF.
// newobjThis -- For CEE_NEWOBJ, this is the temp grabbed for the allocated uninitialized object.
//
// Return Value:
// the validated argument
//
GenTree* Compiler::getArgForHWIntrinsic(var_types argType,
CORINFO_CLASS_HANDLE argClass,
bool expectAddr,
GenTree* newobjThis)
{
GenTree* arg = nullptr;
if (varTypeIsStruct(argType))
{
if (!varTypeIsSIMD(argType))
{
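// The struct argument is a SIMD wrapper (e.g. Vector128<T>) whose signature type is TYP_STRUCT;
// recover the corresponding TYP_SIMD* type from the struct's size.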
unsigned int argSizeBytes;
(void)getBaseJitTypeAndSizeOfSIMDType(argClass, &argSizeBytes);
argType = getSIMDTypeForSize(argSizeBytes);
}
assert(varTypeIsSIMD(argType));
if (newobjThis == nullptr)
{
arg = impSIMDPopStack(argType, expectAddr);
assert(varTypeIsSIMD(arg->TypeGet()));
}
else
{
assert((newobjThis->gtOper == GT_ADDR) && (newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR));
arg = newobjThis;
// push newobj result on type stack
unsigned tmp = arg->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum();
impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(argClass).NormaliseForStack());
}
}
else
{
assert(varTypeIsArithmetic(argType));
arg = impPopStack().val;
assert(varTypeIsArithmetic(arg->TypeGet()));
assert(genActualType(arg->gtType) == genActualType(argType));
}
return arg;
}
//------------------------------------------------------------------------
// addRangeCheckIfNeeded: add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic
//
// Arguments:
// intrinsic -- intrinsic ID
// immOp -- the immediate operand of the intrinsic
// mustExpand -- true if the compiler is compiling the fallback(GT_CALL) of this intrinsics
// immLowerBound -- lower incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic)
// immUpperBound -- upper incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic)
//
// Return Value:
// add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic, which would throw ArgumentOutOfRangeException
// when the imm-argument is not in the valid range
//
GenTree* Compiler::addRangeCheckIfNeeded(
NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound)
{
assert(immOp != nullptr);
// Full-range imm-intrinsics do not need the range-check
// because the imm-parameter of the intrinsic method is a byte.
// AVX2 Gather intrinsics do not need the range-check
// because their imm-parameter has discrete valid values that are handled by managed code
if (mustExpand && HWIntrinsicInfo::isImmOp(intrinsic, immOp)
#ifdef TARGET_XARCH
&& !HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic) && !HWIntrinsicInfo::HasFullRangeImm(intrinsic)
#endif
)
{
assert(!immOp->IsCnsIntOrI());
assert(varTypeIsUnsigned(immOp));
return addRangeCheckForHWIntrinsic(immOp, immLowerBound, immUpperBound);
}
else
{
return immOp;
}
}
//------------------------------------------------------------------------
// addRangeCheckForHWIntrinsic: add a GT_BOUNDS_CHECK node for an intrinsic
//
// Arguments:
// immOp -- the immediate operand of the intrinsic
// immLowerBound -- lower incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic)
// immUpperBound -- upper incl. bound for a value of the immediate operand (for a non-full-range imm-intrinsic)
//
// Return Value:
// add a GT_BOUNDS_CHECK node for non-full-range imm-intrinsic, which would throw ArgumentOutOfRangeException
// when the imm-argument is not in the valid range
//
GenTree* Compiler::addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound)
{
// Bounds check for value of an immediate operand
// (immLowerBound <= immOp) && (immOp <= immUpperBound)
//
// implemented as a single comparison in the form of
//
// if ((immOp - immLowerBound) >= (immUpperBound - immLowerBound + 1))
// {
// throw new ArgumentOutOfRangeException();
// }
//
// The value of (immUpperBound - immLowerBound + 1) is denoted as adjustedUpperBound.
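// For illustration (hypothetical bounds): with immLowerBound = 1 and immUpperBound = 8, the
// adjusted upper bound is 8; an immediate of 0 becomes (0 - 1) = -1, which the single bounds
// check treats as out of range (an unsigned compare) and throws, while immediates 1..8 pass.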
const ssize_t adjustedUpperBound = (ssize_t)immUpperBound - immLowerBound + 1;
GenTree* adjustedUpperBoundNode = gtNewIconNode(adjustedUpperBound, TYP_INT);
GenTree* immOpDup = nullptr;
immOp = impCloneExpr(immOp, &immOpDup, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone an immediate operand for immediate value bounds check"));
if (immLowerBound != 0)
{
immOpDup = gtNewOperNode(GT_SUB, TYP_INT, immOpDup, gtNewIconNode(immLowerBound, TYP_INT));
}
GenTreeBoundsChk* hwIntrinsicChk =
new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(immOpDup, adjustedUpperBoundNode, SCK_ARG_RNG_EXCPN);
return gtNewOperNode(GT_COMMA, immOp->TypeGet(), hwIntrinsicChk, immOp);
}
//------------------------------------------------------------------------
// compSupportsHWIntrinsic: check whether a given instruction is enabled via configuration
//
// Arguments:
// isa - Instruction set
//
// Return Value:
// true iff the given instruction set is enabled via configuration (environment variables, etc.).
bool Compiler::compSupportsHWIntrinsic(CORINFO_InstructionSet isa)
{
return compHWIntrinsicDependsOn(isa) && (featureSIMD || HWIntrinsicInfo::isScalarIsa(isa)) &&
(
#ifdef DEBUG
JitConfig.EnableIncompleteISAClass() ||
#endif
HWIntrinsicInfo::isFullyImplementedIsa(isa));
}
//------------------------------------------------------------------------
// impIsTableDrivenHWIntrinsic:
//
// Arguments:
// intrinsicId - HW intrinsic id
// category - category of a HW intrinsic
//
// Return Value:
// returns true if this category can be table-driven in the importer
//
static bool impIsTableDrivenHWIntrinsic(NamedIntrinsic intrinsicId, HWIntrinsicCategory category)
{
return (category != HW_Category_Special) && HWIntrinsicInfo::RequiresCodegen(intrinsicId) &&
!HWIntrinsicInfo::HasSpecialImport(intrinsicId);
}
//------------------------------------------------------------------------
// isSupportedBaseType
//
// Arguments:
// intrinsicId - HW intrinsic id
// baseJitType - Base JIT type of the intrinsic.
//
// Return Value:
// returns true if the baseType is supported for given intrinsic.
//
static bool isSupportedBaseType(NamedIntrinsic intrinsic, CorInfoType baseJitType)
{
if (baseJitType == CORINFO_TYPE_UNDEF)
{
return false;
}
var_types baseType = JitType2PreciseVarType(baseJitType);
// We don't actually check the intrinsic outside of the false case as we expect
// the exposed managed signatures are either generic and support all types
// or they are explicit and support the type indicated.
if (varTypeIsArithmetic(baseType))
{
return true;
}
#ifdef TARGET_XARCH
assert((intrinsic == NI_Vector128_As) || (intrinsic == NI_Vector128_AsByte) ||
(intrinsic == NI_Vector128_AsDouble) || (intrinsic == NI_Vector128_AsInt16) ||
(intrinsic == NI_Vector128_AsInt32) || (intrinsic == NI_Vector128_AsInt64) ||
(intrinsic == NI_Vector128_AsSByte) || (intrinsic == NI_Vector128_AsSingle) ||
(intrinsic == NI_Vector128_AsUInt16) || (intrinsic == NI_Vector128_AsUInt32) ||
(intrinsic == NI_Vector128_AsUInt64) || (intrinsic == NI_Vector128_get_AllBitsSet) ||
(intrinsic == NI_Vector128_get_Count) || (intrinsic == NI_Vector128_get_Zero) ||
(intrinsic == NI_Vector128_GetElement) || (intrinsic == NI_Vector128_WithElement) ||
(intrinsic == NI_Vector128_ToScalar) || (intrinsic == NI_Vector128_ToVector256) ||
(intrinsic == NI_Vector128_ToVector256Unsafe) || (intrinsic == NI_Vector256_As) ||
(intrinsic == NI_Vector256_AsByte) || (intrinsic == NI_Vector256_AsDouble) ||
(intrinsic == NI_Vector256_AsInt16) || (intrinsic == NI_Vector256_AsInt32) ||
(intrinsic == NI_Vector256_AsInt64) || (intrinsic == NI_Vector256_AsSByte) ||
(intrinsic == NI_Vector256_AsSingle) || (intrinsic == NI_Vector256_AsUInt16) ||
(intrinsic == NI_Vector256_AsUInt32) || (intrinsic == NI_Vector256_AsUInt64) ||
(intrinsic == NI_Vector256_get_AllBitsSet) || (intrinsic == NI_Vector256_get_Count) ||
(intrinsic == NI_Vector256_get_Zero) || (intrinsic == NI_Vector256_GetElement) ||
(intrinsic == NI_Vector256_WithElement) || (intrinsic == NI_Vector256_GetLower) ||
(intrinsic == NI_Vector256_ToScalar));
#endif // TARGET_XARCH
#ifdef TARGET_ARM64
assert((intrinsic == NI_Vector64_As) || (intrinsic == NI_Vector64_AsByte) || (intrinsic == NI_Vector64_AsDouble) ||
(intrinsic == NI_Vector64_AsInt16) || (intrinsic == NI_Vector64_AsInt32) ||
(intrinsic == NI_Vector64_AsInt64) || (intrinsic == NI_Vector64_AsSByte) ||
(intrinsic == NI_Vector64_AsSingle) || (intrinsic == NI_Vector64_AsUInt16) ||
(intrinsic == NI_Vector64_AsUInt32) || (intrinsic == NI_Vector64_AsUInt64) ||
(intrinsic == NI_Vector64_get_AllBitsSet) || (intrinsic == NI_Vector64_get_Count) ||
(intrinsic == NI_Vector64_get_Zero) || (intrinsic == NI_Vector64_GetElement) ||
(intrinsic == NI_Vector64_ToScalar) || (intrinsic == NI_Vector64_ToVector128) ||
(intrinsic == NI_Vector64_ToVector128Unsafe) || (intrinsic == NI_Vector64_WithElement) ||
(intrinsic == NI_Vector128_As) || (intrinsic == NI_Vector128_AsByte) ||
(intrinsic == NI_Vector128_AsDouble) || (intrinsic == NI_Vector128_AsInt16) ||
(intrinsic == NI_Vector128_AsInt32) || (intrinsic == NI_Vector128_AsInt64) ||
(intrinsic == NI_Vector128_AsSByte) || (intrinsic == NI_Vector128_AsSingle) ||
(intrinsic == NI_Vector128_AsUInt16) || (intrinsic == NI_Vector128_AsUInt32) ||
(intrinsic == NI_Vector128_AsUInt64) || (intrinsic == NI_Vector128_get_AllBitsSet) ||
(intrinsic == NI_Vector128_get_Count) || (intrinsic == NI_Vector128_get_Zero) ||
(intrinsic == NI_Vector128_GetElement) || (intrinsic == NI_Vector128_GetLower) ||
(intrinsic == NI_Vector128_GetUpper) || (intrinsic == NI_Vector128_ToScalar) ||
(intrinsic == NI_Vector128_WithElement));
#endif // TARGET_ARM64
return false;
}
// HWIntrinsicSignatureReader: a helper class that "reads" a list of hardware intrinsic arguments and stores
// the corresponding argument type descriptors as the fields of the class instance.
//
struct HWIntrinsicSignatureReader final
{
// Read: enumerates the list of arguments of a hardware intrinsic and stores the CORINFO_CLASS_HANDLE
// and var_types values of each operand into the corresponding fields of the class instance.
//
// Arguments:
// compHnd -- an instance of COMP_HANDLE class.
// sig -- a hardware intrinsic signature.
//
void Read(COMP_HANDLE compHnd, CORINFO_SIG_INFO* sig)
{
CORINFO_ARG_LIST_HANDLE args = sig->args;
if (sig->numArgs > 0)
{
op1JitType = strip(compHnd->getArgType(sig, args, &op1ClsHnd));
if (sig->numArgs > 1)
{
args = compHnd->getArgNext(args);
op2JitType = strip(compHnd->getArgType(sig, args, &op2ClsHnd));
}
if (sig->numArgs > 2)
{
args = compHnd->getArgNext(args);
op3JitType = strip(compHnd->getArgType(sig, args, &op3ClsHnd));
}
if (sig->numArgs > 3)
{
args = compHnd->getArgNext(args);
op4JitType = strip(compHnd->getArgType(sig, args, &op4ClsHnd));
}
}
}
CORINFO_CLASS_HANDLE op1ClsHnd;
CORINFO_CLASS_HANDLE op2ClsHnd;
CORINFO_CLASS_HANDLE op3ClsHnd;
CORINFO_CLASS_HANDLE op4ClsHnd;
CorInfoType op1JitType;
CorInfoType op2JitType;
CorInfoType op3JitType;
CorInfoType op4JitType;
var_types GetOp1Type() const
{
return JITtype2varType(op1JitType);
}
var_types GetOp2Type() const
{
return JITtype2varType(op2JitType);
}
var_types GetOp3Type() const
{
return JITtype2varType(op3JitType);
}
var_types GetOp4Type() const
{
return JITtype2varType(op4JitType);
}
};
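// Typical usage (as in impHWIntrinsic below): declare a reader, call Read(info.compCompHnd, sig),
// then use GetOp1Type()..GetOp4Type() and op1ClsHnd..op4ClsHnd when popping each argument.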
//------------------------------------------------------------------------
// impHWIntrinsic: Import a hardware intrinsic as a GT_HWINTRINSIC node if possible
//
// Arguments:
// intrinsic -- id of the intrinsic function.
// clsHnd -- class handle containing the intrinsic function.
// method -- method handle of the intrinsic function.
// sig -- signature of the intrinsic call
// mustExpand -- true if the intrinsic must return a GenTree*; otherwise, false
// Return Value:
// The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic
//
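// In outline: read the signature, determine retType/simdBaseJitType/simdSize, validate any
// immediate operand (falling back to impNonConstFallback or a GT_CALL when needed), then build
// the GT_HWINTRINSIC node table-driven by argument count, or defer to impSpecialIntrinsic.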
GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand)
{
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic);
CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic);
int numArgs = sig->numArgs;
var_types retType = JITtype2varType(sig->retType);
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
if ((retType == TYP_STRUCT) && featureSIMD)
{
unsigned int sizeBytes;
simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes);
if (HWIntrinsicInfo::IsMultiReg(intrinsic))
{
assert(sizeBytes == 0);
}
else
{
assert(sizeBytes != 0);
// We want to return early here for cases where retType was TYP_STRUCT as per the method signature,
// rather than deferring the decision until after getting the simdBaseJitType of the arg.
if (!isSupportedBaseType(intrinsic, simdBaseJitType))
{
return nullptr;
}
retType = getSIMDTypeForSize(sizeBytes);
}
}
simdBaseJitType = getBaseJitTypeFromArgIfNeeded(intrinsic, clsHnd, sig, simdBaseJitType);
if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
if ((category == HW_Category_Scalar) || HWIntrinsicInfo::isScalarIsa(isa))
{
simdBaseJitType = sig->retType;
if (simdBaseJitType == CORINFO_TYPE_VOID)
{
simdBaseJitType = CORINFO_TYPE_UNDEF;
}
}
else
{
assert(featureSIMD);
unsigned int sizeBytes;
simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &sizeBytes);
assert((category == HW_Category_Special) || (category == HW_Category_Helper) || (sizeBytes != 0));
}
}
// Immediately return if the category is other than scalar/special and this is not a supported base type.
if ((category != HW_Category_Special) && (category != HW_Category_Scalar) && !HWIntrinsicInfo::isScalarIsa(isa) &&
!isSupportedBaseType(intrinsic, simdBaseJitType))
{
return nullptr;
}
var_types simdBaseType = TYP_UNKNOWN;
GenTree* immOp = nullptr;
if (simdBaseJitType != CORINFO_TYPE_UNDEF)
{
simdBaseType = JitType2PreciseVarType(simdBaseJitType);
}
HWIntrinsicSignatureReader sigReader;
sigReader.Read(info.compCompHnd, sig);
#ifdef TARGET_ARM64
if ((intrinsic == NI_AdvSimd_Insert) || (intrinsic == NI_AdvSimd_InsertScalar) ||
(intrinsic == NI_AdvSimd_LoadAndInsertScalar))
{
assert(sig->numArgs == 3);
immOp = impStackTop(1).val;
assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp));
}
else if (intrinsic == NI_AdvSimd_Arm64_InsertSelectedScalar)
{
// InsertSelectedScalar intrinsic has two immediate operands.
// Since all the remaining intrinsics on both platforms have only one immediate
// operand, in order to not complicate the shared logic even further we ensure here that
// 1) The second immediate operand immOp2 is constant and
// 2) its value belongs to [0, sizeof(op3) / sizeof(op3.BaseType)).
// If either is false, we should fallback to the managed implementation Insert(dst, dstIdx, Extract(src,
// srcIdx)).
// The check for the first immediate operand immOp will use the same logic as other intrinsics that have an
// immediate operand.
GenTree* immOp2 = nullptr;
assert(sig->numArgs == 4);
immOp = impStackTop(2).val;
immOp2 = impStackTop().val;
assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp));
assert(HWIntrinsicInfo::isImmOp(intrinsic, immOp2));
if (!immOp2->IsCnsIntOrI())
{
assert(HWIntrinsicInfo::NoJmpTableImm(intrinsic));
return impNonConstFallback(intrinsic, retType, simdBaseJitType);
}
unsigned int otherSimdSize = 0;
CorInfoType otherBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &otherSimdSize);
var_types otherBaseType = JitType2PreciseVarType(otherBaseJitType);
assert(otherBaseJitType == simdBaseJitType);
int immLowerBound2 = 0;
int immUpperBound2 = 0;
HWIntrinsicInfo::lookupImmBounds(intrinsic, otherSimdSize, otherBaseType, &immLowerBound2, &immUpperBound2);
const int immVal2 = (int)immOp2->AsIntCon()->IconValue();
if ((immVal2 < immLowerBound2) || (immVal2 > immUpperBound2))
{
assert(!mustExpand);
return nullptr;
}
}
else
#endif
if ((sig->numArgs > 0) && HWIntrinsicInfo::isImmOp(intrinsic, impStackTop().val))
{
// NOTE: The following code assumes that for all intrinsics
// taking an immediate operand, that operand will be last.
immOp = impStackTop().val;
}
const unsigned simdSize = HWIntrinsicInfo::lookupSimdSize(this, intrinsic, sig);
int immLowerBound = 0;
int immUpperBound = 0;
bool hasFullRangeImm = false;
if (immOp != nullptr)
{
#ifdef TARGET_XARCH
immUpperBound = HWIntrinsicInfo::lookupImmUpperBound(intrinsic);
hasFullRangeImm = HWIntrinsicInfo::HasFullRangeImm(intrinsic);
#elif defined(TARGET_ARM64)
if (category == HW_Category_SIMDByIndexedElement)
{
CorInfoType indexedElementBaseJitType;
var_types indexedElementBaseType;
unsigned int indexedElementSimdSize = 0;
if (numArgs == 3)
{
indexedElementBaseJitType =
getBaseJitTypeAndSizeOfSIMDType(sigReader.op2ClsHnd, &indexedElementSimdSize);
indexedElementBaseType = JitType2PreciseVarType(indexedElementBaseJitType);
}
else
{
assert(numArgs == 4);
indexedElementBaseJitType =
getBaseJitTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &indexedElementSimdSize);
indexedElementBaseType = JitType2PreciseVarType(indexedElementBaseJitType);
if (intrinsic == NI_Dp_DotProductBySelectedQuadruplet)
{
assert(((simdBaseType == TYP_INT) && (indexedElementBaseType == TYP_BYTE)) ||
((simdBaseType == TYP_UINT) && (indexedElementBaseType == TYP_UBYTE)));
// The second source operand of sdot, udot instructions is an indexed 32-bit element.
indexedElementBaseJitType = simdBaseJitType;
indexedElementBaseType = simdBaseType;
}
}
assert(indexedElementBaseType == simdBaseType);
HWIntrinsicInfo::lookupImmBounds(intrinsic, indexedElementSimdSize, simdBaseType, &immLowerBound,
&immUpperBound);
}
else
{
HWIntrinsicInfo::lookupImmBounds(intrinsic, simdSize, simdBaseType, &immLowerBound, &immUpperBound);
}
#endif
if (!hasFullRangeImm && immOp->IsCnsIntOrI())
{
const int ival = (int)immOp->AsIntCon()->IconValue();
bool immOutOfRange;
#ifdef TARGET_XARCH
if (HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic))
{
immOutOfRange = (ival != 1) && (ival != 2) && (ival != 4) && (ival != 8);
}
else
#endif
{
immOutOfRange = (ival < immLowerBound) || (ival > immUpperBound);
}
if (immOutOfRange)
{
assert(!mustExpand);
// The imm-HWintrinsics that do not accept all imm8 values may throw
// ArgumentOutOfRangeException when the imm argument is not in the valid range
return nullptr;
}
}
else if (!immOp->IsCnsIntOrI())
{
if (HWIntrinsicInfo::NoJmpTableImm(intrinsic))
{
return impNonConstFallback(intrinsic, retType, simdBaseJitType);
}
else if (!mustExpand)
{
// When the imm-argument is not a constant and we are not being forced to expand, we need to
// return nullptr so a GT_CALL to the intrinsic method is emitted instead. The
// intrinsic method is recursive and will be forced to expand, at which point
// we emit some less efficient fallback code.
return nullptr;
}
}
}
if (HWIntrinsicInfo::IsFloatingPointUsed(intrinsic))
{
// Set `compFloatingPointUsed` to cover the scenario where an intrinsic is operating on SIMD fields, but
// where no SIMD local vars are in use. This is the same logic as is used for FEATURE_SIMD.
compFloatingPointUsed = true;
}
// table-driven importer of simple intrinsics
if (impIsTableDrivenHWIntrinsic(intrinsic, category))
{
const bool isScalar = (category == HW_Category_Scalar);
assert(numArgs >= 0);
if (!isScalar && ((HWIntrinsicInfo::lookupIns(intrinsic, simdBaseType) == INS_invalid) ||
((simdSize != 8) && (simdSize != 16) && (simdSize != 32))))
{
assert(!"Unexpected HW Intrinsic");
return nullptr;
}
GenTree* op1 = nullptr;
GenTree* op2 = nullptr;
GenTree* op3 = nullptr;
GenTree* op4 = nullptr;
GenTreeHWIntrinsic* retNode = nullptr;
switch (numArgs)
{
case 0:
assert(!isScalar);
retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, simdBaseJitType, simdSize);
break;
case 1:
op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
if ((category == HW_Category_MemoryLoad) && op1->OperIs(GT_CAST))
{
// Although the API specifies a pointer, if what we have is a BYREF, that's what
// we really want, so throw away the cast.
if (op1->gtGetOp1()->TypeGet() == TYP_BYREF)
{
op1 = op1->gtGetOp1();
}
}
retNode = isScalar ? gtNewScalarHWIntrinsicNode(retType, op1, intrinsic)
: gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
#if defined(TARGET_XARCH)
switch (intrinsic)
{
case NI_SSE41_ConvertToVector128Int16:
case NI_SSE41_ConvertToVector128Int32:
case NI_SSE41_ConvertToVector128Int64:
case NI_AVX2_BroadcastScalarToVector128:
case NI_AVX2_BroadcastScalarToVector256:
case NI_AVX2_ConvertToVector256Int16:
case NI_AVX2_ConvertToVector256Int32:
case NI_AVX2_ConvertToVector256Int64:
{
// These intrinsics have both pointer and vector overloads
// We want to be able to differentiate between them, so let's
// just track the aux type as a ptr or undefined, depending on the overload.
CorInfoType auxiliaryType = CORINFO_TYPE_UNDEF;
if (!varTypeIsSIMD(op1->TypeGet()))
{
auxiliaryType = CORINFO_TYPE_PTR;
}
retNode->AsHWIntrinsic()->SetAuxiliaryJitType(auxiliaryType);
break;
}
default:
{
break;
}
}
#endif // TARGET_XARCH
break;
case 2:
op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd);
op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound);
op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
retNode = isScalar ? gtNewScalarHWIntrinsicNode(retType, op1, op2, intrinsic)
: gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize);
#ifdef TARGET_XARCH
if ((intrinsic == NI_SSE42_Crc32) || (intrinsic == NI_SSE42_X64_Crc32))
{
// TODO-XArch-Cleanup: currently we use the simdBaseJitType to bring the type of the second argument
// to the code generator. We may encode the overload info in another way.
retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType);
}
#elif defined(TARGET_ARM64)
switch (intrinsic)
{
case NI_Crc32_ComputeCrc32:
case NI_Crc32_ComputeCrc32C:
case NI_Crc32_Arm64_ComputeCrc32:
case NI_Crc32_Arm64_ComputeCrc32C:
retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType);
break;
case NI_AdvSimd_AddWideningUpper:
case NI_AdvSimd_SubtractWideningUpper:
assert(varTypeIsSIMD(op1->TypeGet()));
retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op1ClsHnd));
break;
case NI_AdvSimd_Arm64_AddSaturateScalar:
assert(varTypeIsSIMD(op2->TypeGet()));
retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op2ClsHnd));
break;
case NI_ArmBase_Arm64_MultiplyHigh:
if (sig->retType == CORINFO_TYPE_ULONG)
{
retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_ULONG);
}
else
{
assert(sig->retType == CORINFO_TYPE_LONG);
retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_LONG);
}
break;
default:
break;
}
#endif
break;
case 3:
op3 = getArgForHWIntrinsic(sigReader.GetOp3Type(), sigReader.op3ClsHnd);
op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd);
op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
#ifdef TARGET_ARM64
if (intrinsic == NI_AdvSimd_LoadAndInsertScalar)
{
op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound);
if (op1->OperIs(GT_CAST))
{
// Although the API specifies a pointer, if what we have is a BYREF, that's what
// we really want, so throw away the cast.
if (op1->gtGetOp1()->TypeGet() == TYP_BYREF)
{
op1 = op1->gtGetOp1();
}
}
}
else if ((intrinsic == NI_AdvSimd_Insert) || (intrinsic == NI_AdvSimd_InsertScalar))
{
op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound);
}
else
#endif
{
op3 = addRangeCheckIfNeeded(intrinsic, op3, mustExpand, immLowerBound, immUpperBound);
}
retNode = isScalar
? gtNewScalarHWIntrinsicNode(retType, op1, op2, op3, intrinsic)
: gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize);
#ifdef TARGET_XARCH
if ((intrinsic == NI_AVX2_GatherVector128) || (intrinsic == NI_AVX2_GatherVector256))
{
assert(varTypeIsSIMD(op2->TypeGet()));
retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op2ClsHnd));
}
#endif
break;
#ifdef TARGET_ARM64
case 4:
op4 = getArgForHWIntrinsic(sigReader.GetOp4Type(), sigReader.op4ClsHnd);
op4 = addRangeCheckIfNeeded(intrinsic, op4, mustExpand, immLowerBound, immUpperBound);
op3 = getArgForHWIntrinsic(sigReader.GetOp3Type(), sigReader.op3ClsHnd);
op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd);
op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
assert(!isScalar);
retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, op4, intrinsic, simdBaseJitType, simdSize);
break;
#endif
default:
return nullptr;
}
const bool isMemoryStore = retNode->OperIsMemoryStore();
if (isMemoryStore || retNode->OperIsMemoryLoad())
{
if (isMemoryStore)
{
// A MemoryStore operation is an assignment
retNode->gtFlags |= GTF_ASG;
}
// This operation contains an implicit indirection
// it could point into the global heap or
// it could throw a null reference exception.
//
retNode->gtFlags |= (GTF_GLOB_REF | GTF_EXCEPT);
}
return retNode;
}
return impSpecialIntrinsic(intrinsic, clsHnd, method, sig, simdBaseJitType, retType, simdSize);
}
#endif // FEATURE_HW_INTRINSICS
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/libraries/System.Diagnostics.FileVersionInfo/tests/NativeLibrary/dllmain.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
// Windows Header Files:
#include <windows.h>
BOOL APIENTRY DllMain( HMODULE hModule,
DWORD ul_reason_for_call,
LPVOID lpReserved
)
{
switch (ul_reason_for_call)
{
case DLL_PROCESS_ATTACH:
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
case DLL_PROCESS_DETACH:
break;
}
return TRUE;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
// Windows Header Files:
#include <windows.h>
BOOL APIENTRY DllMain( HMODULE hModule,
DWORD ul_reason_for_call,
LPVOID lpReserved
)
{
switch (ul_reason_for_call)
{
case DLL_PROCESS_ATTACH:
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
case DLL_PROCESS_DETACH:
break;
}
return TRUE;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/tools/superpmi/superpmi-shared/runtimedetails.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// RuntimeDetails.h - the collection of runtime includes that we need to access.
//----------------------------------------------------------
#ifndef _RuntimeDetails
#define _RuntimeDetails
#include <mscoree.h>
#include <corjit.h>
#include <utilcode.h>
#include <patchpointinfo.h>
// Jit Exports
typedef ICorJitCompiler*(__stdcall* PgetJit)();
typedef void(__stdcall* PjitStartup)(ICorJitHost* host);
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// RuntimeDetails.h - the collection of runtime includes that we need to access.
//----------------------------------------------------------
#ifndef _RuntimeDetails
#define _RuntimeDetails
#include <mscoree.h>
#include <corjit.h>
#include <utilcode.h>
#include <patchpointinfo.h>
// Jit Exports
typedef ICorJitCompiler*(__stdcall* PgetJit)();
typedef void(__stdcall* PjitStartup)(ICorJitHost* host);
#endif
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/c_runtime/sprintf_s/test9/test9.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test9.c
**
** Purpose: Test #9 for the sprintf_s function. Tests the integer
** specifier (%i).
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../sprintf_s.h"
/*
* Depends on memcmp and strlen
*/
PALTEST(c_runtime_sprintf_s_test9_paltest_sprintf_test9, "c_runtime/sprintf_s/test9/paltest_sprintf_test9")
{
int neg = -42;
int pos = 42;
INT64 l = 42;
if (PAL_Initialize(argc, argv) != 0)
{
return FAIL;
}
DoNumTest("foo %i", pos, "foo 42");
DoNumTest("foo %li", 0xFFFF, "foo 65535");
DoNumTest("foo %hi", 0xFFFF, "foo -1");
DoNumTest("foo %Li", pos, "foo 42");
DoI64Test("foo %I64i", l, "42", "foo 42");
DoNumTest("foo %3i", pos, "foo 42");
DoNumTest("foo %-3i", pos, "foo 42 ");
DoNumTest("foo %.1i", pos, "foo 42");
DoNumTest("foo %.3i", pos, "foo 042");
DoNumTest("foo %03i", pos, "foo 042");
DoNumTest("foo %#i", pos, "foo 42");
DoNumTest("foo %+i", pos, "foo +42");
DoNumTest("foo % i", pos, "foo 42");
DoNumTest("foo %+i", neg, "foo -42");
DoNumTest("foo % i", neg, "foo -42");
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test9.c
**
** Purpose: Test #9 for the sprintf_s function. Tests the integer
** specifier (%i).
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../sprintf_s.h"
/*
* Depends on memcmp and strlen
*/
PALTEST(c_runtime_sprintf_s_test9_paltest_sprintf_test9, "c_runtime/sprintf_s/test9/paltest_sprintf_test9")
{
int neg = -42;
int pos = 42;
INT64 l = 42;
if (PAL_Initialize(argc, argv) != 0)
{
return FAIL;
}
DoNumTest("foo %i", pos, "foo 42");
DoNumTest("foo %li", 0xFFFF, "foo 65535");
DoNumTest("foo %hi", 0xFFFF, "foo -1");
DoNumTest("foo %Li", pos, "foo 42");
DoI64Test("foo %I64i", l, "42", "foo 42");
DoNumTest("foo %3i", pos, "foo 42");
DoNumTest("foo %-3i", pos, "foo 42 ");
DoNumTest("foo %.1i", pos, "foo 42");
DoNumTest("foo %.3i", pos, "foo 042");
DoNumTest("foo %03i", pos, "foo 042");
DoNumTest("foo %#i", pos, "foo 42");
DoNumTest("foo %+i", pos, "foo +42");
DoNumTest("foo % i", pos, "foo 42");
DoNumTest("foo %+i", neg, "foo -42");
DoNumTest("foo % i", neg, "foo -42");
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/native/corehost/hostpolicy/breadcrumbs.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <thread>
#include <pal.h>
#include <utils.h>
#include <trace.h>
#include "breadcrumbs.h"
breadcrumb_writer_t::breadcrumb_writer_t(std::unordered_set<pal::string_t> &files)
{
assert(m_files.empty());
m_files.swap(files);
assert(files.empty());
if (!pal::get_default_breadcrumb_store(&m_breadcrumb_store))
{
m_breadcrumb_store.clear();
}
}
// Begin breadcrumb writing: launch a thread to write breadcrumbs.
std::shared_ptr<breadcrumb_writer_t> breadcrumb_writer_t::begin_write(std::unordered_set<pal::string_t> &files)
{
trace::verbose(_X("--- Begin breadcrumb write"));
auto instance = std::make_shared<breadcrumb_writer_t>(files);
if (instance->m_breadcrumb_store.empty())
{
trace::verbose(_X("Breadcrumb store was not obtained... skipping write."));
return nullptr;
}
// Add a reference to this object for the thread we will spawn
instance->m_threads_instance = instance;
instance->m_thread = std::thread(write_worker_callback, instance.get());
trace::verbose(_X("Breadcrumbs will be written using a background thread"));
return instance;
}
// Write the breadcrumbs. This method should be called
// only from the background thread.
void breadcrumb_writer_t::write_callback()
{
bool successful = true;
for (const auto& file : m_files)
{
pal::string_t file_path = m_breadcrumb_store;
pal::string_t file_name = _X("netcore,") + file;
append_path(&file_path, file_name.c_str());
if (!pal::file_exists(file_path))
{
if (!pal::touch_file(file_path))
{
successful = false;
}
}
}
trace::verbose(_X("--- End breadcrumb write %d"), successful);
// Clear reference to this object for the thread.
m_threads_instance.reset();
}
// ThreadProc for the background writer.
void breadcrumb_writer_t::write_worker_callback(breadcrumb_writer_t* p_this)
{
assert(p_this);
assert(p_this->m_threads_instance);
assert(p_this->m_threads_instance.get() == p_this);
try
{
trace::verbose(_X("Breadcrumb thread write callback..."));
p_this->write_callback();
}
catch (...)
{
trace::warning(_X("An unexpected exception was thrown while leaving breadcrumbs"));
}
}
// Wait for completion of the background tasks, if any.
void breadcrumb_writer_t::end_write()
{
if (m_thread.joinable())
{
trace::verbose(_X("Waiting for breadcrumb thread to exit..."));
// Block on the thread to exit.
m_thread.join();
}
trace::verbose(_X("Done waiting for breadcrumb thread to exit..."));
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <thread>
#include <pal.h>
#include <utils.h>
#include <trace.h>
#include "breadcrumbs.h"
breadcrumb_writer_t::breadcrumb_writer_t(std::unordered_set<pal::string_t> &files)
{
assert(m_files.empty());
m_files.swap(files);
assert(files.empty());
if (!pal::get_default_breadcrumb_store(&m_breadcrumb_store))
{
m_breadcrumb_store.clear();
}
}
// Begin breadcrumb writing: launch a thread to write breadcrumbs.
std::shared_ptr<breadcrumb_writer_t> breadcrumb_writer_t::begin_write(std::unordered_set<pal::string_t> &files)
{
trace::verbose(_X("--- Begin breadcrumb write"));
auto instance = std::make_shared<breadcrumb_writer_t>(files);
if (instance->m_breadcrumb_store.empty())
{
trace::verbose(_X("Breadcrumb store was not obtained... skipping write."));
return nullptr;
}
// Add a reference to this object for the thread we will spawn
instance->m_threads_instance = instance;
instance->m_thread = std::thread(write_worker_callback, instance.get());
trace::verbose(_X("Breadcrumbs will be written using a background thread"));
return instance;
}
// Write the breadcrumbs. This method should be called
// only from the background thread.
void breadcrumb_writer_t::write_callback()
{
bool successful = true;
for (const auto& file : m_files)
{
pal::string_t file_path = m_breadcrumb_store;
pal::string_t file_name = _X("netcore,") + file;
append_path(&file_path, file_name.c_str());
if (!pal::file_exists(file_path))
{
if (!pal::touch_file(file_path))
{
successful = false;
}
}
}
trace::verbose(_X("--- End breadcrumb write %d"), successful);
// Clear reference to this object for the thread.
m_threads_instance.reset();
}
// ThreadProc for the background writer.
void breadcrumb_writer_t::write_worker_callback(breadcrumb_writer_t* p_this)
{
assert(p_this);
assert(p_this->m_threads_instance);
assert(p_this->m_threads_instance.get() == p_this);
try
{
trace::verbose(_X("Breadcrumb thread write callback..."));
p_this->write_callback();
}
catch (...)
{
trace::warning(_X("An unexpected exception was thrown while leaving breadcrumbs"));
}
}
// Wait for completion of the background tasks, if any.
void breadcrumb_writer_t::end_write()
{
if (m_thread.joinable())
{
trace::verbose(_X("Waiting for breadcrumb thread to exit..."));
// Block on the thread to exit.
m_thread.join();
}
trace::verbose(_X("Done waiting for breadcrumb thread to exit..."));
}
| -1 |
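The breadcrumb writer above keeps itself alive for the lifetime of its background thread by stashing a shared_ptr to itself (m_threads_instance) before starting the thread and releasing it when the work is done. The following is a stripped-down, self-contained sketch of that keep-alive pattern; the names are simplified and this is not the hostpolicy code itself.

#include <cstdio>
#include <memory>
#include <thread>

class background_writer
{
public:
    static std::shared_ptr<background_writer> begin_write()
    {
        auto instance = std::make_shared<background_writer>();
        // The self-reference keeps the object alive even if the caller
        // drops its own copy before the thread finishes.
        instance->m_self = instance;
        instance->m_thread = std::thread(&background_writer::worker, instance.get());
        return instance;
    }

    void end_write()
    {
        if (m_thread.joinable())
            m_thread.join();          // block until the background work is done
    }

private:
    void worker()
    {
        std::puts("writing breadcrumbs...");
        m_self.reset();               // drop the self-reference when done
    }

    std::shared_ptr<background_writer> m_self;
    std::thread m_thread;
};

int main()
{
    auto writer = background_writer::begin_write();
    writer->end_write();              // join before process exit, as end_write() does above
    return 0;
}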
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/debug/createdump/datatarget.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
class CrashInfo;
class DumpDataTarget : public ICLRDataTarget
{
private:
LONG m_ref; // reference count
CrashInfo& m_crashInfo;
// no public copy constructor
DumpDataTarget(const DumpDataTarget&) = delete;
void operator=(const DumpDataTarget&) = delete;
public:
DumpDataTarget(CrashInfo& crashInfo);
virtual ~DumpDataTarget();
//
// IUnknown
//
STDMETHOD(QueryInterface)(___in REFIID InterfaceId, ___out PVOID* Interface);
STDMETHOD_(ULONG, AddRef)();
STDMETHOD_(ULONG, Release)();
//
// ICLRDataTarget
//
virtual HRESULT STDMETHODCALLTYPE GetMachineType(
/* [out] */ ULONG32 *machine);
virtual HRESULT STDMETHODCALLTYPE GetPointerSize(
/* [out] */ ULONG32 *size);
virtual HRESULT STDMETHODCALLTYPE GetImageBase(
/* [string][in] */ LPCWSTR moduleName,
/* [out] */ CLRDATA_ADDRESS *baseAddress);
virtual HRESULT STDMETHODCALLTYPE ReadVirtual(
/* [in] */ CLRDATA_ADDRESS address,
/* [length_is][size_is][out] */ PBYTE buffer,
/* [in] */ ULONG32 size,
/* [optional][out] */ ULONG32 *done);
virtual HRESULT STDMETHODCALLTYPE WriteVirtual(
/* [in] */ CLRDATA_ADDRESS address,
/* [size_is][in] */ PBYTE buffer,
/* [in] */ ULONG32 size,
/* [optional][out] */ ULONG32 *done);
virtual HRESULT STDMETHODCALLTYPE GetTLSValue(
/* [in] */ ULONG32 threadID,
/* [in] */ ULONG32 index,
/* [out] */ CLRDATA_ADDRESS* value);
virtual HRESULT STDMETHODCALLTYPE SetTLSValue(
/* [in] */ ULONG32 threadID,
/* [in] */ ULONG32 index,
/* [in] */ CLRDATA_ADDRESS value);
virtual HRESULT STDMETHODCALLTYPE GetCurrentThreadID(
/* [out] */ ULONG32* threadID);
virtual HRESULT STDMETHODCALLTYPE GetThreadContext(
/* [in] */ ULONG32 threadID,
/* [in] */ ULONG32 contextFlags,
/* [in] */ ULONG32 contextSize,
/* [out, size_is(contextSize)] */ PBYTE context);
virtual HRESULT STDMETHODCALLTYPE SetThreadContext(
/* [in] */ ULONG32 threadID,
/* [in] */ ULONG32 contextSize,
/* [in, size_is(contextSize)] */ PBYTE context);
virtual HRESULT STDMETHODCALLTYPE Request(
/* [in] */ ULONG32 reqCode,
/* [in] */ ULONG32 inBufferSize,
/* [size_is][in] */ BYTE *inBuffer,
/* [in] */ ULONG32 outBufferSize,
/* [size_is][out] */ BYTE *outBuffer);
};
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
class CrashInfo;
class DumpDataTarget : public ICLRDataTarget
{
private:
LONG m_ref; // reference count
CrashInfo& m_crashInfo;
// no public copy constructor
DumpDataTarget(const DumpDataTarget&) = delete;
void operator=(const DumpDataTarget&) = delete;
public:
DumpDataTarget(CrashInfo& crashInfo);
virtual ~DumpDataTarget();
//
// IUnknown
//
STDMETHOD(QueryInterface)(___in REFIID InterfaceId, ___out PVOID* Interface);
STDMETHOD_(ULONG, AddRef)();
STDMETHOD_(ULONG, Release)();
//
// ICLRDataTarget
//
virtual HRESULT STDMETHODCALLTYPE GetMachineType(
/* [out] */ ULONG32 *machine);
virtual HRESULT STDMETHODCALLTYPE GetPointerSize(
/* [out] */ ULONG32 *size);
virtual HRESULT STDMETHODCALLTYPE GetImageBase(
/* [string][in] */ LPCWSTR moduleName,
/* [out] */ CLRDATA_ADDRESS *baseAddress);
virtual HRESULT STDMETHODCALLTYPE ReadVirtual(
/* [in] */ CLRDATA_ADDRESS address,
/* [length_is][size_is][out] */ PBYTE buffer,
/* [in] */ ULONG32 size,
/* [optional][out] */ ULONG32 *done);
virtual HRESULT STDMETHODCALLTYPE WriteVirtual(
/* [in] */ CLRDATA_ADDRESS address,
/* [size_is][in] */ PBYTE buffer,
/* [in] */ ULONG32 size,
/* [optional][out] */ ULONG32 *done);
virtual HRESULT STDMETHODCALLTYPE GetTLSValue(
/* [in] */ ULONG32 threadID,
/* [in] */ ULONG32 index,
/* [out] */ CLRDATA_ADDRESS* value);
virtual HRESULT STDMETHODCALLTYPE SetTLSValue(
/* [in] */ ULONG32 threadID,
/* [in] */ ULONG32 index,
/* [in] */ CLRDATA_ADDRESS value);
virtual HRESULT STDMETHODCALLTYPE GetCurrentThreadID(
/* [out] */ ULONG32* threadID);
virtual HRESULT STDMETHODCALLTYPE GetThreadContext(
/* [in] */ ULONG32 threadID,
/* [in] */ ULONG32 contextFlags,
/* [in] */ ULONG32 contextSize,
/* [out, size_is(contextSize)] */ PBYTE context);
virtual HRESULT STDMETHODCALLTYPE SetThreadContext(
/* [in] */ ULONG32 threadID,
/* [in] */ ULONG32 contextSize,
/* [in, size_is(contextSize)] */ PBYTE context);
virtual HRESULT STDMETHODCALLTYPE Request(
/* [in] */ ULONG32 reqCode,
/* [in] */ ULONG32 inBufferSize,
/* [size_is][in] */ BYTE *inBuffer,
/* [in] */ ULONG32 outBufferSize,
/* [size_is][out] */ BYTE *outBuffer);
};
| -1 |
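DumpDataTarget above carries an m_ref field for the IUnknown AddRef/Release pair it declares. Below is a minimal, portable sketch of that style of intrusive reference counting, using std::atomic here rather than the Win32 Interlocked helpers; the real createdump implementation may differ.

#include <atomic>
#include <cassert>

// Illustrative only: a COM-style intrusive reference count.
class RefCounted
{
public:
    unsigned long AddRef()
    {
        return ++m_ref;
    }

    unsigned long Release()
    {
        unsigned long count = --m_ref;
        if (count == 0)
            delete this;              // the object owns its own lifetime
        return count;
    }

protected:
    virtual ~RefCounted() = default;

private:
    std::atomic<unsigned long> m_ref{1};   // the creator starts with one reference
};

int main()
{
    RefCounted* obj = new RefCounted();
    obj->AddRef();                    // a second consumer takes a reference
    assert(obj->Release() == 1);
    assert(obj->Release() == 0);      // the last release deletes the object
    return 0;
}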
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/native/libs/System.Security.Cryptography.Native/pal_ssl.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "pal_crypto_types.h"
#include "pal_compiler.h"
#include "opensslshim.h"
/*
These values should be kept in sync with System.Security.Authentication.SslProtocols.
*/
typedef enum
{
PAL_SSL_NONE = 0,
PAL_SSL_SSL2 = 12,
PAL_SSL_SSL3 = 48,
PAL_SSL_TLS = 192,
PAL_SSL_TLS11 = 768,
PAL_SSL_TLS12 = 3072,
PAL_SSL_TLS13 = 12288,
} SslProtocols;
/*
These values should be kept in sync with System.Net.Security.EncryptionPolicy.
*/
typedef enum
{
RequireEncryption = 0,
AllowNoEncryption,
NoEncryption
} EncryptionPolicy;
/*
These values should be kept in sync with System.Security.Authentication.CipherAlgorithmType.
*/
typedef enum
{
CipherAlgorithmType_None = 0,
Null = 24576,
Des = 26113,
Rc2 = 26114,
TripleDes = 26115,
Aes128 = 26126,
Aes192 = 26127,
Aes256 = 26128,
Aes = 26129,
Rc4 = 26625,
// Algorithm constants which are not present in the managed CipherAlgorithmType enum.
SSL_IDEA = 229380,
SSL_CAMELLIA128 = 229381,
SSL_CAMELLIA256 = 229382,
SSL_eGOST2814789CNT = 229383,
SSL_SEED = 229384,
} CipherAlgorithmType;
/*
These values should be kept in sync with System.Security.Authentication.ExchangeAlgorithmType.
*/
typedef enum
{
ExchangeAlgorithmType_None,
RsaSign = 9216,
RsaKeyX = 41984,
DiffieHellman = 43522,
// ExchangeAlgorithm constants which are not present in the managed ExchangeAlgorithmType enum.
SSL_ECDH = 43525,
SSL_ECDSA = 41475,
SSL_ECDHE = 44550,
SSL_kPSK = 229390,
SSL_kGOST = 229391,
SSL_kSRP = 229392,
SSL_kKRB5 = 229393,
} ExchangeAlgorithmType;
/*
These values should be kept in sync with System.Security.Authentication.HashAlgorithmType.
*/
typedef enum
{
HashAlgorithmType_None = 0,
Md5 = 32771,
Sha1 = 32772,
// HashAlgorithm constants which are not present in the managed HashAlgorithmType enum.
SSL_SHA256 = 32780,
SSL_SHA384 = 32781,
SSL_GOST94 = 229410,
SSL_GOST89 = 229411,
SSL_AEAD = 229412,
} HashAlgorithmType;
typedef enum
{
MD5_HashKeySize = 8 * MD5_DIGEST_LENGTH,
SHA1_HashKeySize = 8 * SHA_DIGEST_LENGTH,
SHA256_HashKeySize = 8 * SHA256_DIGEST_LENGTH,
SHA384_HashKeySize = 8 * SHA384_DIGEST_LENGTH,
GOST_HashKeySize = 256,
Default = 0,
} DataHashSize;
typedef enum
{
PAL_SSL_ERROR_NONE = 0,
PAL_SSL_ERROR_SSL = 1,
PAL_SSL_ERROR_WANT_READ = 2,
PAL_SSL_ERROR_WANT_WRITE = 3,
PAL_SSL_ERROR_SYSCALL = 5,
PAL_SSL_ERROR_ZERO_RETURN = 6,
} SslErrorCode;
// the function pointer definition for the callback used in SslCtxSetAlpnSelectCb
typedef int32_t (*SslCtxSetAlpnCallback)(SSL* ssl,
const uint8_t** out,
uint8_t* outlen,
const uint8_t* in,
uint32_t inlen,
void* arg);
/*
Ensures that libssl is correctly initialized and ready to use.
*/
PALEXPORT void CryptoNative_EnsureLibSslInitialized(void);
/*
Shims the SSLv23_method method.
Returns the requested SSL_METHOD.
*/
PALEXPORT const SSL_METHOD* CryptoNative_SslV2_3Method(void);
/*
Shims the SSL_CTX_new method.
Returns the new SSL_CTX instance.
*/
PALEXPORT SSL_CTX* CryptoNative_SslCtxCreate(const SSL_METHOD* method);
/*
Sets the specified protocols in the SSL_CTX options.
*/
PALEXPORT void CryptoNative_SslCtxSetProtocolOptions(SSL_CTX* ctx, SslProtocols protocols);
/*
Sets internal callback for client certificate selection if set is positive.
It will unset callback if set is zero.
*/
PALEXPORT void CryptoNative_SslSetClientCertCallback(SSL* ssl, int set);
/*
Requests that client sends Post-Handshake Authentication extension in ClientHello.
*/
PALEXPORT void CryptoNative_SslSetPostHandshakeAuth(SSL* ssl, int32_t val);
/*
Sets session caching. 0 is disabled.
*/
PALEXPORT void CryptoNative_SslCtxSetCaching(SSL_CTX* ctx, int mode);
/*
Shims the SSL_new method.
Returns the new SSL instance.
*/
PALEXPORT SSL* CryptoNative_SslCreate(SSL_CTX* ctx);
/*
Shims the SSL_get_error method.
Returns the error code for the specified result.
*/
PALEXPORT int32_t CryptoNative_SslGetError(SSL* ssl, int32_t ret);
/*
Cleans up and deletes an SSL instance.
Implemented by calling SSL_free.
No-op if ssl is null.
The given X509 SSL is invalid after this call.
Always succeeds.
*/
PALEXPORT void CryptoNative_SslDestroy(SSL* ssl);
/*
Cleans up and deletes an SSL_CTX instance.
Implemented by calling SSL_CTX_free.
No-op if ctx is null.
The given X509 SSL_CTX is invalid after this call.
Always succeeds.
*/
PALEXPORT void CryptoNative_SslCtxDestroy(SSL_CTX* ctx);
/*
Shims the SSL_set_connect_state method.
*/
PALEXPORT void CryptoNative_SslSetConnectState(SSL* ssl);
/*
Shims the SSL_set_accept_state method.
*/
PALEXPORT void CryptoNative_SslSetAcceptState(SSL* ssl);
/*
Shims the SSL_get_version method.
Returns the protocol version string for the SSL instance.
*/
PALEXPORT const char* CryptoNative_SslGetVersion(SSL* ssl);
/*
Shims the SSL_write method.
Returns the positive number of bytes written when successful, 0 or a negative number
when an error is encountered.
*/
PALEXPORT int32_t CryptoNative_SslWrite(SSL* ssl, const void* buf, int32_t num, int32_t* error);
/*
Shims the SSL_read method.
Returns the positive number of bytes read when successful, 0 or a negative number
when an error is encountered.
*/
PALEXPORT int32_t CryptoNative_SslRead(SSL* ssl, void* buf, int32_t num, int32_t* error);
/*
Shims the SSL_renegotiate method (up to TLS 1.2), or SSL_verify_client_post_handshake (TLS 1.3)
Returns 1 when renegotiation/post-handshake authentication started; 0 on error.
*/
PALEXPORT int32_t CryptoNative_SslRenegotiate(SSL* ssl, int32_t* error);
/*
Shims the SSL_renegotiate_pending method.
Returns 1 when negotiation is requested; 0 once a handshake has finished.
*/
PALEXPORT int32_t CryptoNative_IsSslRenegotiatePending(SSL* ssl);
/*
Shims the SSL_shutdown method.
Returns:
1 if the shutdown was successfully completed;
0 if the shutdown is not yet finished;
<0 if the shutdown was not successful because of a fatal error.
*/
PALEXPORT int32_t CryptoNative_SslShutdown(SSL* ssl);
/*
Shims the SSL_set_bio method.
*/
PALEXPORT void CryptoNative_SslSetBio(SSL* ssl, BIO* rbio, BIO* wbio);
/*
Shims the SSL_do_handshake method.
Returns:
1 if the handshake was successful;
0 if the handshake was not successful but was shut down controlled
and by the specifications of the TLS/SSL protocol;
<0 if the handshake was not successful because of a fatal error.
*/
PALEXPORT int32_t CryptoNative_SslDoHandshake(SSL* ssl, int32_t* error);
/*
Gets a value indicating whether the SSL_state is SSL_ST_OK.
Returns 1 if the state is OK, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_IsSslStateOK(SSL* ssl);
/*
Shims the SSL_get_peer_certificate method.
Returns the certificate presented by the peer.
*/
PALEXPORT X509* CryptoNative_SslGetPeerCertificate(SSL* ssl);
/*
Shims the SSL_get_peer_cert_chain method.
Returns the certificate chain presented by the peer.
*/
PALEXPORT X509Stack* CryptoNative_SslGetPeerCertChain(SSL* ssl);
/*
Shims the SSL_use_certificate method.
Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_SslUseCertificate(SSL* ssl, X509* x);
/*
Shims the SSL_use_PrivateKey method.
Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_SslUsePrivateKey(SSL* ssl, EVP_PKEY* pkey);
/*
Shims the SSL_CTX_use_certificate method.
Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_SslCtxUseCertificate(SSL_CTX* ctx, X509* x);
/*
Shims the SSL_CTX_use_PrivateKey method.
Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_SslCtxUsePrivateKey(SSL_CTX* ctx, EVP_PKEY* pkey);
/*
Shims the SSL_CTX_check_private_key method.
Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_SslCtxCheckPrivateKey(SSL_CTX* ctx);
/*
Shims the SSL_CTX_set_quiet_shutdown method.
*/
PALEXPORT void CryptoNative_SslCtxSetQuietShutdown(SSL_CTX* ctx);
/*
Shims the SSL_set_quiet_shutdown method.
*/
PALEXPORT void CryptoNative_SslSetQuietShutdown(SSL* ctx, int mode);
/*
Shims the SSL_get_client_CA_list method.
Returns the list of CA names explicitly set.
*/
PALEXPORT X509NameStack* CryptoNative_SslGetClientCAList(SSL* ssl);
/*
Shims the SSL_set_verify method.
*/
PALEXPORT void CryptoNative_SslSetVerifyPeer(SSL* ssl);
/*
Shims SSL_set_ex_data to attach application context.
*/
PALEXPORT int32_t CryptoNative_SslSetData(SSL* ssl, void *ptr);
/*
Shims SSL_get_ex_data to retrieve application context.
*/
PALEXPORT void* CryptoNative_SslGetData(SSL* ssl);
/*
Sets the specified encryption policy on the SSL_CTX.
*/
PALEXPORT int32_t CryptoNative_SslCtxSetEncryptionPolicy(SSL_CTX* ctx, EncryptionPolicy policy);
/*
Sets ciphers (< TLS 1.3) and cipher suites (TLS 1.3) on the SSL_CTX
*/
PALEXPORT int32_t CryptoNative_SslCtxSetCiphers(SSL_CTX* ctx, const char* cipherList, const char* cipherSuites);
PALEXPORT int32_t CryptoNative_SetCiphers(SSL* ssl, const char* cipherList, const char* cipherSuites);
/*
Determines if TLS 1.3 is supported by this OpenSSL implementation
*/
PALEXPORT int32_t CryptoNative_Tls13Supported(void);
/*
Shims the SSL_get_finished method.
*/
PALEXPORT int32_t CryptoNative_SslGetFinished(SSL* ssl, void* buf, int32_t count);
/*
Shims the SSL_get_peer_finished method.
*/
PALEXPORT int32_t CryptoNative_SslGetPeerFinished(SSL* ssl, void* buf, int32_t count);
/*
Returns true/false depending on whether the existing SSL session was re-used.
Shims the SSL_session_reused macro.
*/
PALEXPORT int32_t CryptoNative_SslSessionReused(SSL* ssl);
/*
Adds the given certificate to the extra chain certificates associated with ctx.
libssl frees the x509 object.
Returns 1 if success and 0 in case of failure
*/
PALEXPORT int32_t CryptoNative_SslCtxAddExtraChainCert(SSL_CTX* ctx, X509* x509);
/*
Adds the given certificate to the extra chain certificates associated with ssl state.
libssl frees the x509 object.
Returns 1 if success and 0 in case of failure
*/
PALEXPORT int32_t CryptoNative_SslAddExtraChainCert(SSL* ssl, X509* x509);
/*
Adds the names of the given certificates to the list of acceptable issuers sent to
client when requesting a client certificate. Shims the SSL_add_client_CA function.
No transfer of ownership or refcount changes.
Returns 1 if success and 0 in case of failure
*/
PALEXPORT int32_t CryptoNative_SslAddClientCAs(SSL* ssl, X509** x509s, uint32_t count);
/*
Shims the ssl_ctx_set_alpn_select_cb method.
*/
PALEXPORT void CryptoNative_SslCtxSetAlpnSelectCb(SSL_CTX* ctx, SslCtxSetAlpnCallback cb, void *arg);
/*
Shims the ssl_set_alpn_protos method.
Returns 0 on success, non-zero on failure.
*/
PALEXPORT int32_t CryptoNative_SslSetAlpnProtos(SSL* ssl, const uint8_t* protos, uint32_t protos_len);
/*
Shims the ssl_get0_alpn_selected method.
*/
PALEXPORT void CryptoNative_SslGet0AlpnSelected(SSL* ssl, const uint8_t** protocol, uint32_t* len);
/*
Shims the SSL_set_tlsext_host_name method.
*/
PALEXPORT int32_t CryptoNative_SslSetTlsExtHostName(SSL* ssl, uint8_t* name);
/*
Shims the SSL_get_current_cipher and SSL_CIPHER_get_id.
*/
PALEXPORT int32_t CryptoNative_SslGetCurrentCipherId(SSL* ssl, int32_t* cipherId);
/*
Looks up a cipher by the IANA identifier, returns a shared string for the OpenSSL name for the cipher,
and emits a value indicating if the cipher belongs to the SSL2-TLS1.2 list, or the TLS1.3+ list.
*/
PALEXPORT const char* CryptoNative_GetOpenSslCipherSuiteName(SSL* ssl, int32_t cipherSuite, int32_t* isTls12OrLower);
/*
Checks if given protocol version is supported.
*/
PALEXPORT int32_t CryptoNative_OpenSslGetProtocolSupport(SslProtocols protocol);
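The SslCtxSetAlpnCallback typedef above has the shape of OpenSSL's ALPN selection callback: `in` is a length-prefixed list of protocol names offered by the client, and the callback points *out/*outlen at the chosen entry. The sketch below shows a callback with that shape, registered through CryptoNative_SslCtxSetAlpnSelectCb; it is illustrative only, it assumes the declarations above (and OpenSSL's headers, which supply SSL_TLSEXT_ERR_OK/SSL_TLSEXT_ERR_NOACK) are visible, and the managed host's real selection logic is more involved.

static int32_t select_h2_or_refuse(SSL* ssl,
                                   const uint8_t** out,
                                   uint8_t* outlen,
                                   const uint8_t* in,
                                   uint32_t inlen,
                                   void* arg)
{
    (void)ssl;
    (void)arg;

    // "in" is a sequence of (1-byte length, name bytes) entries.
    for (uint32_t i = 0; i < inlen;)
    {
        uint8_t len = in[i];
        if (i + 1 + len > inlen)
            break;                        // malformed list; stop scanning

        if (len == 2 && in[i + 1] == 'h' && in[i + 2] == '2')
        {
            *out = &in[i + 1];            // point at the matching entry in place
            *outlen = len;
            return SSL_TLSEXT_ERR_OK;     // selection succeeded
        }
        i += 1u + len;
    }
    return SSL_TLSEXT_ERR_NOACK;          // nothing acceptable was offered
}

// Registration (ctx is an SSL_CTX* obtained via CryptoNative_SslCtxCreate):
//     CryptoNative_SslCtxSetAlpnSelectCb(ctx, select_h2_or_refuse, nullptr);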
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "pal_crypto_types.h"
#include "pal_compiler.h"
#include "opensslshim.h"
/*
These values should be kept in sync with System.Security.Authentication.SslProtocols.
*/
typedef enum
{
PAL_SSL_NONE = 0,
PAL_SSL_SSL2 = 12,
PAL_SSL_SSL3 = 48,
PAL_SSL_TLS = 192,
PAL_SSL_TLS11 = 768,
PAL_SSL_TLS12 = 3072,
PAL_SSL_TLS13 = 12288,
} SslProtocols;
/*
These values should be kept in sync with System.Net.Security.EncryptionPolicy.
*/
typedef enum
{
RequireEncryption = 0,
AllowNoEncryption,
NoEncryption
} EncryptionPolicy;
/*
These values should be kept in sync with System.Security.Authentication.CipherAlgorithmType.
*/
typedef enum
{
CipherAlgorithmType_None = 0,
Null = 24576,
Des = 26113,
Rc2 = 26114,
TripleDes = 26115,
Aes128 = 26126,
Aes192 = 26127,
Aes256 = 26128,
Aes = 26129,
Rc4 = 26625,
// Algorithm constants which are not present in the managed CipherAlgorithmType enum.
SSL_IDEA = 229380,
SSL_CAMELLIA128 = 229381,
SSL_CAMELLIA256 = 229382,
SSL_eGOST2814789CNT = 229383,
SSL_SEED = 229384,
} CipherAlgorithmType;
/*
These values should be kept in sync with System.Security.Authentication.ExchangeAlgorithmType.
*/
typedef enum
{
ExchangeAlgorithmType_None,
RsaSign = 9216,
RsaKeyX = 41984,
DiffieHellman = 43522,
// ExchangeAlgorithm constants which are not present in the managed ExchangeAlgorithmType enum.
SSL_ECDH = 43525,
SSL_ECDSA = 41475,
SSL_ECDHE = 44550,
SSL_kPSK = 229390,
SSL_kGOST = 229391,
SSL_kSRP = 229392,
SSL_kKRB5 = 229393,
} ExchangeAlgorithmType;
/*
These values should be kept in sync with System.Security.Authentication.HashAlgorithmType.
*/
typedef enum
{
HashAlgorithmType_None = 0,
Md5 = 32771,
Sha1 = 32772,
// HashAlgorithm constants which are not present in the managed HashAlgorithmType enum.
SSL_SHA256 = 32780,
SSL_SHA384 = 32781,
SSL_GOST94 = 229410,
SSL_GOST89 = 229411,
SSL_AEAD = 229412,
} HashAlgorithmType;
typedef enum
{
MD5_HashKeySize = 8 * MD5_DIGEST_LENGTH,
SHA1_HashKeySize = 8 * SHA_DIGEST_LENGTH,
SHA256_HashKeySize = 8 * SHA256_DIGEST_LENGTH,
SHA384_HashKeySize = 8 * SHA384_DIGEST_LENGTH,
GOST_HashKeySize = 256,
Default = 0,
} DataHashSize;
typedef enum
{
PAL_SSL_ERROR_NONE = 0,
PAL_SSL_ERROR_SSL = 1,
PAL_SSL_ERROR_WANT_READ = 2,
PAL_SSL_ERROR_WANT_WRITE = 3,
PAL_SSL_ERROR_SYSCALL = 5,
PAL_SSL_ERROR_ZERO_RETURN = 6,
} SslErrorCode;
// the function pointer definition for the callback used in SslCtxSetAlpnSelectCb
typedef int32_t (*SslCtxSetAlpnCallback)(SSL* ssl,
const uint8_t** out,
uint8_t* outlen,
const uint8_t* in,
uint32_t inlen,
void* arg);
/*
Ensures that libssl is correctly initialized and ready to use.
*/
PALEXPORT void CryptoNative_EnsureLibSslInitialized(void);
/*
Shims the SSLv23_method method.
Returns the requested SSL_METHOD.
*/
PALEXPORT const SSL_METHOD* CryptoNative_SslV2_3Method(void);
/*
Shims the SSL_CTX_new method.
Returns the new SSL_CTX instance.
*/
PALEXPORT SSL_CTX* CryptoNative_SslCtxCreate(const SSL_METHOD* method);
/*
Sets the specified protocols in the SSL_CTX options.
*/
PALEXPORT void CryptoNative_SslCtxSetProtocolOptions(SSL_CTX* ctx, SslProtocols protocols);
/*
Sets internal callback for client certificate selection if set is positive.
It will unset callback if set is zero.
*/
PALEXPORT void CryptoNative_SslSetClientCertCallback(SSL* ssl, int set);
/*
Requests that client sends Post-Handshake Authentication extension in ClientHello.
*/
PALEXPORT void CryptoNative_SslSetPostHandshakeAuth(SSL* ssl, int32_t val);
/*
Sets session caching. 0 is disabled.
*/
PALEXPORT void CryptoNative_SslCtxSetCaching(SSL_CTX* ctx, int mode);
/*
Shims the SSL_new method.
Returns the new SSL instance.
*/
PALEXPORT SSL* CryptoNative_SslCreate(SSL_CTX* ctx);
/*
Shims the SSL_get_error method.
Returns the error code for the specified result.
*/
PALEXPORT int32_t CryptoNative_SslGetError(SSL* ssl, int32_t ret);
/*
Cleans up and deletes an SSL instance.
Implemented by calling SSL_free.
No-op if ssl is null.
The given X509 SSL is invalid after this call.
Always succeeds.
*/
PALEXPORT void CryptoNative_SslDestroy(SSL* ssl);
/*
Cleans up and deletes an SSL_CTX instance.
Implemented by calling SSL_CTX_free.
No-op if ctx is null.
The given X509 SSL_CTX is invalid after this call.
Always succeeds.
*/
PALEXPORT void CryptoNative_SslCtxDestroy(SSL_CTX* ctx);
/*
Shims the SSL_set_connect_state method.
*/
PALEXPORT void CryptoNative_SslSetConnectState(SSL* ssl);
/*
Shims the SSL_set_accept_state method.
*/
PALEXPORT void CryptoNative_SslSetAcceptState(SSL* ssl);
/*
Shims the SSL_get_version method.
Returns the protocol version string for the SSL instance.
*/
PALEXPORT const char* CryptoNative_SslGetVersion(SSL* ssl);
/*
Shims the SSL_write method.
Returns the positive number of bytes written when successful, 0 or a negative number
when an error is encountered.
*/
PALEXPORT int32_t CryptoNative_SslWrite(SSL* ssl, const void* buf, int32_t num, int32_t* error);
/*
Shims the SSL_read method.
Returns the positive number of bytes read when successful, 0 or a negative number
when an error is encountered.
*/
PALEXPORT int32_t CryptoNative_SslRead(SSL* ssl, void* buf, int32_t num, int32_t* error);
/*
Shims the SSL_renegotiate method (up to TLS 1.2), or SSL_verify_client_post_handshake (TLS 1.3)
Returns 1 when renegotiation/post-handshake authentication started; 0 on error.
*/
PALEXPORT int32_t CryptoNative_SslRenegotiate(SSL* ssl, int32_t* error);
/*
Shims the SSL_renegotiate_pending method.
Returns 1 when negotiation is requested; 0 once a handshake has finished.
*/
PALEXPORT int32_t CryptoNative_IsSslRenegotiatePending(SSL* ssl);
/*
Shims the SSL_shutdown method.
Returns:
1 if the shutdown was successfully completed;
0 if the shutdown is not yet finished;
<0 if the shutdown was not successful because of a fatal error.
*/
PALEXPORT int32_t CryptoNative_SslShutdown(SSL* ssl);
/*
Shims the SSL_set_bio method.
*/
PALEXPORT void CryptoNative_SslSetBio(SSL* ssl, BIO* rbio, BIO* wbio);
/*
Shims the SSL_do_handshake method.
Returns:
1 if the handshake was successful;
0 if the handshake was not successful but was shut down controlled
and by the specifications of the TLS/SSL protocol;
<0 if the handshake was not successful because of a fatal error.
*/
PALEXPORT int32_t CryptoNative_SslDoHandshake(SSL* ssl, int32_t* error);
/*
Gets a value indicating whether the SSL_state is SSL_ST_OK.
Returns 1 if the state is OK, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_IsSslStateOK(SSL* ssl);
/*
Shims the SSL_get_peer_certificate method.
Returns the certificate presented by the peer.
*/
PALEXPORT X509* CryptoNative_SslGetPeerCertificate(SSL* ssl);
/*
Shims the SSL_get_peer_cert_chain method.
Returns the certificate chain presented by the peer.
*/
PALEXPORT X509Stack* CryptoNative_SslGetPeerCertChain(SSL* ssl);
/*
Shims the SSL_use_certificate method.
Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_SslUseCertificate(SSL* ssl, X509* x);
/*
Shims the SSL_use_PrivateKey method.
Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_SslUsePrivateKey(SSL* ssl, EVP_PKEY* pkey);
/*
Shims the SSL_CTX_use_certificate method.
Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_SslCtxUseCertificate(SSL_CTX* ctx, X509* x);
/*
Shims the SSL_CTX_use_PrivateKey method.
Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_SslCtxUsePrivateKey(SSL_CTX* ctx, EVP_PKEY* pkey);
/*
Shims the SSL_CTX_check_private_key method.
Returns 1 upon success, otherwise 0.
*/
PALEXPORT int32_t CryptoNative_SslCtxCheckPrivateKey(SSL_CTX* ctx);
/*
Shims the SSL_CTX_set_quiet_shutdown method.
*/
PALEXPORT void CryptoNative_SslCtxSetQuietShutdown(SSL_CTX* ctx);
/*
Shims the SSL_set_quiet_shutdown method.
*/
PALEXPORT void CryptoNative_SslSetQuietShutdown(SSL* ctx, int mode);
/*
Shims the SSL_get_client_CA_list method.
Returns the list of CA names explicitly set.
*/
PALEXPORT X509NameStack* CryptoNative_SslGetClientCAList(SSL* ssl);
/*
Shims the SSL_set_verify method.
*/
PALEXPORT void CryptoNative_SslSetVerifyPeer(SSL* ssl);
/*
Shims SSL_set_ex_data to attach application context.
*/
PALEXPORT int32_t CryptoNative_SslSetData(SSL* ssl, void *ptr);
/*
Shims SSL_get_ex_data to retrieve application context.
*/
PALEXPORT void* CryptoNative_SslGetData(SSL* ssl);
/*
Sets the specified encryption policy on the SSL_CTX.
*/
PALEXPORT int32_t CryptoNative_SslCtxSetEncryptionPolicy(SSL_CTX* ctx, EncryptionPolicy policy);
/*
Sets ciphers (< TLS 1.3) and cipher suites (TLS 1.3) on the SSL_CTX
*/
PALEXPORT int32_t CryptoNative_SslCtxSetCiphers(SSL_CTX* ctx, const char* cipherList, const char* cipherSuites);
PALEXPORT int32_t CryptoNative_SetCiphers(SSL* ssl, const char* cipherList, const char* cipherSuites);
/*
Determines if TLS 1.3 is supported by this OpenSSL implementation
*/
PALEXPORT int32_t CryptoNative_Tls13Supported(void);
/*
Shims the SSL_get_finished method.
*/
PALEXPORT int32_t CryptoNative_SslGetFinished(SSL* ssl, void* buf, int32_t count);
/*
Shims the SSL_get_peer_finished method.
*/
PALEXPORT int32_t CryptoNative_SslGetPeerFinished(SSL* ssl, void* buf, int32_t count);
/*
Returns true/false depending on whether the existing SSL session was re-used.
Shims the SSL_session_reused macro.
*/
PALEXPORT int32_t CryptoNative_SslSessionReused(SSL* ssl);
/*
Adds the given certificate to the extra chain certificates associated with ctx.
libssl frees the x509 object.
Returns 1 if success and 0 in case of failure
*/
PALEXPORT int32_t CryptoNative_SslCtxAddExtraChainCert(SSL_CTX* ctx, X509* x509);
/*
Adds the given certificate to the extra chain certificates associated with ssl state.
libssl frees the x509 object.
Returns 1 if success and 0 in case of failure
*/
PALEXPORT int32_t CryptoNative_SslAddExtraChainCert(SSL* ssl, X509* x509);
/*
Adds the names of the given certificates to the list of acceptable issuers sent to
client when requesting a client certificate. Shims the SSL_add_client_CA function.
No transfer of ownership or refcount changes.
Returns 1 if success and 0 in case of failure
*/
PALEXPORT int32_t CryptoNative_SslAddClientCAs(SSL* ssl, X509** x509s, uint32_t count);
/*
Shims the ssl_ctx_set_alpn_select_cb method.
*/
PALEXPORT void CryptoNative_SslCtxSetAlpnSelectCb(SSL_CTX* ctx, SslCtxSetAlpnCallback cb, void *arg);
/*
Shims the ssl_set_alpn_protos method.
Returns 0 on success, non-zero on failure.
*/
PALEXPORT int32_t CryptoNative_SslSetAlpnProtos(SSL* ssl, const uint8_t* protos, uint32_t protos_len);
/*
Shims the ssl_get0_alpn_selected method.
*/
PALEXPORT void CryptoNative_SslGet0AlpnSelected(SSL* ssl, const uint8_t** protocol, uint32_t* len);
/*
Shims the SSL_set_tlsext_host_name method.
*/
PALEXPORT int32_t CryptoNative_SslSetTlsExtHostName(SSL* ssl, uint8_t* name);
/*
Shims the SSL_get_current_cipher and SSL_CIPHER_get_id.
*/
PALEXPORT int32_t CryptoNative_SslGetCurrentCipherId(SSL* ssl, int32_t* cipherId);
/*
Looks up a cipher by the IANA identifier, returns a shared string for the OpenSSL name for the cipher,
and emits a value indicating if the cipher belongs to the SSL2-TLS1.2 list, or the TLS1.3+ list.
*/
PALEXPORT const char* CryptoNative_GetOpenSslCipherSuiteName(SSL* ssl, int32_t cipherSuite, int32_t* isTls12OrLower);
/*
Checks if given protocol version is supported.
*/
PALEXPORT int32_t CryptoNative_OpenSslGetProtocolSupport(SslProtocols protocol);
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/tests/JIT/Directed/pinning/object-pin/mirror.cpp | #if defined(_MSC_VER)
#define EXPORT_API extern "C" __declspec(dllexport)
#else
#define EXPORT_API extern "C" __attribute__((visibility("default")))
#ifdef HOST_64BIT
#define __int64 long
#else // HOST_64BIT
#define __int64 long long
#endif // HOST_64BIT
#define __int32 int
#define __int16 short int
#define __int8 char // assumes char is signed
#endif
#include <cstddef>
EXPORT_API unsigned __int32 Ret_Int(unsigned __int32 argVal){
unsigned __int32 retVal = (unsigned __int32)argVal;
return retVal;
}
EXPORT_API unsigned __int32 Ret_Ptr(void *argVal){
unsigned __int32 retVal = (unsigned __int32)(size_t)argVal;
return retVal;
}
EXPORT_API void Set_Val(__int32 *argVal, __int32 val){
    *argVal = val;
}
EXPORT_API void Mul_Val(__int32 *arg1,__int32 *arg2,__int32 *arg3){
*arg3 = (*arg1)*(*arg2);
}
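These exports exist to be P/Invoked by the managed pinning tests, but a small native driver can exercise them directly as a sanity check. The sketch below is hedged: it assumes it is linked against the library built from mirror.cpp (the library name/path is a build detail and not shown), and it relies on the fact that the __int32 typedefs above map to plain int/unsigned int.

#include <cassert>

// Declarations matching the exports above.
extern "C" unsigned int Ret_Int(unsigned int argVal);
extern "C" unsigned int Ret_Ptr(void* argVal);
extern "C" void Set_Val(int* argVal, int val);
extern "C" void Mul_Val(int* arg1, int* arg2, int* arg3);

int main()
{
    assert(Ret_Int(42u) == 42u);

    int value = 0;
    Set_Val(&value, 7);
    assert(value == 7);

    int a = 6, b = 9, product = 0;
    Mul_Val(&a, &b, &product);
    assert(product == 54);

    int local = 0;
    // Ret_Ptr deliberately narrows the pointer to 32 bits, so only the low
    // bits round-trip on 64-bit platforms.
    (void)Ret_Ptr(&local);
    return 0;
}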
| #if defined(_MSC_VER)
#define EXPORT_API extern "C" __declspec(dllexport)
#else
#define EXPORT_API extern "C" __attribute__((visibility("default")))
#ifdef HOST_64BIT
#define __int64 long
#else // HOST_64BIT
#define __int64 long long
#endif // HOST_64BIT
#define __int32 int
#define __int16 short int
#define __int8 char // assumes char is signed
#endif
#include <cstddef>
EXPORT_API unsigned __int32 Ret_Int(unsigned __int32 argVal){
unsigned __int32 retVal = (unsigned __int32)argVal;
return retVal;
}
EXPORT_API unsigned __int32 Ret_Ptr(void *argVal){
unsigned __int32 retVal = (unsigned __int32)(size_t)argVal;
return retVal;
}
EXPORT_API void Set_Val(__int32 *argVal, __int32 val){
    *argVal = val;
}
EXPORT_API void Mul_Val(__int32 *arg1,__int32 *arg2,__int32 *arg3){
*arg3 = (*arg1)*(*arg2);
}
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/prebuilt/idl/sospriv_i.cpp |
/* this ALWAYS GENERATED file contains the IIDs and CLSIDs */
/* link this file in with the server and any clients */
/* File created by MIDL compiler version 8.01.0622 */
/* at Mon Jan 18 19:14:07 2038
*/
/* Compiler settings for C:/ssd/runtime/src/coreclr/inc/sospriv.idl:
Oicf, W1, Zp8, env=Win32 (32b run), target_arch=X86 8.01.0622
protocol : dce , ms_ext, c_ext, robust
error checks: allocation ref bounds_check enum stub_data
VC __declspec() decoration level:
__declspec(uuid()), __declspec(selectany), __declspec(novtable)
DECLSPEC_UUID(), MIDL_INTERFACE()
*/
/* @@MIDL_FILE_HEADING( ) */
#pragma warning( disable: 4049 ) /* more than 64k source lines */
#ifdef __cplusplus
extern "C"{
#endif
#include <rpc.h>
#include <rpcndr.h>
#ifdef _MIDL_USE_GUIDDEF_
#ifndef INITGUID
#define INITGUID
#include <guiddef.h>
#undef INITGUID
#else
#include <guiddef.h>
#endif
#define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \
DEFINE_GUID(name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8)
#else // !_MIDL_USE_GUIDDEF_
#ifndef __IID_DEFINED__
#define __IID_DEFINED__
typedef struct _IID
{
unsigned long x;
unsigned short s1;
unsigned short s2;
unsigned char c[8];
} IID;
#endif // __IID_DEFINED__
#ifndef CLSID_DEFINED
#define CLSID_DEFINED
typedef IID CLSID;
#endif // CLSID_DEFINED
#define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \
EXTERN_C __declspec(selectany) const type name = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}}
#endif // !_MIDL_USE_GUIDDEF_
MIDL_DEFINE_GUID(IID, IID_ISOSEnum,0x286CA186,0xE763,0x4F61,0x97,0x60,0x48,0x7D,0x43,0xAE,0x43,0x41);
MIDL_DEFINE_GUID(IID, IID_ISOSHandleEnum,0x3E269830,0x4A2B,0x4301,0x8E,0xE2,0xD6,0x80,0x5B,0x29,0xB2,0xFA);
MIDL_DEFINE_GUID(IID, IID_ISOSStackRefErrorEnum,0x774F4E1B,0xFB7B,0x491B,0x97,0x6D,0xA8,0x13,0x0F,0xE3,0x55,0xE9);
MIDL_DEFINE_GUID(IID, IID_ISOSStackRefEnum,0x8FA642BD,0x9F10,0x4799,0x9A,0xA3,0x51,0x2A,0xE7,0x8C,0x77,0xEE);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface,0x436f00f2,0xb42a,0x4b9f,0x87,0x0c,0xe7,0x3d,0xb6,0x6a,0xe9,0x30);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface2,0xA16026EC,0x96F4,0x40BA,0x87,0xFB,0x55,0x75,0x98,0x6F,0xB7,0xAF);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface3,0xB08C5CDC,0xFD8A,0x49C5,0xAB,0x38,0x5F,0xEE,0xF3,0x52,0x35,0xB4);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface4,0x74B9D34C,0xA612,0x4B07,0x93,0xDD,0x54,0x62,0x17,0x8F,0xCE,0x11);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface5,0x127d6abe,0x6c86,0x4e48,0x8e,0x7b,0x22,0x07,0x81,0xc5,0x81,0x01);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface6,0x11206399,0x4B66,0x4EDB,0x98,0xEA,0x85,0x65,0x4E,0x59,0xAD,0x45);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface7,0xc1020dde,0xfe98,0x4536,0xa5,0x3b,0xf3,0x5a,0x74,0xc3,0x27,0xeb);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface8,0xc12f35a9,0xe55c,0x4520,0xa8,0x94,0xb3,0xdc,0x51,0x65,0xdf,0xce);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface9,0x4eca42d8,0x7e7b,0x4c8a,0xa1,0x16,0x7b,0xfb,0xf6,0x92,0x92,0x67);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface10,0x90B8FCC3,0x7251,0x4B0A,0xAE,0x3D,0x5C,0x13,0xA6,0x7E,0xC9,0xAA);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface11,0x96BA1DB9,0x14CD,0x4492,0x80,0x65,0x1C,0xAA,0xEC,0xF6,0xE5,0xCF);
#undef MIDL_DEFINE_GUID
#ifdef __cplusplus
}
#endif
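For reference, under the !_MIDL_USE_GUIDDEF_ branch the MIDL_DEFINE_GUID macro above expands each entry into a selectany constant; the first definition, for example, becomes roughly the following.

// Expansion of MIDL_DEFINE_GUID(IID, IID_ISOSEnum, 0x286CA186, 0xE763, 0x4F61,
//                               0x97, 0x60, 0x48, 0x7D, 0x43, 0xAE, 0x43, 0x41):
EXTERN_C __declspec(selectany) const IID IID_ISOSEnum =
    {0x286CA186, 0xE763, 0x4F61, {0x97, 0x60, 0x48, 0x7D, 0x43, 0xAE, 0x43, 0x41}};

Under _MIDL_USE_GUIDDEF_ the same line instead routes through DEFINE_GUID from guiddef.h.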
|
/* this ALWAYS GENERATED file contains the IIDs and CLSIDs */
/* link this file in with the server and any clients */
/* File created by MIDL compiler version 8.01.0622 */
/* at Mon Jan 18 19:14:07 2038
*/
/* Compiler settings for C:/ssd/runtime/src/coreclr/inc/sospriv.idl:
Oicf, W1, Zp8, env=Win32 (32b run), target_arch=X86 8.01.0622
protocol : dce , ms_ext, c_ext, robust
error checks: allocation ref bounds_check enum stub_data
VC __declspec() decoration level:
__declspec(uuid()), __declspec(selectany), __declspec(novtable)
DECLSPEC_UUID(), MIDL_INTERFACE()
*/
/* @@MIDL_FILE_HEADING( ) */
#pragma warning( disable: 4049 ) /* more than 64k source lines */
#ifdef __cplusplus
extern "C"{
#endif
#include <rpc.h>
#include <rpcndr.h>
#ifdef _MIDL_USE_GUIDDEF_
#ifndef INITGUID
#define INITGUID
#include <guiddef.h>
#undef INITGUID
#else
#include <guiddef.h>
#endif
#define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \
DEFINE_GUID(name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8)
#else // !_MIDL_USE_GUIDDEF_
#ifndef __IID_DEFINED__
#define __IID_DEFINED__
typedef struct _IID
{
unsigned long x;
unsigned short s1;
unsigned short s2;
unsigned char c[8];
} IID;
#endif // __IID_DEFINED__
#ifndef CLSID_DEFINED
#define CLSID_DEFINED
typedef IID CLSID;
#endif // CLSID_DEFINED
#define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \
EXTERN_C __declspec(selectany) const type name = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}}
#endif // !_MIDL_USE_GUIDDEF_
MIDL_DEFINE_GUID(IID, IID_ISOSEnum,0x286CA186,0xE763,0x4F61,0x97,0x60,0x48,0x7D,0x43,0xAE,0x43,0x41);
MIDL_DEFINE_GUID(IID, IID_ISOSHandleEnum,0x3E269830,0x4A2B,0x4301,0x8E,0xE2,0xD6,0x80,0x5B,0x29,0xB2,0xFA);
MIDL_DEFINE_GUID(IID, IID_ISOSStackRefErrorEnum,0x774F4E1B,0xFB7B,0x491B,0x97,0x6D,0xA8,0x13,0x0F,0xE3,0x55,0xE9);
MIDL_DEFINE_GUID(IID, IID_ISOSStackRefEnum,0x8FA642BD,0x9F10,0x4799,0x9A,0xA3,0x51,0x2A,0xE7,0x8C,0x77,0xEE);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface,0x436f00f2,0xb42a,0x4b9f,0x87,0x0c,0xe7,0x3d,0xb6,0x6a,0xe9,0x30);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface2,0xA16026EC,0x96F4,0x40BA,0x87,0xFB,0x55,0x75,0x98,0x6F,0xB7,0xAF);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface3,0xB08C5CDC,0xFD8A,0x49C5,0xAB,0x38,0x5F,0xEE,0xF3,0x52,0x35,0xB4);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface4,0x74B9D34C,0xA612,0x4B07,0x93,0xDD,0x54,0x62,0x17,0x8F,0xCE,0x11);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface5,0x127d6abe,0x6c86,0x4e48,0x8e,0x7b,0x22,0x07,0x81,0xc5,0x81,0x01);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface6,0x11206399,0x4B66,0x4EDB,0x98,0xEA,0x85,0x65,0x4E,0x59,0xAD,0x45);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface7,0xc1020dde,0xfe98,0x4536,0xa5,0x3b,0xf3,0x5a,0x74,0xc3,0x27,0xeb);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface8,0xc12f35a9,0xe55c,0x4520,0xa8,0x94,0xb3,0xdc,0x51,0x65,0xdf,0xce);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface9,0x4eca42d8,0x7e7b,0x4c8a,0xa1,0x16,0x7b,0xfb,0xf6,0x92,0x92,0x67);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface10,0x90B8FCC3,0x7251,0x4B0A,0xAE,0x3D,0x5C,0x13,0xA6,0x7E,0xC9,0xAA);
MIDL_DEFINE_GUID(IID, IID_ISOSDacInterface11,0x96BA1DB9,0x14CD,0x4492,0x80,0x65,0x1C,0xAA,0xEC,0xF6,0xE5,0xCF);
#undef MIDL_DEFINE_GUID
#ifdef __cplusplus
}
#endif
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/vm/comdynamic.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
////////////////////////////////////////////////////////////////////////////////
// COMDynamic.h
// This module defines the native methods that are used for Dynamic IL generation
////////////////////////////////////////////////////////////////////////////////
#ifndef _COMDYNAMIC_H_
#define _COMDYNAMIC_H_
#include "dbginterface.h"
typedef enum PEFileKinds {
Dll = 0x1,
ConsoleApplication = 0x2,
WindowApplication = 0x3,
} PEFileKinds;
struct ExceptionInstance;
// COMDynamicWrite
// These methods implement the dynamic IL creation process
// inside reflection.
// This class exists as a container for methods that need friend access to other types.
class COMDynamicWrite
{
public:
static INT32 DefineType(Module* pModule,
LPCWSTR wszFullName,
INT32 tkParent,
INT32 attributes,
INT32 tkEnclosingType,
INT32 * pInterfaceTokens);
static void TermCreateClass(Module* pModule, INT32 tk, QCall::ObjectHandleOnStack retType);
};
// This function will create the class's metadata definition
extern "C" INT32 QCALLTYPE TypeBuilder_DefineType(QCall::ModuleHandle pModule,
LPCWSTR wszFullName,
INT32 tkParent,
INT32 attributes,
INT32 tkEnclosingType,
INT32 * pInterfaceTokens);
extern "C" INT32 QCALLTYPE TypeBuilder_DefineGenericParam(QCall::ModuleHandle pModule,
LPCWSTR wszFullName,
INT32 tkParent,
INT32 attributes,
INT32 position,
INT32 * pConstraintTokens);
// This function will reset the parent class in metadata
extern "C" void QCALLTYPE TypeBuilder_SetParentType(QCall::ModuleHandle pModule, INT32 tdType, INT32 tkParent);
// This function will add another interface impl
extern "C" void QCALLTYPE TypeBuilder_AddInterfaceImpl(QCall::ModuleHandle pModule, INT32 tdType, INT32 tkInterface);
// This function will create a method within the class
extern "C" INT32 QCALLTYPE TypeBuilder_DefineMethod(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, LPCBYTE pSignature, INT32 sigLength, INT32 attributes);
extern "C" INT32 QCALLTYPE TypeBuilder_DefineMethodSpec(QCall::ModuleHandle pModule, INT32 tkParent, LPCBYTE pSignature, INT32 sigLength);
// This function will set the IL body of a method within the class
extern "C" void QCALLTYPE TypeBuilder_SetMethodIL(QCall::ModuleHandle pModule,
INT32 tk,
BOOL fIsInitLocal,
LPCBYTE pBody,
INT32 cbBody,
LPCBYTE pLocalSig,
INT32 sigLength,
UINT16 maxStackSize,
ExceptionInstance * pExceptions,
INT32 numExceptions,
INT32 * pTokenFixups,
INT32 numTokenFixups);
extern "C" void QCALLTYPE TypeBuilder_TermCreateClass(QCall::ModuleHandle pModule, INT32 tk, QCall::ObjectHandleOnStack retType);
extern "C" mdFieldDef QCALLTYPE TypeBuilder_DefineField(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, LPCBYTE pSignature, INT32 sigLength, INT32 attr);
extern "C" void QCALLTYPE TypeBuilder_SetPInvokeData(QCall::ModuleHandle pModule, LPCWSTR wszDllName, LPCWSTR wszFunctionName, INT32 token, INT32 linkFlags);
extern "C" INT32 QCALLTYPE TypeBuilder_DefineProperty(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, INT32 attr, LPCBYTE pSignature, INT32 sigLength);
extern "C" INT32 QCALLTYPE TypeBuilder_DefineEvent(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, INT32 attr, INT32 tkEventType);
// functions to set Setter, Getter, Reset, TestDefault, and other methods
extern "C" void QCALLTYPE TypeBuilder_DefineMethodSemantics(QCall::ModuleHandle pModule, INT32 tkAssociation, INT32 attr, INT32 tkMethod);
// functions to set method's implementation flag
extern "C" void QCALLTYPE TypeBuilder_SetMethodImpl(QCall::ModuleHandle pModule, INT32 tkMethod, INT32 attr);
// functions to create MethodImpl record
extern "C" void QCALLTYPE TypeBuilder_DefineMethodImpl(QCall::ModuleHandle pModule, UINT32 tkType, UINT32 tkBody, UINT32 tkDecl);
// GetTokenFromSig's argument
extern "C" INT32 QCALLTYPE TypeBuilder_GetTokenFromSig(QCall::ModuleHandle pModule, LPCBYTE pSignature, INT32 sigLength);
// Set Field offset
extern "C" void QCALLTYPE TypeBuilder_SetFieldLayoutOffset(QCall::ModuleHandle pModule, INT32 tkField, INT32 iOffset);
// Set classlayout info
extern "C" void QCALLTYPE TypeBuilder_SetClassLayout(QCall::ModuleHandle pModule, INT32 tk, INT32 iPackSize, UINT32 iTotalSize);
// Set a custom attribute
extern "C" void QCALLTYPE TypeBuilder_DefineCustomAttribute(QCall::ModuleHandle pModule, INT32 token, INT32 conTok, LPCBYTE pBlob, INT32 cbBlob);
// functions to set ParamInfo
extern "C" INT32 QCALLTYPE TypeBuilder_SetParamInfo(QCall::ModuleHandle pModule, UINT32 tkMethod, UINT32 iSequence, UINT32 iAttributes, LPCWSTR wszParamName);
// functions to set default value
extern "C" void QCALLTYPE TypeBuilder_SetConstantValue(QCall::ModuleHandle pModule, UINT32 tk, DWORD valueType, LPVOID pValue);
#endif // _COMDYNAMIC_H_
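On the VM side, entry points declared with QCALLTYPE are normally implemented with the QCall helper macros. The skeleton below is a rough, hedged sketch of that shape: the macro names QCALL_CONTRACT/BEGIN_QCALL/END_QCALL are the usual VM helpers, and the body is a placeholder, not the real implementation of any declaration above.

// Hypothetical skeleton only.
extern "C" void QCALLTYPE TypeBuilder_SetParentType(QCall::ModuleHandle pModule, INT32 tdType, INT32 tkParent)
{
    QCALL_CONTRACT;

    BEGIN_QCALL;

    // Arguments arrive as simple native types; the body runs outside
    // cooperative GC mode, so it may block, take locks, and touch metadata.
    // (A real implementation would update the TypeDef's parent token here.)

    END_QCALL;
}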
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
////////////////////////////////////////////////////////////////////////////////
// COMDynamic.h
// This module defines the native methods that are used for Dynamic IL generation
////////////////////////////////////////////////////////////////////////////////
#ifndef _COMDYNAMIC_H_
#define _COMDYNAMIC_H_
#include "dbginterface.h"
typedef enum PEFileKinds {
Dll = 0x1,
ConsoleApplication = 0x2,
WindowApplication = 0x3,
} PEFileKinds;
struct ExceptionInstance;
// COMDynamicWrite
// These methods implement the dynamic IL creation process
// inside reflection.
// This class exists as a container for methods that need friend access to other types.
class COMDynamicWrite
{
public:
static INT32 DefineType(Module* pModule,
LPCWSTR wszFullName,
INT32 tkParent,
INT32 attributes,
INT32 tkEnclosingType,
INT32 * pInterfaceTokens);
static void TermCreateClass(Module* pModule, INT32 tk, QCall::ObjectHandleOnStack retType);
};
// This function will create the class's metadata definition
extern "C" INT32 QCALLTYPE TypeBuilder_DefineType(QCall::ModuleHandle pModule,
LPCWSTR wszFullName,
INT32 tkParent,
INT32 attributes,
INT32 tkEnclosingType,
INT32 * pInterfaceTokens);
extern "C" INT32 QCALLTYPE TypeBuilder_DefineGenericParam(QCall::ModuleHandle pModule,
LPCWSTR wszFullName,
INT32 tkParent,
INT32 attributes,
INT32 position,
INT32 * pConstraintTokens);
// This function will reset the parent class in metadata
extern "C" void QCALLTYPE TypeBuilder_SetParentType(QCall::ModuleHandle pModule, INT32 tdType, INT32 tkParent);
// This function will add another interface impl
extern "C" void QCALLTYPE TypeBuilder_AddInterfaceImpl(QCall::ModuleHandle pModule, INT32 tdType, INT32 tkInterface);
// This function will create a method within the class
extern "C" INT32 QCALLTYPE TypeBuilder_DefineMethod(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, LPCBYTE pSignature, INT32 sigLength, INT32 attributes);
extern "C" INT32 QCALLTYPE TypeBuilder_DefineMethodSpec(QCall::ModuleHandle pModule, INT32 tkParent, LPCBYTE pSignature, INT32 sigLength);
// This function will set the IL body of a method within the class
extern "C" void QCALLTYPE TypeBuilder_SetMethodIL(QCall::ModuleHandle pModule,
INT32 tk,
BOOL fIsInitLocal,
LPCBYTE pBody,
INT32 cbBody,
LPCBYTE pLocalSig,
INT32 sigLength,
UINT16 maxStackSize,
ExceptionInstance * pExceptions,
INT32 numExceptions,
INT32 * pTokenFixups,
INT32 numTokenFixups);
extern "C" void QCALLTYPE TypeBuilder_TermCreateClass(QCall::ModuleHandle pModule, INT32 tk, QCall::ObjectHandleOnStack retType);
extern "C" mdFieldDef QCALLTYPE TypeBuilder_DefineField(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, LPCBYTE pSignature, INT32 sigLength, INT32 attr);
extern "C" void QCALLTYPE TypeBuilder_SetPInvokeData(QCall::ModuleHandle pModule, LPCWSTR wszDllName, LPCWSTR wszFunctionName, INT32 token, INT32 linkFlags);
extern "C" INT32 QCALLTYPE TypeBuilder_DefineProperty(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, INT32 attr, LPCBYTE pSignature, INT32 sigLength);
extern "C" INT32 QCALLTYPE TypeBuilder_DefineEvent(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, INT32 attr, INT32 tkEventType);
// functions to set Setter, Getter, Reset, TestDefault, and other methods
extern "C" void QCALLTYPE TypeBuilder_DefineMethodSemantics(QCall::ModuleHandle pModule, INT32 tkAssociation, INT32 attr, INT32 tkMethod);
// functions to set method's implementation flag
extern "C" void QCALLTYPE TypeBuilder_SetMethodImpl(QCall::ModuleHandle pModule, INT32 tkMethod, INT32 attr);
// functions to create MethodImpl record
extern "C" void QCALLTYPE TypeBuilder_DefineMethodImpl(QCall::ModuleHandle pModule, UINT32 tkType, UINT32 tkBody, UINT32 tkDecl);
// GetTokenFromSig's argument
extern "C" INT32 QCALLTYPE TypeBuilder_GetTokenFromSig(QCall::ModuleHandle pModule, LPCBYTE pSignature, INT32 sigLength);
// Set Field offset
extern "C" void QCALLTYPE TypeBuilder_SetFieldLayoutOffset(QCall::ModuleHandle pModule, INT32 tkField, INT32 iOffset);
// Set class layout info
extern "C" void QCALLTYPE TypeBuilder_SetClassLayout(QCall::ModuleHandle pModule, INT32 tk, INT32 iPackSize, UINT32 iTotalSize);
// Set a custom attribute
extern "C" void QCALLTYPE TypeBuilder_DefineCustomAttribute(QCall::ModuleHandle pModule, INT32 token, INT32 conTok, LPCBYTE pBlob, INT32 cbBlob);
// functions to set ParamInfo
extern "C" INT32 QCALLTYPE TypeBuilder_SetParamInfo(QCall::ModuleHandle pModule, UINT32 tkMethod, UINT32 iSequence, UINT32 iAttributes, LPCWSTR wszParamName);
// functions to set default value
extern "C" void QCALLTYPE TypeBuilder_SetConstantValue(QCall::ModuleHandle pModule, UINT32 tk, DWORD valueType, LPVOID pValue);
#endif // _COMDYNAMIC_H_
| -1 |
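For orientation, the comdynamic.h declarations in the row above are the native entry points behind System.Reflection.Emit's TypeBuilder. A minimal C# sketch of the managed flow that drives them on CoreCLR — DefineType, DefineMethod, the IL body handed over as in SetMethodIL, then TermCreateClass when the type is baked — is shown below; the assembly, type, and method names are invented for the example.

```csharp
using System;
using System.Reflection;
using System.Reflection.Emit;

class TypeBuilderSketch
{
    static void Main()
    {
        // A dynamic assembly and module host the TypeBuilder; DefineType maps
        // roughly onto the TypeBuilder_DefineType entry point declared above.
        AssemblyBuilder asm = AssemblyBuilder.DefineDynamicAssembly(
            new AssemblyName("DynamicDemo"), AssemblyBuilderAccess.Run);
        ModuleBuilder mod = asm.DefineDynamicModule("DynamicDemo");
        TypeBuilder tb = mod.DefineType("Generated.Adder", TypeAttributes.Public);

        // DefineMethod plus the emitted IL body correspond to the
        // TypeBuilder_DefineMethod / TypeBuilder_SetMethodIL pair.
        MethodBuilder mb = tb.DefineMethod(
            "Add", MethodAttributes.Public | MethodAttributes.Static,
            typeof(int), new[] { typeof(int), typeof(int) });
        ILGenerator il = mb.GetILGenerator();
        il.Emit(OpCodes.Ldarg_0);
        il.Emit(OpCodes.Ldarg_1);
        il.Emit(OpCodes.Add);
        il.Emit(OpCodes.Ret);

        // Baking the type hands the accumulated definition to the runtime,
        // which is the role of TypeBuilder_TermCreateClass above.
        Type created = tb.CreateType();
        object result = created.GetMethod("Add").Invoke(null, new object[] { 2, 3 });
        Console.WriteLine(result); // 5
    }
}
```

This also explains why most of the declarations above take a module handle plus metadata tokens: the builder accumulates metadata per module and refers back to it by token.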
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/vm/custommarshalerinfo.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: CustomMarshalerInfo.cpp
//
//
// Custom marshaler information used when marshaling
// a parameter with a custom marshaler.
//
#include "common.h"
#include "custommarshalerinfo.h"
#include "mlinfo.h"
#include "sigbuilder.h"
//==========================================================================
// Implementation of the custom marshaler info class.
//==========================================================================
CustomMarshalerInfo::CustomMarshalerInfo(LoaderAllocator *pLoaderAllocator, TypeHandle hndCustomMarshalerType, TypeHandle hndManagedType, LPCUTF8 strCookie, DWORD cCookieStrBytes)
: m_NativeSize(0)
, m_hndManagedType(hndManagedType)
, m_pLoaderAllocator(pLoaderAllocator)
, m_hndCustomMarshaler(NULL)
, m_pMarshalNativeToManagedMD(NULL)
, m_pMarshalManagedToNativeMD(NULL)
, m_pCleanUpNativeDataMD(NULL)
, m_pCleanUpManagedDataMD(NULL)
, m_bDataIsByValue(FALSE)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(pLoaderAllocator));
}
CONTRACTL_END;
// Make sure the custom marshaller implements ICustomMarshaler.
if (!hndCustomMarshalerType.GetMethodTable()->CanCastToInterface(CoreLibBinder::GetClass(CLASS__ICUSTOM_MARSHALER)))
{
DefineFullyQualifiedNameForClassW()
COMPlusThrow(kApplicationException,
IDS_EE_ICUSTOMMARSHALERNOTIMPL,
GetFullyQualifiedNameForClassW(hndCustomMarshalerType.GetMethodTable()));
}
// Determine if this type is a value class.
m_bDataIsByValue = m_hndManagedType.GetMethodTable()->IsValueType();
// Custom marshalling of value classes is not currently supported.
if (m_bDataIsByValue)
COMPlusThrow(kNotSupportedException, W("NotSupported_ValueClassCM"));
// Run the <clinit> on the marshaler since it might not have run yet.
hndCustomMarshalerType.GetMethodTable()->EnsureInstanceActive();
hndCustomMarshalerType.GetMethodTable()->CheckRunClassInitThrowing();
// Create a COM+ string that will contain the string cookie.
STRINGREF CookieStringObj = StringObject::NewString(strCookie, cCookieStrBytes);
GCPROTECT_BEGIN(CookieStringObj);
// Load the method desc for the static method to retrieve the instance.
MethodDesc *pGetCustomMarshalerMD = GetCustomMarshalerMD(CustomMarshalerMethods_GetInstance, hndCustomMarshalerType);
// If the GetInstance method is generic, get an instantiating stub for it -
// the CallDescr infrastructure doesn't know how to pass secret generic arguments.
if (pGetCustomMarshalerMD->RequiresInstMethodTableArg())
{
pGetCustomMarshalerMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
pGetCustomMarshalerMD,
hndCustomMarshalerType.GetMethodTable(),
FALSE, // forceBoxedEntryPoint
Instantiation(), // methodInst
FALSE, // allowInstParam
FALSE); // forceRemotableMethod
_ASSERTE(!pGetCustomMarshalerMD->RequiresInstMethodTableArg());
}
MethodDescCallSite getCustomMarshaler(pGetCustomMarshalerMD, (OBJECTREF*)&CookieStringObj);
pGetCustomMarshalerMD->EnsureActive();
// Prepare the arguments that will be passed to GetCustomMarshaler.
ARG_SLOT GetCustomMarshalerArgs[] = {
ObjToArgSlot(CookieStringObj)
};
// Call the GetCustomMarshaler method to retrieve the custom marshaler to use.
OBJECTREF CustomMarshalerObj = NULL;
GCPROTECT_BEGIN(CustomMarshalerObj);
CustomMarshalerObj = getCustomMarshaler.Call_RetOBJECTREF(GetCustomMarshalerArgs);
if (!CustomMarshalerObj)
{
DefineFullyQualifiedNameForClassW()
COMPlusThrow(kApplicationException,
IDS_EE_NOCUSTOMMARSHALER,
GetFullyQualifiedNameForClassW(hndCustomMarshalerType.GetMethodTable()));
}
    // Load the method descs for all the methods in the ICustomMarshaler interface based on the type of the marshaler object.
TypeHandle customMarshalerObjType = CustomMarshalerObj->GetMethodTable();
m_pMarshalNativeToManagedMD = GetCustomMarshalerMD(CustomMarshalerMethods_MarshalNativeToManaged, customMarshalerObjType);
m_pMarshalManagedToNativeMD = GetCustomMarshalerMD(CustomMarshalerMethods_MarshalManagedToNative, customMarshalerObjType);
m_pCleanUpNativeDataMD = GetCustomMarshalerMD(CustomMarshalerMethods_CleanUpNativeData, customMarshalerObjType);
m_pCleanUpManagedDataMD = GetCustomMarshalerMD(CustomMarshalerMethods_CleanUpManagedData, customMarshalerObjType);
m_hndCustomMarshaler = pLoaderAllocator->AllocateHandle(CustomMarshalerObj);
GCPROTECT_END();
// Retrieve the size of the native data.
if (m_bDataIsByValue)
{
// <TODO>@TODO(DM): Call GetNativeDataSize() to retrieve the size of the native data.</TODO>
_ASSERTE(!"Value classes are not yet supported by the custom marshaler!");
}
else
{
m_NativeSize = sizeof(void *);
}
GCPROTECT_END();
}
CustomMarshalerInfo::~CustomMarshalerInfo()
{
WRAPPER_NO_CONTRACT;
if (m_pLoaderAllocator->IsAlive() && m_hndCustomMarshaler)
{
// Only free the LOADERHANDLE if the LoaderAllocator is still alive.
// If the loader allocator isn't alive, the handle has automatically
// been collected already.
m_pLoaderAllocator->FreeHandle(m_hndCustomMarshaler);
}
m_hndCustomMarshaler = NULL;
}
void *CustomMarshalerInfo::operator new(size_t size, LoaderHeap *pHeap)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(CheckPointer(pHeap));
}
CONTRACTL_END;
return pHeap->AllocMem(S_SIZE_T(sizeof(CustomMarshalerInfo)));
}
void CustomMarshalerInfo::operator delete(void *pMem)
{
// Instances of this class are always allocated on the loader heap so
// the delete operator has nothing to do.
LIMITED_METHOD_CONTRACT;
}
OBJECTREF CustomMarshalerInfo::InvokeMarshalNativeToManagedMeth(void *pNative)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(pNative, NULL_OK));
}
CONTRACTL_END;
if (!pNative)
return NULL;
OBJECTREF managedObject;
OBJECTREF customMarshaler = m_pLoaderAllocator->GetHandleValue(m_hndCustomMarshaler);
GCPROTECT_BEGIN (customMarshaler);
MethodDescCallSite marshalNativeToManaged(m_pMarshalNativeToManagedMD, &customMarshaler);
ARG_SLOT Args[] = {
ObjToArgSlot(customMarshaler),
PtrToArgSlot(pNative)
};
managedObject = marshalNativeToManaged.Call_RetOBJECTREF(Args);
GCPROTECT_END ();
return managedObject;
}
void *CustomMarshalerInfo::InvokeMarshalManagedToNativeMeth(OBJECTREF MngObj)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
void *RetVal = NULL;
if (!MngObj)
return NULL;
GCPROTECT_BEGIN (MngObj);
OBJECTREF customMarshaler = m_pLoaderAllocator->GetHandleValue(m_hndCustomMarshaler);
GCPROTECT_BEGIN (customMarshaler);
MethodDescCallSite marshalManagedToNative(m_pMarshalManagedToNativeMD, &customMarshaler);
ARG_SLOT Args[] = {
ObjToArgSlot(customMarshaler),
ObjToArgSlot(MngObj)
};
RetVal = marshalManagedToNative.Call_RetLPVOID(Args);
GCPROTECT_END ();
GCPROTECT_END ();
return RetVal;
}
void CustomMarshalerInfo::InvokeCleanUpNativeMeth(void *pNative)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(pNative, NULL_OK));
}
CONTRACTL_END;
if (!pNative)
return;
OBJECTREF customMarshaler = m_pLoaderAllocator->GetHandleValue(m_hndCustomMarshaler);
GCPROTECT_BEGIN (customMarshaler);
MethodDescCallSite cleanUpNativeData(m_pCleanUpNativeDataMD, &customMarshaler);
ARG_SLOT Args[] = {
ObjToArgSlot(customMarshaler),
PtrToArgSlot(pNative)
};
cleanUpNativeData.Call(Args);
GCPROTECT_END();
}
void CustomMarshalerInfo::InvokeCleanUpManagedMeth(OBJECTREF MngObj)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
if (!MngObj)
return;
GCPROTECT_BEGIN (MngObj);
OBJECTREF customMarshaler = m_pLoaderAllocator->GetHandleValue(m_hndCustomMarshaler);
GCPROTECT_BEGIN (customMarshaler);
MethodDescCallSite cleanUpManagedData(m_pCleanUpManagedDataMD, &customMarshaler);
ARG_SLOT Args[] = {
ObjToArgSlot(customMarshaler),
ObjToArgSlot(MngObj)
};
cleanUpManagedData.Call(Args);
GCPROTECT_END ();
GCPROTECT_END ();
}
MethodDesc *CustomMarshalerInfo::GetCustomMarshalerMD(EnumCustomMarshalerMethods Method, TypeHandle hndCustomMarshalertype)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
MethodTable *pMT = hndCustomMarshalertype.AsMethodTable();
_ASSERTE(pMT->CanCastToInterface(CoreLibBinder::GetClass(CLASS__ICUSTOM_MARSHALER)));
MethodDesc *pMD = NULL;
switch (Method)
{
case CustomMarshalerMethods_MarshalNativeToManaged:
pMD = pMT->GetMethodDescForInterfaceMethod(
CoreLibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__MARSHAL_NATIVE_TO_MANAGED),
TRUE /* throwOnConflict */);
break;
case CustomMarshalerMethods_MarshalManagedToNative:
pMD = pMT->GetMethodDescForInterfaceMethod(
CoreLibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__MARSHAL_MANAGED_TO_NATIVE),
TRUE /* throwOnConflict */);
break;
case CustomMarshalerMethods_CleanUpNativeData:
pMD = pMT->GetMethodDescForInterfaceMethod(
CoreLibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__CLEANUP_NATIVE_DATA),
TRUE /* throwOnConflict */);
break;
case CustomMarshalerMethods_CleanUpManagedData:
pMD = pMT->GetMethodDescForInterfaceMethod(
CoreLibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__CLEANUP_MANAGED_DATA),
TRUE /* throwOnConflict */);
break;
case CustomMarshalerMethods_GetNativeDataSize:
pMD = pMT->GetMethodDescForInterfaceMethod(
CoreLibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__GET_NATIVE_DATA_SIZE),
TRUE /* throwOnConflict */);
break;
case CustomMarshalerMethods_GetInstance:
// Must look this up by name since it's static
pMD = MemberLoader::FindMethod(pMT, "GetInstance", &gsig_SM_Str_RetICustomMarshaler);
if (!pMD)
{
DefineFullyQualifiedNameForClassW()
COMPlusThrow(kApplicationException,
IDS_EE_GETINSTANCENOTIMPL,
GetFullyQualifiedNameForClassW(pMT));
}
break;
default:
_ASSERTE(!"Unknown custom marshaler method");
}
_ASSERTE(pMD && "Unable to find specified CustomMarshaler method");
// Ensure that the value types in the signature are loaded.
MetaSig::EnsureSigValueTypesLoaded(pMD);
// Return the specified method desc.
return pMD;
}
//==========================================================================
// Implementation of the custom marshaler hashtable helper.
//==========================================================================
EEHashEntry_t * EECMHelperHashtableHelper::AllocateEntry(EECMHelperHashtableKey *pKey, BOOL bDeepCopy, void* pHeap)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(return NULL;);
}
CONTRACTL_END;
EEHashEntry_t *pEntry;
if (bDeepCopy)
{
S_SIZE_T cbEntry = S_SIZE_T(sizeof(EEHashEntry) - 1 + sizeof(EECMHelperHashtableKey));
cbEntry += S_SIZE_T(pKey->GetMarshalerTypeNameByteCount());
cbEntry += S_SIZE_T(pKey->GetCookieStringByteCount());
cbEntry += S_SIZE_T(pKey->GetMarshalerInstantiation().GetNumArgs()) * S_SIZE_T(sizeof(LPVOID));
cbEntry += S_SIZE_T(sizeof(LPVOID)); // For EECMHelperHashtableKey::m_invokingAssembly
if (cbEntry.IsOverflow())
return NULL;
pEntry = (EEHashEntry_t *) new (nothrow) BYTE[cbEntry.Value()];
if (!pEntry)
return NULL;
EECMHelperHashtableKey *pEntryKey = (EECMHelperHashtableKey *) pEntry->Key;
pEntryKey->m_cMarshalerTypeNameBytes = pKey->GetMarshalerTypeNameByteCount();
pEntryKey->m_strMarshalerTypeName = (LPSTR) pEntry->Key + sizeof(EECMHelperHashtableKey);
pEntryKey->m_cCookieStrBytes = pKey->GetCookieStringByteCount();
pEntryKey->m_strCookie = (LPSTR) pEntry->Key + sizeof(EECMHelperHashtableKey) + pEntryKey->m_cMarshalerTypeNameBytes;
pEntryKey->m_Instantiation = Instantiation(
(TypeHandle *) (pEntryKey->m_strCookie + pEntryKey->m_cCookieStrBytes),
pKey->GetMarshalerInstantiation().GetNumArgs());
memcpy((void*)pEntryKey->m_strMarshalerTypeName, pKey->GetMarshalerTypeName(), pKey->GetMarshalerTypeNameByteCount());
memcpy((void*)pEntryKey->m_strCookie, pKey->GetCookieString(), pKey->GetCookieStringByteCount());
memcpy((void*)pEntryKey->m_Instantiation.GetRawArgs(), pKey->GetMarshalerInstantiation().GetRawArgs(),
pEntryKey->m_Instantiation.GetNumArgs() * sizeof(LPVOID));
pEntryKey->m_invokingAssembly = pKey->GetInvokingAssembly();
}
else
{
pEntry = (EEHashEntry_t *)
new (nothrow) BYTE[sizeof(EEHashEntry) - 1 + sizeof(EECMHelperHashtableKey)];
if (!pEntry)
return NULL;
EECMHelperHashtableKey *pEntryKey = (EECMHelperHashtableKey *) pEntry->Key;
pEntryKey->m_cMarshalerTypeNameBytes = pKey->GetMarshalerTypeNameByteCount();
pEntryKey->m_strMarshalerTypeName = pKey->GetMarshalerTypeName();
pEntryKey->m_cCookieStrBytes = pKey->GetCookieStringByteCount();
pEntryKey->m_strCookie = pKey->GetCookieString();
pEntryKey->m_Instantiation = Instantiation(pKey->GetMarshalerInstantiation());
pEntryKey->m_invokingAssembly = pKey->GetInvokingAssembly();
}
return pEntry;
}
void EECMHelperHashtableHelper::DeleteEntry(EEHashEntry_t *pEntry, void* pHeap)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
PRECONDITION(CheckPointer(pEntry));
}
CONTRACTL_END;
delete[] (BYTE*)pEntry;
}
BOOL EECMHelperHashtableHelper::CompareKeys(EEHashEntry_t *pEntry, EECMHelperHashtableKey *pKey)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
PRECONDITION(CheckPointer(pEntry));
PRECONDITION(CheckPointer(pKey));
}
CONTRACTL_END;
EECMHelperHashtableKey *pEntryKey = (EECMHelperHashtableKey *) pEntry->Key;
if (pEntryKey->GetMarshalerTypeNameByteCount() != pKey->GetMarshalerTypeNameByteCount())
return FALSE;
if (memcmp(pEntryKey->GetMarshalerTypeName(), pKey->GetMarshalerTypeName(), pEntryKey->GetMarshalerTypeNameByteCount()) != 0)
return FALSE;
if (pEntryKey->GetCookieStringByteCount() != pKey->GetCookieStringByteCount())
return FALSE;
if (memcmp(pEntryKey->GetCookieString(), pKey->GetCookieString(), pEntryKey->GetCookieStringByteCount()) != 0)
return FALSE;
DWORD dwNumTypeArgs = pEntryKey->GetMarshalerInstantiation().GetNumArgs();
if (dwNumTypeArgs != pKey->GetMarshalerInstantiation().GetNumArgs())
return FALSE;
for (DWORD i = 0; i < dwNumTypeArgs; i++)
{
if (pEntryKey->GetMarshalerInstantiation()[i] != pKey->GetMarshalerInstantiation()[i])
return FALSE;
}
if (pEntryKey->GetInvokingAssembly() != pKey->GetInvokingAssembly())
return FALSE;
return TRUE;
}
DWORD EECMHelperHashtableHelper::Hash(EECMHelperHashtableKey *pKey)
{
WRAPPER_NO_CONTRACT;
return (DWORD)
(HashBytes((const BYTE *) pKey->GetMarshalerTypeName(), pKey->GetMarshalerTypeNameByteCount()) +
HashBytes((const BYTE *) pKey->GetCookieString(), pKey->GetCookieStringByteCount()) +
HashBytes((const BYTE *) pKey->GetMarshalerInstantiation().GetRawArgs(), pKey->GetMarshalerInstantiation().GetNumArgs() * sizeof(LPVOID)));
}
OBJECTREF CustomMarshalerHelper::InvokeMarshalNativeToManagedMeth(void *pNative)
{
WRAPPER_NO_CONTRACT;
return GetCustomMarshalerInfo()->InvokeMarshalNativeToManagedMeth(pNative);
}
void *CustomMarshalerHelper::InvokeMarshalManagedToNativeMeth(OBJECTREF MngObj)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
void *RetVal = NULL;
GCPROTECT_BEGIN(MngObj)
{
CustomMarshalerInfo *pCMInfo = GetCustomMarshalerInfo();
RetVal = pCMInfo->InvokeMarshalManagedToNativeMeth(MngObj);
}
GCPROTECT_END();
return RetVal;
}
void CustomMarshalerHelper::InvokeCleanUpNativeMeth(void *pNative)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
OBJECTREF ExceptionObj = NULL;
GCPROTECT_BEGIN(ExceptionObj)
{
EX_TRY
{
GetCustomMarshalerInfo()->InvokeCleanUpNativeMeth(pNative);
}
EX_CATCH
{
ExceptionObj = GET_THROWABLE();
}
EX_END_CATCH(SwallowAllExceptions);
}
GCPROTECT_END();
}
void CustomMarshalerHelper::InvokeCleanUpManagedMeth(OBJECTREF MngObj)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
GCPROTECT_BEGIN(MngObj)
{
CustomMarshalerInfo *pCMInfo = GetCustomMarshalerInfo();
pCMInfo->InvokeCleanUpManagedMeth(MngObj);
}
GCPROTECT_END();
}
void *NonSharedCustomMarshalerHelper::operator new(size_t size, LoaderHeap *pHeap)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(CheckPointer(pHeap));
}
CONTRACTL_END;
return pHeap->AllocMem(S_SIZE_T(sizeof(NonSharedCustomMarshalerHelper)));
}
void NonSharedCustomMarshalerHelper::operator delete(void *pMem)
{
// Instances of this class are always allocated on the loader heap so
// the delete operator has nothing to do.
LIMITED_METHOD_CONTRACT;
}
SharedCustomMarshalerHelper::SharedCustomMarshalerHelper(Assembly *pAssembly, TypeHandle hndManagedType, LPCUTF8 strMarshalerTypeName, DWORD cMarshalerTypeNameBytes, LPCUTF8 strCookie, DWORD cCookieStrBytes)
: m_pAssembly(pAssembly)
, m_hndManagedType(hndManagedType)
, m_cMarshalerTypeNameBytes(cMarshalerTypeNameBytes)
, m_strMarshalerTypeName(strMarshalerTypeName)
, m_cCookieStrBytes(cCookieStrBytes)
, m_strCookie(strCookie)
{
WRAPPER_NO_CONTRACT;
}
void *SharedCustomMarshalerHelper::operator new(size_t size, LoaderHeap *pHeap)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(CheckPointer(pHeap));
}
CONTRACTL_END;
return pHeap->AllocMem(S_SIZE_T(sizeof(SharedCustomMarshalerHelper)));
}
void SharedCustomMarshalerHelper::operator delete(void *pMem)
{
// Instances of this class are always allocated on the loader heap so
// the delete operator has nothing to do.
LIMITED_METHOD_CONTRACT;
}
CustomMarshalerInfo *SharedCustomMarshalerHelper::GetCustomMarshalerInfo()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
// Retrieve the marshalling data for the current app domain.
EEMarshalingData *pMarshalingData = GetThread()->GetDomain()->GetLoaderAllocator()->GetMarshalingData();
// Retrieve the custom marshaling information for the current shared custom
// marshaling helper.
return pMarshalingData->GetCustomMarshalerInfo(this);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: CustomMarshalerInfo.cpp
//
//
// Custom marshaler information used when marshaling
// a parameter with a custom marshaler.
//
#include "common.h"
#include "custommarshalerinfo.h"
#include "mlinfo.h"
#include "sigbuilder.h"
//==========================================================================
// Implementation of the custom marshaler info class.
//==========================================================================
CustomMarshalerInfo::CustomMarshalerInfo(LoaderAllocator *pLoaderAllocator, TypeHandle hndCustomMarshalerType, TypeHandle hndManagedType, LPCUTF8 strCookie, DWORD cCookieStrBytes)
: m_NativeSize(0)
, m_hndManagedType(hndManagedType)
, m_pLoaderAllocator(pLoaderAllocator)
, m_hndCustomMarshaler(NULL)
, m_pMarshalNativeToManagedMD(NULL)
, m_pMarshalManagedToNativeMD(NULL)
, m_pCleanUpNativeDataMD(NULL)
, m_pCleanUpManagedDataMD(NULL)
, m_bDataIsByValue(FALSE)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(pLoaderAllocator));
}
CONTRACTL_END;
// Make sure the custom marshaller implements ICustomMarshaler.
if (!hndCustomMarshalerType.GetMethodTable()->CanCastToInterface(CoreLibBinder::GetClass(CLASS__ICUSTOM_MARSHALER)))
{
DefineFullyQualifiedNameForClassW()
COMPlusThrow(kApplicationException,
IDS_EE_ICUSTOMMARSHALERNOTIMPL,
GetFullyQualifiedNameForClassW(hndCustomMarshalerType.GetMethodTable()));
}
// Determine if this type is a value class.
m_bDataIsByValue = m_hndManagedType.GetMethodTable()->IsValueType();
// Custom marshalling of value classes is not currently supported.
if (m_bDataIsByValue)
COMPlusThrow(kNotSupportedException, W("NotSupported_ValueClassCM"));
// Run the <clinit> on the marshaler since it might not have run yet.
hndCustomMarshalerType.GetMethodTable()->EnsureInstanceActive();
hndCustomMarshalerType.GetMethodTable()->CheckRunClassInitThrowing();
// Create a COM+ string that will contain the string cookie.
STRINGREF CookieStringObj = StringObject::NewString(strCookie, cCookieStrBytes);
GCPROTECT_BEGIN(CookieStringObj);
// Load the method desc for the static method to retrieve the instance.
MethodDesc *pGetCustomMarshalerMD = GetCustomMarshalerMD(CustomMarshalerMethods_GetInstance, hndCustomMarshalerType);
// If the GetInstance method is generic, get an instantiating stub for it -
// the CallDescr infrastructure doesn't know how to pass secret generic arguments.
if (pGetCustomMarshalerMD->RequiresInstMethodTableArg())
{
pGetCustomMarshalerMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
pGetCustomMarshalerMD,
hndCustomMarshalerType.GetMethodTable(),
FALSE, // forceBoxedEntryPoint
Instantiation(), // methodInst
FALSE, // allowInstParam
FALSE); // forceRemotableMethod
_ASSERTE(!pGetCustomMarshalerMD->RequiresInstMethodTableArg());
}
MethodDescCallSite getCustomMarshaler(pGetCustomMarshalerMD, (OBJECTREF*)&CookieStringObj);
pGetCustomMarshalerMD->EnsureActive();
// Prepare the arguments that will be passed to GetCustomMarshaler.
ARG_SLOT GetCustomMarshalerArgs[] = {
ObjToArgSlot(CookieStringObj)
};
// Call the GetCustomMarshaler method to retrieve the custom marshaler to use.
OBJECTREF CustomMarshalerObj = NULL;
GCPROTECT_BEGIN(CustomMarshalerObj);
CustomMarshalerObj = getCustomMarshaler.Call_RetOBJECTREF(GetCustomMarshalerArgs);
if (!CustomMarshalerObj)
{
DefineFullyQualifiedNameForClassW()
COMPlusThrow(kApplicationException,
IDS_EE_NOCUSTOMMARSHALER,
GetFullyQualifiedNameForClassW(hndCustomMarshalerType.GetMethodTable()));
}
    // Load the method descs for all the methods in the ICustomMarshaler interface based on the type of the marshaler object.
TypeHandle customMarshalerObjType = CustomMarshalerObj->GetMethodTable();
m_pMarshalNativeToManagedMD = GetCustomMarshalerMD(CustomMarshalerMethods_MarshalNativeToManaged, customMarshalerObjType);
m_pMarshalManagedToNativeMD = GetCustomMarshalerMD(CustomMarshalerMethods_MarshalManagedToNative, customMarshalerObjType);
m_pCleanUpNativeDataMD = GetCustomMarshalerMD(CustomMarshalerMethods_CleanUpNativeData, customMarshalerObjType);
m_pCleanUpManagedDataMD = GetCustomMarshalerMD(CustomMarshalerMethods_CleanUpManagedData, customMarshalerObjType);
m_hndCustomMarshaler = pLoaderAllocator->AllocateHandle(CustomMarshalerObj);
GCPROTECT_END();
// Retrieve the size of the native data.
if (m_bDataIsByValue)
{
// <TODO>@TODO(DM): Call GetNativeDataSize() to retrieve the size of the native data.</TODO>
_ASSERTE(!"Value classes are not yet supported by the custom marshaler!");
}
else
{
m_NativeSize = sizeof(void *);
}
GCPROTECT_END();
}
CustomMarshalerInfo::~CustomMarshalerInfo()
{
WRAPPER_NO_CONTRACT;
if (m_pLoaderAllocator->IsAlive() && m_hndCustomMarshaler)
{
// Only free the LOADERHANDLE if the LoaderAllocator is still alive.
// If the loader allocator isn't alive, the handle has automatically
// been collected already.
m_pLoaderAllocator->FreeHandle(m_hndCustomMarshaler);
}
m_hndCustomMarshaler = NULL;
}
void *CustomMarshalerInfo::operator new(size_t size, LoaderHeap *pHeap)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(CheckPointer(pHeap));
}
CONTRACTL_END;
return pHeap->AllocMem(S_SIZE_T(sizeof(CustomMarshalerInfo)));
}
void CustomMarshalerInfo::operator delete(void *pMem)
{
// Instances of this class are always allocated on the loader heap so
// the delete operator has nothing to do.
LIMITED_METHOD_CONTRACT;
}
OBJECTREF CustomMarshalerInfo::InvokeMarshalNativeToManagedMeth(void *pNative)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(pNative, NULL_OK));
}
CONTRACTL_END;
if (!pNative)
return NULL;
OBJECTREF managedObject;
OBJECTREF customMarshaler = m_pLoaderAllocator->GetHandleValue(m_hndCustomMarshaler);
GCPROTECT_BEGIN (customMarshaler);
MethodDescCallSite marshalNativeToManaged(m_pMarshalNativeToManagedMD, &customMarshaler);
ARG_SLOT Args[] = {
ObjToArgSlot(customMarshaler),
PtrToArgSlot(pNative)
};
managedObject = marshalNativeToManaged.Call_RetOBJECTREF(Args);
GCPROTECT_END ();
return managedObject;
}
void *CustomMarshalerInfo::InvokeMarshalManagedToNativeMeth(OBJECTREF MngObj)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
void *RetVal = NULL;
if (!MngObj)
return NULL;
GCPROTECT_BEGIN (MngObj);
OBJECTREF customMarshaler = m_pLoaderAllocator->GetHandleValue(m_hndCustomMarshaler);
GCPROTECT_BEGIN (customMarshaler);
MethodDescCallSite marshalManagedToNative(m_pMarshalManagedToNativeMD, &customMarshaler);
ARG_SLOT Args[] = {
ObjToArgSlot(customMarshaler),
ObjToArgSlot(MngObj)
};
RetVal = marshalManagedToNative.Call_RetLPVOID(Args);
GCPROTECT_END ();
GCPROTECT_END ();
return RetVal;
}
void CustomMarshalerInfo::InvokeCleanUpNativeMeth(void *pNative)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(pNative, NULL_OK));
}
CONTRACTL_END;
if (!pNative)
return;
OBJECTREF customMarshaler = m_pLoaderAllocator->GetHandleValue(m_hndCustomMarshaler);
GCPROTECT_BEGIN (customMarshaler);
MethodDescCallSite cleanUpNativeData(m_pCleanUpNativeDataMD, &customMarshaler);
ARG_SLOT Args[] = {
ObjToArgSlot(customMarshaler),
PtrToArgSlot(pNative)
};
cleanUpNativeData.Call(Args);
GCPROTECT_END();
}
void CustomMarshalerInfo::InvokeCleanUpManagedMeth(OBJECTREF MngObj)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
if (!MngObj)
return;
GCPROTECT_BEGIN (MngObj);
OBJECTREF customMarshaler = m_pLoaderAllocator->GetHandleValue(m_hndCustomMarshaler);
GCPROTECT_BEGIN (customMarshaler);
MethodDescCallSite cleanUpManagedData(m_pCleanUpManagedDataMD, &customMarshaler);
ARG_SLOT Args[] = {
ObjToArgSlot(customMarshaler),
ObjToArgSlot(MngObj)
};
cleanUpManagedData.Call(Args);
GCPROTECT_END ();
GCPROTECT_END ();
}
MethodDesc *CustomMarshalerInfo::GetCustomMarshalerMD(EnumCustomMarshalerMethods Method, TypeHandle hndCustomMarshalertype)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
MethodTable *pMT = hndCustomMarshalertype.AsMethodTable();
_ASSERTE(pMT->CanCastToInterface(CoreLibBinder::GetClass(CLASS__ICUSTOM_MARSHALER)));
MethodDesc *pMD = NULL;
switch (Method)
{
case CustomMarshalerMethods_MarshalNativeToManaged:
pMD = pMT->GetMethodDescForInterfaceMethod(
CoreLibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__MARSHAL_NATIVE_TO_MANAGED),
TRUE /* throwOnConflict */);
break;
case CustomMarshalerMethods_MarshalManagedToNative:
pMD = pMT->GetMethodDescForInterfaceMethod(
CoreLibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__MARSHAL_MANAGED_TO_NATIVE),
TRUE /* throwOnConflict */);
break;
case CustomMarshalerMethods_CleanUpNativeData:
pMD = pMT->GetMethodDescForInterfaceMethod(
CoreLibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__CLEANUP_NATIVE_DATA),
TRUE /* throwOnConflict */);
break;
case CustomMarshalerMethods_CleanUpManagedData:
pMD = pMT->GetMethodDescForInterfaceMethod(
CoreLibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__CLEANUP_MANAGED_DATA),
TRUE /* throwOnConflict */);
break;
case CustomMarshalerMethods_GetNativeDataSize:
pMD = pMT->GetMethodDescForInterfaceMethod(
CoreLibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__GET_NATIVE_DATA_SIZE),
TRUE /* throwOnConflict */);
break;
case CustomMarshalerMethods_GetInstance:
// Must look this up by name since it's static
pMD = MemberLoader::FindMethod(pMT, "GetInstance", &gsig_SM_Str_RetICustomMarshaler);
if (!pMD)
{
DefineFullyQualifiedNameForClassW()
COMPlusThrow(kApplicationException,
IDS_EE_GETINSTANCENOTIMPL,
GetFullyQualifiedNameForClassW(pMT));
}
break;
default:
_ASSERTE(!"Unknown custom marshaler method");
}
_ASSERTE(pMD && "Unable to find specified CustomMarshaler method");
// Ensure that the value types in the signature are loaded.
MetaSig::EnsureSigValueTypesLoaded(pMD);
// Return the specified method desc.
return pMD;
}
//==========================================================================
// Implementation of the custom marshaler hashtable helper.
//==========================================================================
EEHashEntry_t * EECMHelperHashtableHelper::AllocateEntry(EECMHelperHashtableKey *pKey, BOOL bDeepCopy, void* pHeap)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(return NULL;);
}
CONTRACTL_END;
EEHashEntry_t *pEntry;
if (bDeepCopy)
{
S_SIZE_T cbEntry = S_SIZE_T(sizeof(EEHashEntry) - 1 + sizeof(EECMHelperHashtableKey));
cbEntry += S_SIZE_T(pKey->GetMarshalerTypeNameByteCount());
cbEntry += S_SIZE_T(pKey->GetCookieStringByteCount());
cbEntry += S_SIZE_T(pKey->GetMarshalerInstantiation().GetNumArgs()) * S_SIZE_T(sizeof(LPVOID));
cbEntry += S_SIZE_T(sizeof(LPVOID)); // For EECMHelperHashtableKey::m_invokingAssembly
if (cbEntry.IsOverflow())
return NULL;
pEntry = (EEHashEntry_t *) new (nothrow) BYTE[cbEntry.Value()];
if (!pEntry)
return NULL;
EECMHelperHashtableKey *pEntryKey = (EECMHelperHashtableKey *) pEntry->Key;
pEntryKey->m_cMarshalerTypeNameBytes = pKey->GetMarshalerTypeNameByteCount();
pEntryKey->m_strMarshalerTypeName = (LPSTR) pEntry->Key + sizeof(EECMHelperHashtableKey);
pEntryKey->m_cCookieStrBytes = pKey->GetCookieStringByteCount();
pEntryKey->m_strCookie = (LPSTR) pEntry->Key + sizeof(EECMHelperHashtableKey) + pEntryKey->m_cMarshalerTypeNameBytes;
pEntryKey->m_Instantiation = Instantiation(
(TypeHandle *) (pEntryKey->m_strCookie + pEntryKey->m_cCookieStrBytes),
pKey->GetMarshalerInstantiation().GetNumArgs());
memcpy((void*)pEntryKey->m_strMarshalerTypeName, pKey->GetMarshalerTypeName(), pKey->GetMarshalerTypeNameByteCount());
memcpy((void*)pEntryKey->m_strCookie, pKey->GetCookieString(), pKey->GetCookieStringByteCount());
memcpy((void*)pEntryKey->m_Instantiation.GetRawArgs(), pKey->GetMarshalerInstantiation().GetRawArgs(),
pEntryKey->m_Instantiation.GetNumArgs() * sizeof(LPVOID));
pEntryKey->m_invokingAssembly = pKey->GetInvokingAssembly();
}
else
{
pEntry = (EEHashEntry_t *)
new (nothrow) BYTE[sizeof(EEHashEntry) - 1 + sizeof(EECMHelperHashtableKey)];
if (!pEntry)
return NULL;
EECMHelperHashtableKey *pEntryKey = (EECMHelperHashtableKey *) pEntry->Key;
pEntryKey->m_cMarshalerTypeNameBytes = pKey->GetMarshalerTypeNameByteCount();
pEntryKey->m_strMarshalerTypeName = pKey->GetMarshalerTypeName();
pEntryKey->m_cCookieStrBytes = pKey->GetCookieStringByteCount();
pEntryKey->m_strCookie = pKey->GetCookieString();
pEntryKey->m_Instantiation = Instantiation(pKey->GetMarshalerInstantiation());
pEntryKey->m_invokingAssembly = pKey->GetInvokingAssembly();
}
return pEntry;
}
void EECMHelperHashtableHelper::DeleteEntry(EEHashEntry_t *pEntry, void* pHeap)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
PRECONDITION(CheckPointer(pEntry));
}
CONTRACTL_END;
delete[] (BYTE*)pEntry;
}
BOOL EECMHelperHashtableHelper::CompareKeys(EEHashEntry_t *pEntry, EECMHelperHashtableKey *pKey)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
PRECONDITION(CheckPointer(pEntry));
PRECONDITION(CheckPointer(pKey));
}
CONTRACTL_END;
EECMHelperHashtableKey *pEntryKey = (EECMHelperHashtableKey *) pEntry->Key;
if (pEntryKey->GetMarshalerTypeNameByteCount() != pKey->GetMarshalerTypeNameByteCount())
return FALSE;
if (memcmp(pEntryKey->GetMarshalerTypeName(), pKey->GetMarshalerTypeName(), pEntryKey->GetMarshalerTypeNameByteCount()) != 0)
return FALSE;
if (pEntryKey->GetCookieStringByteCount() != pKey->GetCookieStringByteCount())
return FALSE;
if (memcmp(pEntryKey->GetCookieString(), pKey->GetCookieString(), pEntryKey->GetCookieStringByteCount()) != 0)
return FALSE;
DWORD dwNumTypeArgs = pEntryKey->GetMarshalerInstantiation().GetNumArgs();
if (dwNumTypeArgs != pKey->GetMarshalerInstantiation().GetNumArgs())
return FALSE;
for (DWORD i = 0; i < dwNumTypeArgs; i++)
{
if (pEntryKey->GetMarshalerInstantiation()[i] != pKey->GetMarshalerInstantiation()[i])
return FALSE;
}
if (pEntryKey->GetInvokingAssembly() != pKey->GetInvokingAssembly())
return FALSE;
return TRUE;
}
DWORD EECMHelperHashtableHelper::Hash(EECMHelperHashtableKey *pKey)
{
WRAPPER_NO_CONTRACT;
return (DWORD)
(HashBytes((const BYTE *) pKey->GetMarshalerTypeName(), pKey->GetMarshalerTypeNameByteCount()) +
HashBytes((const BYTE *) pKey->GetCookieString(), pKey->GetCookieStringByteCount()) +
HashBytes((const BYTE *) pKey->GetMarshalerInstantiation().GetRawArgs(), pKey->GetMarshalerInstantiation().GetNumArgs() * sizeof(LPVOID)));
}
OBJECTREF CustomMarshalerHelper::InvokeMarshalNativeToManagedMeth(void *pNative)
{
WRAPPER_NO_CONTRACT;
return GetCustomMarshalerInfo()->InvokeMarshalNativeToManagedMeth(pNative);
}
void *CustomMarshalerHelper::InvokeMarshalManagedToNativeMeth(OBJECTREF MngObj)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
void *RetVal = NULL;
GCPROTECT_BEGIN(MngObj)
{
CustomMarshalerInfo *pCMInfo = GetCustomMarshalerInfo();
RetVal = pCMInfo->InvokeMarshalManagedToNativeMeth(MngObj);
}
GCPROTECT_END();
return RetVal;
}
void CustomMarshalerHelper::InvokeCleanUpNativeMeth(void *pNative)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
OBJECTREF ExceptionObj = NULL;
GCPROTECT_BEGIN(ExceptionObj)
{
EX_TRY
{
GetCustomMarshalerInfo()->InvokeCleanUpNativeMeth(pNative);
}
EX_CATCH
{
ExceptionObj = GET_THROWABLE();
}
EX_END_CATCH(SwallowAllExceptions);
}
GCPROTECT_END();
}
void CustomMarshalerHelper::InvokeCleanUpManagedMeth(OBJECTREF MngObj)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
GCPROTECT_BEGIN(MngObj)
{
CustomMarshalerInfo *pCMInfo = GetCustomMarshalerInfo();
pCMInfo->InvokeCleanUpManagedMeth(MngObj);
}
GCPROTECT_END();
}
void *NonSharedCustomMarshalerHelper::operator new(size_t size, LoaderHeap *pHeap)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(CheckPointer(pHeap));
}
CONTRACTL_END;
return pHeap->AllocMem(S_SIZE_T(sizeof(NonSharedCustomMarshalerHelper)));
}
void NonSharedCustomMarshalerHelper::operator delete(void *pMem)
{
// Instances of this class are always allocated on the loader heap so
// the delete operator has nothing to do.
LIMITED_METHOD_CONTRACT;
}
SharedCustomMarshalerHelper::SharedCustomMarshalerHelper(Assembly *pAssembly, TypeHandle hndManagedType, LPCUTF8 strMarshalerTypeName, DWORD cMarshalerTypeNameBytes, LPCUTF8 strCookie, DWORD cCookieStrBytes)
: m_pAssembly(pAssembly)
, m_hndManagedType(hndManagedType)
, m_cMarshalerTypeNameBytes(cMarshalerTypeNameBytes)
, m_strMarshalerTypeName(strMarshalerTypeName)
, m_cCookieStrBytes(cCookieStrBytes)
, m_strCookie(strCookie)
{
WRAPPER_NO_CONTRACT;
}
void *SharedCustomMarshalerHelper::operator new(size_t size, LoaderHeap *pHeap)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(CheckPointer(pHeap));
}
CONTRACTL_END;
return pHeap->AllocMem(S_SIZE_T(sizeof(SharedCustomMarshalerHelper)));
}
void SharedCustomMarshalerHelper::operator delete(void *pMem)
{
// Instances of this class are always allocated on the loader heap so
// the delete operator has nothing to do.
LIMITED_METHOD_CONTRACT;
}
CustomMarshalerInfo *SharedCustomMarshalerHelper::GetCustomMarshalerInfo()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
// Retrieve the marshalling data for the current app domain.
EEMarshalingData *pMarshalingData = GetThread()->GetDomain()->GetLoaderAllocator()->GetMarshalingData();
// Retrieve the custom marshaling information for the current shared custom
// marshaling helper.
return pMarshalingData->GetCustomMarshalerInfo(this);
}
| -1 |
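Since custommarshalerinfo.cpp above implements the runtime half of ICustomMarshaler — the by-name GetInstance lookup and the four marshal/clean-up calls — a minimal managed counterpart may help. This is only an illustrative sketch: the AnsiStringMarshaler name and its ANSI-string behavior are invented for the example.

```csharp
using System;
using System.Runtime.InteropServices;

// Sketch of a custom marshaler that passes a managed string as an ANSI buffer.
public sealed class AnsiStringMarshaler : ICustomMarshaler
{
    private static readonly AnsiStringMarshaler s_instance = new AnsiStringMarshaler();

    // The runtime resolves this static method by name and signature
    // (CustomMarshalerMethods_GetInstance above); the cookie comes from the
    // MarshalCookie field of the MarshalAs attribute.
    public static ICustomMarshaler GetInstance(string cookie) => s_instance;

    public object MarshalNativeToManaged(IntPtr pNativeData) =>
        Marshal.PtrToStringAnsi(pNativeData) ?? string.Empty;

    public IntPtr MarshalManagedToNative(object managedObj) =>
        Marshal.StringToCoTaskMemAnsi((string)managedObj);

    public void CleanUpNativeData(IntPtr pNativeData) => Marshal.FreeCoTaskMem(pNativeData);

    public void CleanUpManagedData(object managedObj) { }

    // Reference types report -1; by-value custom marshaling is rejected above anyway.
    public int GetNativeDataSize() => -1;
}

// Typical use on a P/Invoke parameter (library name, entry point, and cookie are placeholders):
// [DllImport("native")]
// static extern void Print(
//     [MarshalAs(UnmanagedType.CustomMarshaler,
//                MarshalTypeRef = typeof(AnsiStringMarshaler),
//                MarshalCookie = "demo")] string text);
```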
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/nativeaot/Runtime/AsmOffsetsVerify.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#include "gcenv.h"
#include "gcheaputilities.h"
#include "rhassert.h"
#include "RedhawkWarnings.h"
#include "slist.h"
#include "gcrhinterface.h"
#include "varint.h"
#include "regdisplay.h"
#include "StackFrameIterator.h"
#include "thread.h"
#include "TargetPtrs.h"
#include "rhbinder.h"
#include "RWLock.h"
#include "RuntimeInstance.h"
#include "CachedInterfaceDispatch.h"
#include "shash.h"
#include "CallDescr.h"
class AsmOffsets
{
static_assert(sizeof(Thread::m_rgbAllocContextBuffer) >= sizeof(gc_alloc_context), "Thread::m_rgbAllocContextBuffer is not big enough to hold a gc_alloc_context");
    // Some assembly helpers for arrays and strings are shared and use the fact that arrays and strings have similar layouts.
static_assert(offsetof(Array, m_Length) == offsetof(String, m_Length), "The length field of String and Array have different offsets");
static_assert(sizeof(((Array*)0)->m_Length) == sizeof(((String*)0)->m_Length), "The length field of String and Array have different sizes");
#define PLAT_ASM_OFFSET(offset, cls, member) \
static_assert((offsetof(cls, member) == 0x##offset) || (offsetof(cls, member) > 0x##offset), "Bad asm offset for '" #cls "." #member "', the actual offset is smaller than 0x" #offset "."); \
static_assert((offsetof(cls, member) == 0x##offset) || (offsetof(cls, member) < 0x##offset), "Bad asm offset for '" #cls "." #member "', the actual offset is larger than 0x" #offset ".");
#define PLAT_ASM_SIZEOF(size, cls ) \
static_assert((sizeof(cls) == 0x##size) || (sizeof(cls) > 0x##size), "Bad asm size for '" #cls "', the actual size is smaller than 0x" #size "."); \
static_assert((sizeof(cls) == 0x##size) || (sizeof(cls) < 0x##size), "Bad asm size for '" #cls "', the actual size is larger than 0x" #size ".");
#define PLAT_ASM_CONST(constant, expr) \
static_assert(((expr) == 0x##constant) || ((expr) > 0x##constant), "Bad asm constant for '" #expr "', the actual value is smaller than 0x" #constant "."); \
static_assert(((expr) == 0x##constant) || ((expr) < 0x##constant), "Bad asm constant for '" #expr "', the actual value is larger than 0x" #constant ".");
#include "AsmOffsets.h"
};
#ifdef _MSC_VER
namespace { char WorkaroundLNK4221Warning; };
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#include "gcenv.h"
#include "gcheaputilities.h"
#include "rhassert.h"
#include "RedhawkWarnings.h"
#include "slist.h"
#include "gcrhinterface.h"
#include "varint.h"
#include "regdisplay.h"
#include "StackFrameIterator.h"
#include "thread.h"
#include "TargetPtrs.h"
#include "rhbinder.h"
#include "RWLock.h"
#include "RuntimeInstance.h"
#include "CachedInterfaceDispatch.h"
#include "shash.h"
#include "CallDescr.h"
class AsmOffsets
{
static_assert(sizeof(Thread::m_rgbAllocContextBuffer) >= sizeof(gc_alloc_context), "Thread::m_rgbAllocContextBuffer is not big enough to hold a gc_alloc_context");
    // Some assembly helpers for arrays and strings are shared and use the fact that arrays and strings have similar layouts.
static_assert(offsetof(Array, m_Length) == offsetof(String, m_Length), "The length field of String and Array have different offsets");
static_assert(sizeof(((Array*)0)->m_Length) == sizeof(((String*)0)->m_Length), "The length field of String and Array have different sizes");
#define PLAT_ASM_OFFSET(offset, cls, member) \
static_assert((offsetof(cls, member) == 0x##offset) || (offsetof(cls, member) > 0x##offset), "Bad asm offset for '" #cls "." #member "', the actual offset is smaller than 0x" #offset "."); \
static_assert((offsetof(cls, member) == 0x##offset) || (offsetof(cls, member) < 0x##offset), "Bad asm offset for '" #cls "." #member "', the actual offset is larger than 0x" #offset ".");
#define PLAT_ASM_SIZEOF(size, cls ) \
static_assert((sizeof(cls) == 0x##size) || (sizeof(cls) > 0x##size), "Bad asm size for '" #cls "', the actual size is smaller than 0x" #size "."); \
static_assert((sizeof(cls) == 0x##size) || (sizeof(cls) < 0x##size), "Bad asm size for '" #cls "', the actual size is larger than 0x" #size ".");
#define PLAT_ASM_CONST(constant, expr) \
static_assert(((expr) == 0x##constant) || ((expr) > 0x##constant), "Bad asm constant for '" #expr "', the actual value is smaller than 0x" #constant "."); \
static_assert(((expr) == 0x##constant) || ((expr) < 0x##constant), "Bad asm constant for '" #expr "', the actual value is larger than 0x" #constant ".");
#include "AsmOffsets.h"
};
#ifdef _MSC_VER
namespace { char WorkaroundLNK4221Warning; };
#endif
| -1 |
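The PR 65916 description repeated in these rows concerns multi-reg GT_STORE_LCL_VAR nodes whose non-first register slot holds a GC pointer. Below is a hedged C# sketch of the kind of shape that can produce that IR on win-arm64 — a 16-byte struct returned in two registers with the reference in the second slot. The names are invented, and whether the local actually becomes an enregistered multi-reg local depends on the JIT's usual heuristics.

```csharp
using System;

public struct NumberAndPayload
{
    public long Number;    // first register slot: not a GC pointer
    public object Payload; // second slot: the GC pointer that must stay reported
}

public static class MultiRegDemo
{
    // On arm64 a 16-byte non-HFA struct is returned in two registers, so the
    // store of the call result into a local can become a multi-reg store.
    public static NumberAndPayload Make(long n) =>
        new NumberAndPayload { Number = n, Payload = new string('x', 3) };

    public static int Use()
    {
        NumberAndPayload pair = Make(42); // candidate multi-reg GT_STORE_LCL_VAR
        GC.Collect();                     // a GC here must still see pair.Payload
        return ((string)pair.Payload).Length + (int)pair.Number;
    }

    public static void Main() => Console.WriteLine(Use()); // prints 45
}
```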
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/file_io/SetEndOfFile/test1/SetEndOfFile.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: SetEndOfFile.c (test 1)
**
** Purpose: Tests the PAL implementation of the SetEndOfFile function.
** This test will attempt to operate on a NULL file handle and
** also test truncating a file not opened with GENERIC_WRITE
**
** Assumes successful:
** SetEndOfFile
** CreateFile
** CloseHandle
**
**
**===================================================================*/
#include <palsuite.h>
PALTEST(file_io_SetEndOfFile_test1_paltest_setendoffile_test1, "file_io/SetEndOfFile/test1/paltest_setendoffile_test1")
{
HANDLE hFile = NULL;
BOOL bRc = FALSE;
if (0 != PAL_Initialize(argc,argv))
{
return FAIL;
}
bRc = SetEndOfFile(NULL);
if (bRc == TRUE)
{
Fail("SetEndOfFile: ERROR -> Operation succeeded on a NULL file "
"handle\n");
}
/* create a test file */
hFile = CreateFile(szTextFile,
GENERIC_READ,
FILE_SHARE_READ,
NULL,
OPEN_ALWAYS,
FILE_ATTRIBUTE_NORMAL,
NULL);
if(hFile == INVALID_HANDLE_VALUE)
{
Fail("SetEndOfFile: ERROR -> Unable to create file \"%s\".\n",
szTextFile);
}
bRc = SetEndOfFile(hFile);
if (bRc == TRUE)
{
Trace("SetEndOfFile: ERROR -> Operation succeeded on read-only"
" file.\n");
bRc = CloseHandle(hFile);
if (bRc != TRUE)
{
Trace("SetEndOfFile: ERROR -> Unable to close file \"%s\".\n",
szTextFile);
}
PAL_TerminateEx(FAIL);
return FAIL;
}
bRc = CloseHandle(hFile);
if (bRc != TRUE)
{
Fail("SetEndOfFile: ERROR -> Unable to close file \"%s\".\n",
szTextFile);
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: SetEndOfFile.c (test 1)
**
** Purpose: Tests the PAL implementation of the SetEndOfFile function.
** This test will attempt to operate on a NULL file handle and
** also test truncating a file not opened with GENERIC_WRITE
**
** Assumes successful:
** SetEndOfFile
** CreateFile
** CloseHandle
**
**
**===================================================================*/
#include <palsuite.h>
PALTEST(file_io_SetEndOfFile_test1_paltest_setendoffile_test1, "file_io/SetEndOfFile/test1/paltest_setendoffile_test1")
{
HANDLE hFile = NULL;
BOOL bRc = FALSE;
if (0 != PAL_Initialize(argc,argv))
{
return FAIL;
}
bRc = SetEndOfFile(NULL);
if (bRc == TRUE)
{
Fail("SetEndOfFile: ERROR -> Operation succeeded on a NULL file "
"handle\n");
}
/* create a test file */
hFile = CreateFile(szTextFile,
GENERIC_READ,
FILE_SHARE_READ,
NULL,
OPEN_ALWAYS,
FILE_ATTRIBUTE_NORMAL,
NULL);
if(hFile == INVALID_HANDLE_VALUE)
{
Fail("SetEndOfFile: ERROR -> Unable to create file \"%s\".\n",
szTextFile);
}
bRc = SetEndOfFile(hFile);
if (bRc == TRUE)
{
Trace("SetEndOfFile: ERROR -> Operation succeeded on read-only"
" file.\n");
bRc = CloseHandle(hFile);
if (bRc != TRUE)
{
Trace("SetEndOfFile: ERROR -> Unable to close file \"%s\".\n",
szTextFile);
}
PAL_TerminateEx(FAIL);
return FAIL;
}
bRc = CloseHandle(hFile);
if (bRc != TRUE)
{
Fail("SetEndOfFile: ERROR -> Unable to close file \"%s\".\n",
szTextFile);
}
PAL_Terminate();
return PASS;
}
| -1 |
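The PAL test above expects SetEndOfFile to fail on a NULL handle and on a handle opened without GENERIC_WRITE. The same contract is visible from managed code, where FileStream.SetLength ultimately relies on SetEndOfFile/ftruncate; the sketch below (temporary file name invented) shows the read-only case being rejected.

```csharp
using System;
using System.IO;

class TruncateDemo
{
    static void Main()
    {
        string path = Path.Combine(Path.GetTempPath(), "setendoffile_demo.txt");
        File.WriteAllText(path, "hello");

        // Open for reading only: truncation must be refused, mirroring the
        // GENERIC_READ case exercised by the PAL test.
        using (FileStream fs = new FileStream(path, FileMode.Open, FileAccess.Read))
        {
            try
            {
                fs.SetLength(0);
                Console.WriteLine("unexpected: truncation succeeded");
            }
            catch (NotSupportedException)
            {
                Console.WriteLine("expected: read-only stream cannot be truncated");
            }
        }

        File.Delete(path);
    }
}
```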
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots were, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
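To make the failure mode described above concrete, here is a deliberately simplified model of per-slot GC reporting. The Slot struct and ReportGcRegs function are invented for illustration and are not the JIT's real data structures; the point is only that a GC reference living in a non-first register slot must still be tracked even when slot 0 stays on the stack.

```cpp
#include <cstdio>

struct Slot
{
    bool inRegister; // the slot ended up in a register rather than on the stack
    bool isGcRef;    // the slot holds a GC pointer
};

// Count the register slots that must be reported to the GC. The buggy pattern
// effectively gave up when slot 0 was not enregistered; the fix is to consider
// every slot independently.
unsigned ReportGcRegs(const Slot* slots, unsigned count)
{
    unsigned reported = 0;
    for (unsigned i = 0; i < count; i++)
    {
        if (slots[i].inRegister && slots[i].isGcRef)
            reported++;
    }
    return reported;
}

int main()
{
    Slot pair[2] = { { false, false },   // slot 0: spilled, not a GC ref
                     { true,  true  } }; // slot 1: enregistered GC ref
    printf("GC-tracked registers: %u\n", ReportGcRegs(pair, 2)); // prints 1
    return 0;
}
```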
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/src/include/pal/critsect.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*++
Module Name:
include/pal/critsect.h
Abstract:
Header file for the critical sections functions.
--*/
#ifndef _PAL_CRITSECT_H_
#define _PAL_CRITSECT_H_
#ifdef __cplusplus
extern "C"
{
#endif // __cplusplus
VOID InternalInitializeCriticalSection(CRITICAL_SECTION *pcs);
VOID InternalDeleteCriticalSection(CRITICAL_SECTION *pcs);
/* The following PALCEnterCriticalSection and PALCLeaveCriticalSection
functions are intended to provide CorUnix's InternalEnterCriticalSection
and InternalLeaveCriticalSection functionalities to legacy C code,
which has no knowledge of CPalThread, classes and namespaces.
*/
VOID PALCEnterCriticalSection(CRITICAL_SECTION *pcs);
VOID PALCLeaveCriticalSection(CRITICAL_SECTION *pcs);
#ifdef __cplusplus
}
#endif // __cplusplus
#endif /* _PAL_CRITSECT_H_ */
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*++
Module Name:
include/pal/critsect.h
Abstract:
Header file for the critical sections functions.
--*/
#ifndef _PAL_CRITSECT_H_
#define _PAL_CRITSECT_H_
#ifdef __cplusplus
extern "C"
{
#endif // __cplusplus
VOID InternalInitializeCriticalSection(CRITICAL_SECTION *pcs);
VOID InternalDeleteCriticalSection(CRITICAL_SECTION *pcs);
/* The following PALCEnterCriticalSection and PALCLeaveCriticalSection
functions are intended to provide CorUnix's InternalEnterCriticalSection
and InternalLeaveCriticalSection functionalities to legacy C code,
which has no knowledge of CPalThread, classes and namespaces.
*/
VOID PALCEnterCriticalSection(CRITICAL_SECTION *pcs);
VOID PALCLeaveCriticalSection(CRITICAL_SECTION *pcs);
#ifdef __cplusplus
}
#endif // __cplusplus
#endif /* _PAL_CRITSECT_H_ */
| -1 |
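A small usage sketch for the legacy-C entry points declared in the header above. It assumes the PAL headers that define CRITICAL_SECTION are on the include path; the counter is only an example.

```cpp
#include "pal/critsect.h"

static CRITICAL_SECTION g_lock;
static int g_counter = 0;

void CounterInit(void)     { InternalInitializeCriticalSection(&g_lock); }

void CounterBump(void)
{
    PALCEnterCriticalSection(&g_lock);   // legacy C code has no CPalThread
    g_counter++;
    PALCLeaveCriticalSection(&g_lock);
}

void CounterShutdown(void) { InternalDeleteCriticalSection(&g_lock); }
```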
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/c_runtime/ceil/test1/test1.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose: Tests ceil with simple positive and negative values. Also tests
** extreme cases like extremely small values and positive and
** negative infinity. Makes sure that calling ceil on NaN returns
** NaN
**
**==========================================================================*/
#include <palsuite.h>
// binary64 (double) has a machine epsilon of 2^-52 (approx. 2.22e-16). However, this
// is slightly too accurate when writing tests meant to run against libm implementations
// for various platforms. 2^-50 (approx. 8.88e-16) seems to be as accurate as we can get.
//
// The tests themselves will take PAL_EPSILON and adjust it according to the expected result
// so that the delta used for comparison will compare the most significant digits and ignore
// any digits that are outside the double precision range (15-17 digits).
// For example, a test with an expected result in the format of 0.xxxxxxxxxxxxxxxxx will use
// PAL_EPSILON for the variance, while an expected result in the format of 0.0xxxxxxxxxxxxxxxxx
// will use PAL_EPSILON / 10 and an expected result in the format of x.xxxxxxxxxxxxxxxx will
// use PAL_EPSILON * 10.
#define PAL_EPSILON 8.8817841970012523e-16
#define PAL_NAN sqrt(-1.0)
#define PAL_POSINF -log(0.0)
#define PAL_NEGINF log(0.0)
/**
* Helper test structure
*/
struct test
{
double value; /* value to test the function with */
double expected; /* expected result */
double variance; /* maximum delta between the expected and actual result */
};
/**
* ceil_test1_validate
*
* test validation function
*/
void __cdecl ceil_test1_validate(double value, double expected, double variance)
{
double result = ceil(value);
/*
* The test is valid when the difference between result
* and expected is less than or equal to variance
*/
double delta = fabs(result - expected);
if (delta > variance)
{
Fail("ceil(%g) returned %20.17g when it should have returned %20.17g",
value, result, expected);
}
}
/**
* ceil_test1_validate
*
* test validation function for values returning NaN
*/
void __cdecl ceil_test1_validate_isnan(double value)
{
double result = ceil(value);
if (!_isnan(result))
{
Fail("ceil(%g) returned %20.17g when it should have returned %20.17g",
value, result, PAL_NAN);
}
}
/**
* main
*
* executable entry point
*/
PALTEST(c_runtime_ceil_test1_paltest_ceil_test1, "c_runtime/ceil/test1/paltest_ceil_test1")
{
struct test tests[] =
{
/* value expected variance */
{ 0.31830988618379067, 1, PAL_EPSILON * 10 }, // value: 1 / pi
{ 0.43429448190325183, 1, PAL_EPSILON * 10 }, // value: log10(e)
{ 0.63661977236758134, 1, PAL_EPSILON * 10 }, // value: 2 / pi
{ 0.69314718055994531, 1, PAL_EPSILON * 10 }, // value: ln(2)
{ 0.70710678118654752, 1, PAL_EPSILON * 10 }, // value: 1 / sqrt(2)
{ 0.78539816339744831, 1, PAL_EPSILON * 10 }, // value: pi / 4
{ 1.1283791670955126, 2, PAL_EPSILON * 10 }, // value: 2 / sqrt(pi)
{ 1.4142135623730950, 2, PAL_EPSILON * 10 }, // value: sqrt(2)
{ 1.4426950408889634, 2, PAL_EPSILON * 10 }, // value: log2(e)
{ 1.5707963267948966, 2, PAL_EPSILON * 10 }, // value: pi / 2
{ 2.3025850929940457, 3, PAL_EPSILON * 10 }, // value: ln(10)
{ 2.7182818284590452, 3, PAL_EPSILON * 10 }, // value: e
{ 3.1415926535897932, 4, PAL_EPSILON * 10 }, // value: pi
{ PAL_POSINF, PAL_POSINF, 0 }
};
/* PAL initialization */
if (PAL_Initialize(argc, argv) != 0)
{
return FAIL;
}
ceil_test1_validate( 0, 0, PAL_EPSILON);
ceil_test1_validate(-0.0, 0, PAL_EPSILON);
ceil_test1_validate( 1, 1, PAL_EPSILON * 10);
ceil_test1_validate(-1.0, -1, PAL_EPSILON * 10);
for (int i = 0; i < (sizeof(tests) / sizeof(struct test)); i++)
{
ceil_test1_validate( tests[i].value, tests[i].expected, tests[i].variance);
ceil_test1_validate(-tests[i].value, 1 - tests[i].expected, tests[i].variance);
}
ceil_test1_validate_isnan(PAL_NAN);
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose: Tests ceil with simple positive and negative values. Also tests
** extreme cases like extremely small values and positive and
** negative infinity. Makes sure that calling ceil on NaN returns
** NaN
**
**==========================================================================*/
#include <palsuite.h>
// binary64 (double) has a machine epsilon of 2^-52 (approx. 2.22e-16). However, this
// is slightly too accurate when writing tests meant to run against libm implementations
// for various platforms. 2^-50 (approx. 8.88e-16) seems to be as accurate as we can get.
//
// The tests themselves will take PAL_EPSILON and adjust it according to the expected result
// so that the delta used for comparison will compare the most significant digits and ignore
// any digits that are outside the double precision range (15-17 digits).
// For example, a test with an expected result in the format of 0.xxxxxxxxxxxxxxxxx will use
// PAL_EPSILON for the variance, while an expected result in the format of 0.0xxxxxxxxxxxxxxxxx
// will use PAL_EPSILON / 10 and an expected result in the format of x.xxxxxxxxxxxxxxxx will
// use PAL_EPSILON * 10.
#define PAL_EPSILON 8.8817841970012523e-16
#define PAL_NAN sqrt(-1.0)
#define PAL_POSINF -log(0.0)
#define PAL_NEGINF log(0.0)
/**
* Helper test structure
*/
struct test
{
double value; /* value to test the function with */
double expected; /* expected result */
double variance; /* maximum delta between the expected and actual result */
};
/**
* ceil_test1_validate
*
* test validation function
*/
void __cdecl ceil_test1_validate(double value, double expected, double variance)
{
double result = ceil(value);
/*
* The test is valid when the difference between result
* and expected is less than or equal to variance
*/
double delta = fabs(result - expected);
if (delta > variance)
{
Fail("ceil(%g) returned %20.17g when it should have returned %20.17g",
value, result, expected);
}
}
/**
* ceil_test1_validate
*
* test validation function for values returning NaN
*/
void __cdecl ceil_test1_validate_isnan(double value)
{
double result = ceil(value);
if (!_isnan(result))
{
Fail("ceil(%g) returned %20.17g when it should have returned %20.17g",
value, result, PAL_NAN);
}
}
/**
* main
*
* executable entry point
*/
PALTEST(c_runtime_ceil_test1_paltest_ceil_test1, "c_runtime/ceil/test1/paltest_ceil_test1")
{
struct test tests[] =
{
/* value expected variance */
{ 0.31830988618379067, 1, PAL_EPSILON * 10 }, // value: 1 / pi
{ 0.43429448190325183, 1, PAL_EPSILON * 10 }, // value: log10(e)
{ 0.63661977236758134, 1, PAL_EPSILON * 10 }, // value: 2 / pi
{ 0.69314718055994531, 1, PAL_EPSILON * 10 }, // value: ln(2)
{ 0.70710678118654752, 1, PAL_EPSILON * 10 }, // value: 1 / sqrt(2)
{ 0.78539816339744831, 1, PAL_EPSILON * 10 }, // value: pi / 4
{ 1.1283791670955126, 2, PAL_EPSILON * 10 }, // value: 2 / sqrt(pi)
{ 1.4142135623730950, 2, PAL_EPSILON * 10 }, // value: sqrt(2)
{ 1.4426950408889634, 2, PAL_EPSILON * 10 }, // value: log2(e)
{ 1.5707963267948966, 2, PAL_EPSILON * 10 }, // value: pi / 2
{ 2.3025850929940457, 3, PAL_EPSILON * 10 }, // value: ln(10)
{ 2.7182818284590452, 3, PAL_EPSILON * 10 }, // value: e
{ 3.1415926535897932, 4, PAL_EPSILON * 10 }, // value: pi
{ PAL_POSINF, PAL_POSINF, 0 }
};
/* PAL initialization */
if (PAL_Initialize(argc, argv) != 0)
{
return FAIL;
}
ceil_test1_validate( 0, 0, PAL_EPSILON);
ceil_test1_validate(-0.0, 0, PAL_EPSILON);
ceil_test1_validate( 1, 1, PAL_EPSILON * 10);
ceil_test1_validate(-1.0, -1, PAL_EPSILON * 10);
for (int i = 0; i < (sizeof(tests) / sizeof(struct test)); i++)
{
ceil_test1_validate( tests[i].value, tests[i].expected, tests[i].variance);
ceil_test1_validate(-tests[i].value, 1 - tests[i].expected, tests[i].variance);
}
ceil_test1_validate_isnan(PAL_NAN);
PAL_Terminate();
return PASS;
}
| -1 |
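The variance column in the test above follows the rule spelled out in the PAL_EPSILON comment: scale the 2^-50 tolerance by the magnitude of the expected value so that roughly 15-17 significant digits are compared. A standalone sketch of that rule; NearlyEqual and EPS are illustrative names, not part of the test.

```cpp
#include <math.h>
#include <stdio.h>

static const double EPS = 8.8817841970012523e-16;  // 2^-50

// Compare to within EPS scaled by the decimal magnitude of 'expected',
// mirroring the PAL_EPSILON, PAL_EPSILON * 10, PAL_EPSILON / 10 pattern.
int NearlyEqual(double actual, double expected)
{
    double scale = (expected == 0.0)
                       ? 1.0
                       : pow(10.0, floor(log10(fabs(expected))) + 1.0);
    return fabs(actual - expected) <= EPS * scale;
}

int main(void)
{
    printf("%d\n", NearlyEqual(ceil(2.7182818284590452), 3.0)); // 1
    printf("%d\n", NearlyEqual(ceil(-0.5), 0.0));               // 1
    return 0;
}
```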
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/jit/jitstd.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "allocator.h"
#include "list.h"
#include "utility.h"
#include "vector.h"
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "allocator.h"
#include "list.h"
#include "utility.h"
#include "vector.h"
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/tests/Interop/PInvoke/AsAny/AsAnyNative.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <xplatform.h>
struct test1 {
LONG64 a;
LONG64 b;
};
extern "C" DLL_EXPORT LONG64 STDMETHODCALLTYPE PassLayout(test1* i) {
printf("PassLayout: i->a = %lld\n", i->a);
printf("PassLayout: i->b = %lld\n", i->b);
return i->b;
}
struct AsAnyField
{
int * intArray;
};
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassUnicodeStr(LPCWSTR str)
{
return (SHORT)str[0] == 0x0030 && (SHORT)str[1] == 0x7777 && (SHORT)str[2] == 0x000A;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassAnsiStr(LPCSTR str , BOOL isIncludeUnMappableChar)
{
if(isIncludeUnMappableChar)
return (BYTE)str[0] == 0x30 && (BYTE)str[1] == 0x3f && (BYTE)str[2] == 0x0A;
else
return (BYTE)str[0] == 0x30 && (BYTE)str[1] == 0x35 && (BYTE)str[2] == 0x0A;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassUnicodeStrbd(LPCWSTR str)
{
return (SHORT)str[0] == 0x0030 && (SHORT)str[1] == 0x7777 && (SHORT)str[2] == 0x000A;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassAnsiStrbd(LPCSTR str , BOOL isIncludeUnMappableChar)
{
if(isIncludeUnMappableChar)
return (BYTE)str[0] == 0x30 && (BYTE)str[1] == 0x3f && (BYTE)str[2] == 0x0A;
else
return (BYTE)str[0] == 0x30 && (BYTE)str[1] == 0x35 && (BYTE)str[2] == 0x0A;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassUnicodeCharArray(WCHAR CharArray_In [], WCHAR CharArray_InOut [], WCHAR CharArray_Out [])
{
BOOL ret = FALSE;
ret = (SHORT)CharArray_In[0] == 0x0030 && (SHORT)CharArray_In[1] == 0x7777 && (SHORT)CharArray_In[2] == 0x000A
&& (SHORT)CharArray_InOut[0] == 0x0030 && (SHORT)CharArray_InOut[1] == 0x7777 && (SHORT)CharArray_InOut[2] == 0x000A ;
    // reverse the string for passing back
WCHAR temp = CharArray_InOut[0];
CharArray_InOut[0] = CharArray_InOut[2];
CharArray_Out[0] = CharArray_InOut[2];
CharArray_Out[1] = CharArray_InOut[1];
CharArray_InOut[2] = temp;
CharArray_Out[2] = temp;
return ret;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassAnsiCharArray(CHAR CharArray_In [], CHAR CharArray_InOut [], CHAR CharArray_Out [] ,
BOOL isIncludeUnMappableChar)
{
BOOL ret = FALSE;
if(isIncludeUnMappableChar)
ret = (BYTE)CharArray_In[0] == 0x30 && (BYTE)CharArray_In[1] == 0x3f && (BYTE)CharArray_In[2] == 0x0A
&& (BYTE)CharArray_InOut[0] == 0x30 && (BYTE)CharArray_InOut[1] == 0x3f && (BYTE)CharArray_InOut[2] == 0x0A;
else
ret = (BYTE)CharArray_In[0] == 0x30 && (BYTE)CharArray_In[1] == 0x35 && (BYTE)CharArray_In[2] == 0x0A
&& (BYTE)CharArray_InOut[0] == 0x30 && (BYTE)CharArray_InOut[1] == 0x35 && (BYTE)CharArray_InOut[2] == 0x0A;
// reverse the string for passing back
CHAR temp = CharArray_InOut[0];
CharArray_InOut[0] = CharArray_InOut[2];
CharArray_Out[0] = CharArray_InOut[2];
CharArray_Out[1] = CharArray_InOut[1];
CharArray_InOut[2] = temp;
CharArray_Out[2] = temp;
return ret;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArraySbyte(
BYTE sbyteArray[], BYTE sbyteArray_In[], BYTE sbyteArray_InOut[], BYTE sbyteArray_Out[], BYTE expected[], int len){
for(int i = 0; i < len; i++)
{
if(sbyteArray[i] != expected[i] || sbyteArray_In[i] != expected[i] || sbyteArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArraySbyte\n");
return FALSE;
}
sbyteArray_InOut[i] = 10 + expected[i];
sbyteArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayByte(
BYTE byteArray[], BYTE byteArray_In[], BYTE byteArray_InOut[], BYTE byteArray_Out[], BYTE expected[], int len){
for(int i = 0; i < len; i++)
{
if(byteArray[i] != expected[i] || byteArray_In[i] != expected[i] || byteArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayByte\n");
return FALSE;
}
byteArray_InOut[i] = 10 + expected[i];
byteArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayShort(
SHORT shortArray[], SHORT shortArray_In[], SHORT shortArray_InOut[], SHORT shortArray_Out[], SHORT expected[], int len){
for(int i = 0; i < len; i++)
{
if(shortArray[i] != expected[i] || shortArray_In[i] != expected[i] || shortArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayShort\n");
return FALSE;
}
shortArray_InOut[i] = 10 + expected[i];
shortArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayUshort(
USHORT ushortArray[], USHORT ushortArray_In[], USHORT ushortArray_InOut[], USHORT ushortArray_Out[], USHORT expected[], int len){
for(int i = 0; i < len; i++)
{
if(ushortArray[i] != expected[i] || ushortArray_In[i] != expected[i] || ushortArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayUshort\n");
return FALSE;
}
ushortArray_InOut[i] = 10 + expected[i];
ushortArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayInt(
int IntArray[], int IntArray_In[], int IntArray_InOut[], int IntArray_Out[], int expected[], int len){
for(int i = 0; i < len; i++)
{
if(IntArray[i] != expected[i] || IntArray_In[i] != expected[i] || IntArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayInt\n");
return FALSE;
}
IntArray_InOut[i] = 10 + expected[i];
IntArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayUint(
UINT uintArray[], UINT uintArray_In[], UINT uintArray_InOut[], UINT uintArray_Out[], UINT expected[], int len){
for(int i = 0; i < len; i++)
{
if(uintArray[i] != expected[i] || uintArray_In[i] != expected[i] || uintArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayUint\n");
return FALSE;
}
uintArray_InOut[i] = 10 + expected[i];
uintArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayLong(
LONG64 longArray[], LONG64 longArray_In[], LONG64 longArray_InOut[], LONG64 longArray_Out[], LONG64 expected[], int len){
for(int i = 0; i < len; i++)
{
if(longArray[i] != expected[i] || longArray_In[i] != expected[i] || longArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayLong\n");
return FALSE;
}
longArray_InOut[i] = 10 + expected[i];
longArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayUlong(
LONG64 ulongArray[], LONG64 ulongArray_In[], LONG64 ulongArray_InOut[],
LONG64 ulongArray_Out[], LONG64 expected[], int len){
for(int i = 0; i < len; i++)
{
if(ulongArray[i] != expected[i] || ulongArray_In[i] != expected[i] || ulongArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayUlong\n");
return FALSE;
}
ulongArray_InOut[i] = 10 + expected[i];
ulongArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArraySingle(
float singleArray[], float singleArray_In[], float singleArray_InOut[],
float singleArray_Out[], float expected[], int len){
for(int i = 0; i < len; i++)
{
if(singleArray[i] != expected[i] || singleArray_In[i] != expected[i] || singleArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArraySingle\n");
return FALSE;
}
singleArray_InOut[i] = 10 + expected[i];
singleArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayDouble(
double doubleArray[], double doubleArray_In[], double doubleArray_InOut[], double doubleArray_Out[], double expected[], int len){
for(int i = 0; i < len; i++)
{
if(doubleArray[i] != expected[i] || doubleArray_In[i] != expected[i] || doubleArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayDouble\n");
return FALSE;
}
doubleArray_InOut[i] = 10 + expected[i];
doubleArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayChar(
CHAR charArray[], CHAR charArray_In[], CHAR charArray_InOut[], CHAR charArray_Out[], CHAR expected[], int len){
for(int i = 0; i < len; i++)
{
if(charArray[i] != expected[i] || charArray_In[i] != expected[i] || charArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayChar\n");
return FALSE;
}
}
charArray_InOut[0] = 100;
charArray_Out[0] = 100;
charArray_InOut[1] = 101;
charArray_Out[1] = 101;
charArray_InOut[2] = 102;
charArray_Out[2] = 102;
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayBool(
BOOL boolArray[], BOOL boolArray_In[], BOOL boolArray_InOut[], BOOL boolArray_Out[], BOOL expected[], int len){
for(int i = 0; i < len; i++)
{
if(boolArray[i] != expected[i] || boolArray_In[i] != expected[i] || boolArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayBool\n");
return FALSE;
}
}
boolArray_InOut[0] = FALSE;
boolArray_Out[0] = FALSE;
boolArray_InOut[1] = TRUE;
boolArray_Out[1] = TRUE;
boolArray_InOut[2] = TRUE;
boolArray_Out[2] = TRUE;
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayIntPtr(
INT_PTR intPtrArray[], INT_PTR intPtrArray_In[], INT_PTR intPtrArray_InOut[], INT_PTR intPtrArray_Out[], INT_PTR expected[], int len){
for(int i = 0; i < len; i++)
{
if(intPtrArray[i] != expected[i] || intPtrArray_In[i] != expected[i] || intPtrArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayIntPtr\n");
return FALSE;
}
intPtrArray_InOut[i] = 10 + expected[i];
intPtrArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayUIntPtr(
UINT_PTR uIntPtrArray[], UINT_PTR uIntPtrArray_In[], UINT_PTR uIntPtrArray_InOut[], UINT_PTR uIntPtrArray_Out[], UINT_PTR expected[], int len){
for(int i = 0; i < len; i++)
{
if(uIntPtrArray[i] != expected[i] || uIntPtrArray_In[i] != expected[i] || uIntPtrArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayUIntPtr\n");
return FALSE;
}
uIntPtrArray_InOut[i] = 10 + expected[i];
uIntPtrArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassMixStruct(AsAnyField mix){
return TRUE;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <xplatform.h>
struct test1 {
LONG64 a;
LONG64 b;
};
extern "C" DLL_EXPORT LONG64 STDMETHODCALLTYPE PassLayout(test1* i) {
printf("PassLayout: i->a = %lld\n", i->a);
printf("PassLayout: i->b = %lld\n", i->b);
return i->b;
}
struct AsAnyField
{
int * intArray;
};
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassUnicodeStr(LPCWSTR str)
{
return (SHORT)str[0] == 0x0030 && (SHORT)str[1] == 0x7777 && (SHORT)str[2] == 0x000A;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassAnsiStr(LPCSTR str , BOOL isIncludeUnMappableChar)
{
if(isIncludeUnMappableChar)
return (BYTE)str[0] == 0x30 && (BYTE)str[1] == 0x3f && (BYTE)str[2] == 0x0A;
else
return (BYTE)str[0] == 0x30 && (BYTE)str[1] == 0x35 && (BYTE)str[2] == 0x0A;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassUnicodeStrbd(LPCWSTR str)
{
return (SHORT)str[0] == 0x0030 && (SHORT)str[1] == 0x7777 && (SHORT)str[2] == 0x000A;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassAnsiStrbd(LPCSTR str , BOOL isIncludeUnMappableChar)
{
if(isIncludeUnMappableChar)
return (BYTE)str[0] == 0x30 && (BYTE)str[1] == 0x3f && (BYTE)str[2] == 0x0A;
else
return (BYTE)str[0] == 0x30 && (BYTE)str[1] == 0x35 && (BYTE)str[2] == 0x0A;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassUnicodeCharArray(WCHAR CharArray_In [], WCHAR CharArray_InOut [], WCHAR CharArray_Out [])
{
BOOL ret = FALSE;
ret = (SHORT)CharArray_In[0] == 0x0030 && (SHORT)CharArray_In[1] == 0x7777 && (SHORT)CharArray_In[2] == 0x000A
&& (SHORT)CharArray_InOut[0] == 0x0030 && (SHORT)CharArray_InOut[1] == 0x7777 && (SHORT)CharArray_InOut[2] == 0x000A ;
    // reverse the string for passing back
WCHAR temp = CharArray_InOut[0];
CharArray_InOut[0] = CharArray_InOut[2];
CharArray_Out[0] = CharArray_InOut[2];
CharArray_Out[1] = CharArray_InOut[1];
CharArray_InOut[2] = temp;
CharArray_Out[2] = temp;
return ret;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassAnsiCharArray(CHAR CharArray_In [], CHAR CharArray_InOut [], CHAR CharArray_Out [] ,
BOOL isIncludeUnMappableChar)
{
BOOL ret = FALSE;
if(isIncludeUnMappableChar)
ret = (BYTE)CharArray_In[0] == 0x30 && (BYTE)CharArray_In[1] == 0x3f && (BYTE)CharArray_In[2] == 0x0A
&& (BYTE)CharArray_InOut[0] == 0x30 && (BYTE)CharArray_InOut[1] == 0x3f && (BYTE)CharArray_InOut[2] == 0x0A;
else
ret = (BYTE)CharArray_In[0] == 0x30 && (BYTE)CharArray_In[1] == 0x35 && (BYTE)CharArray_In[2] == 0x0A
&& (BYTE)CharArray_InOut[0] == 0x30 && (BYTE)CharArray_InOut[1] == 0x35 && (BYTE)CharArray_InOut[2] == 0x0A;
// reverse the string for passing back
CHAR temp = CharArray_InOut[0];
CharArray_InOut[0] = CharArray_InOut[2];
CharArray_Out[0] = CharArray_InOut[2];
CharArray_Out[1] = CharArray_InOut[1];
CharArray_InOut[2] = temp;
CharArray_Out[2] = temp;
return ret;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArraySbyte(
BYTE sbyteArray[], BYTE sbyteArray_In[], BYTE sbyteArray_InOut[], BYTE sbyteArray_Out[], BYTE expected[], int len){
for(int i = 0; i < len; i++)
{
if(sbyteArray[i] != expected[i] || sbyteArray_In[i] != expected[i] || sbyteArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArraySbyte\n");
return FALSE;
}
sbyteArray_InOut[i] = 10 + expected[i];
sbyteArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayByte(
BYTE byteArray[], BYTE byteArray_In[], BYTE byteArray_InOut[], BYTE byteArray_Out[], BYTE expected[], int len){
for(int i = 0; i < len; i++)
{
if(byteArray[i] != expected[i] || byteArray_In[i] != expected[i] || byteArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayByte\n");
return FALSE;
}
byteArray_InOut[i] = 10 + expected[i];
byteArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayShort(
SHORT shortArray[], SHORT shortArray_In[], SHORT shortArray_InOut[], SHORT shortArray_Out[], SHORT expected[], int len){
for(int i = 0; i < len; i++)
{
if(shortArray[i] != expected[i] || shortArray_In[i] != expected[i] || shortArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayShort\n");
return FALSE;
}
shortArray_InOut[i] = 10 + expected[i];
shortArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayUshort(
USHORT ushortArray[], USHORT ushortArray_In[], USHORT ushortArray_InOut[], USHORT ushortArray_Out[], USHORT expected[], int len){
for(int i = 0; i < len; i++)
{
if(ushortArray[i] != expected[i] || ushortArray_In[i] != expected[i] || ushortArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayUshort\n");
return FALSE;
}
ushortArray_InOut[i] = 10 + expected[i];
ushortArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayInt(
int IntArray[], int IntArray_In[], int IntArray_InOut[], int IntArray_Out[], int expected[], int len){
for(int i = 0; i < len; i++)
{
if(IntArray[i] != expected[i] || IntArray_In[i] != expected[i] || IntArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayInt\n");
return FALSE;
}
IntArray_InOut[i] = 10 + expected[i];
IntArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayUint(
UINT uintArray[], UINT uintArray_In[], UINT uintArray_InOut[], UINT uintArray_Out[], UINT expected[], int len){
for(int i = 0; i < len; i++)
{
if(uintArray[i] != expected[i] || uintArray_In[i] != expected[i] || uintArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayUint\n");
return FALSE;
}
uintArray_InOut[i] = 10 + expected[i];
uintArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayLong(
LONG64 longArray[], LONG64 longArray_In[], LONG64 longArray_InOut[], LONG64 longArray_Out[], LONG64 expected[], int len){
for(int i = 0; i < len; i++)
{
if(longArray[i] != expected[i] || longArray_In[i] != expected[i] || longArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayLong\n");
return FALSE;
}
longArray_InOut[i] = 10 + expected[i];
longArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayUlong(
LONG64 ulongArray[], LONG64 ulongArray_In[], LONG64 ulongArray_InOut[],
LONG64 ulongArray_Out[], LONG64 expected[], int len){
for(int i = 0; i < len; i++)
{
if(ulongArray[i] != expected[i] || ulongArray_In[i] != expected[i] || ulongArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayUlong\n");
return FALSE;
}
ulongArray_InOut[i] = 10 + expected[i];
ulongArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArraySingle(
float singleArray[], float singleArray_In[], float singleArray_InOut[],
float singleArray_Out[], float expected[], int len){
for(int i = 0; i < len; i++)
{
if(singleArray[i] != expected[i] || singleArray_In[i] != expected[i] || singleArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArraySingle\n");
return FALSE;
}
singleArray_InOut[i] = 10 + expected[i];
singleArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayDouble(
double doubleArray[], double doubleArray_In[], double doubleArray_InOut[], double doubleArray_Out[], double expected[], int len){
for(int i = 0; i < len; i++)
{
if(doubleArray[i] != expected[i] || doubleArray_In[i] != expected[i] || doubleArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayDouble\n");
return FALSE;
}
doubleArray_InOut[i] = 10 + expected[i];
doubleArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayChar(
CHAR charArray[], CHAR charArray_In[], CHAR charArray_InOut[], CHAR charArray_Out[], CHAR expected[], int len){
for(int i = 0; i < len; i++)
{
if(charArray[i] != expected[i] || charArray_In[i] != expected[i] || charArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayChar\n");
return FALSE;
}
}
charArray_InOut[0] = 100;
charArray_Out[0] = 100;
charArray_InOut[1] = 101;
charArray_Out[1] = 101;
charArray_InOut[2] = 102;
charArray_Out[2] = 102;
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayBool(
BOOL boolArray[], BOOL boolArray_In[], BOOL boolArray_InOut[], BOOL boolArray_Out[], BOOL expected[], int len){
for(int i = 0; i < len; i++)
{
if(boolArray[i] != expected[i] || boolArray_In[i] != expected[i] || boolArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayBool\n");
return FALSE;
}
}
boolArray_InOut[0] = FALSE;
boolArray_Out[0] = FALSE;
boolArray_InOut[1] = TRUE;
boolArray_Out[1] = TRUE;
boolArray_InOut[2] = TRUE;
boolArray_Out[2] = TRUE;
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayIntPtr(
INT_PTR intPtrArray[], INT_PTR intPtrArray_In[], INT_PTR intPtrArray_InOut[], INT_PTR intPtrArray_Out[], INT_PTR expected[], int len){
for(int i = 0; i < len; i++)
{
if(intPtrArray[i] != expected[i] || intPtrArray_In[i] != expected[i] || intPtrArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayIntPtr\n");
return FALSE;
}
intPtrArray_InOut[i] = 10 + expected[i];
intPtrArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassArrayUIntPtr(
UINT_PTR uIntPtrArray[], UINT_PTR uIntPtrArray_In[], UINT_PTR uIntPtrArray_InOut[], UINT_PTR uIntPtrArray_Out[], UINT_PTR expected[], int len){
for(int i = 0; i < len; i++)
{
if(uIntPtrArray[i] != expected[i] || uIntPtrArray_In[i] != expected[i] || uIntPtrArray_InOut[i] != expected[i])
{
            printf("Not correct pass in parameter in PassArrayUIntPtr\n");
return FALSE;
}
uIntPtrArray_InOut[i] = 10 + expected[i];
uIntPtrArray_Out[i] = 10 + expected[i];
}
return TRUE;
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE PassMixStruct(AsAnyField mix){
return TRUE;
}
| -1 |
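Every PassArray* export above repeats the same contract: the In and InOut buffers must arrive holding the expected values, and the InOut and Out buffers are written back with expected + 10 for the managed caller to verify. A generic restatement of that pattern; CheckAndEcho is a hypothetical helper, not part of the test library.

```cpp
#include <cstdio>

template <typename T>
bool CheckAndEcho(const T* in, T* inOut, T* out, const T* expected, int len)
{
    for (int i = 0; i < len; i++)
    {
        // [In] and [In,Out] data must round-trip from the managed side intact.
        if (in[i] != expected[i] || inOut[i] != expected[i])
        {
            printf("unexpected element at index %d\n", i);
            return false;
        }
        inOut[i] = expected[i] + 10; // marshalled back because it is In/Out
        out[i]   = expected[i] + 10; // marshalled back because it is Out
    }
    return true;
}
```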
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/threading/DuplicateHandle/test7/test7.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test7.c (DuplicateHandle)
**
** Purpose: Tests the PAL implementation of the DuplicateHandle function,
** with a handle from CreateThread. The test will create a thread
** handle and its duplicate. Then get the priorities of the threads,
** set the priority of one and the change should be seen in the
** other.
**
**
**===================================================================*/
#include <palsuite.h>
DWORD PALAPI CreateTestThread_DuplicateHandle_test7(LPVOID lpParam);
PALTEST(threading_DuplicateHandle_test7_paltest_duplicatehandle_test7, "threading/DuplicateHandle/test7/paltest_duplicatehandle_test7")
{
HANDLE hThread;
HANDLE hDupThread;
DWORD dwThreadId = 0;
LPTHREAD_START_ROUTINE lpStartAddress = &CreateTestThread_DuplicateHandle_test7;
HANDLE hSyncEvent;
int threadPriority;
int duplicatePriority;
int finalPriority;
/* Initialize the PAL.*/
if ((PAL_Initialize(argc, argv)) != 0)
{
return (FAIL);
}
LPSECURITY_ATTRIBUTES lpEventAttributes = NULL;
BOOL bManualReset = TRUE;
BOOL bInitialState = FALSE;
hSyncEvent = CreateEvent(lpEventAttributes,
bManualReset,
bInitialState,
NULL);
if (hSyncEvent == NULL)
{
Fail("ERROR:%u: Unable to create sync event.\n",
GetLastError());
}
/* Create a thread.*/
hThread = CreateThread(NULL, /* SD*/
(DWORD)0, /* initial stack size*/
lpStartAddress, /* thread function*/
(VOID*)hSyncEvent,/* thread argument*/
(DWORD)0, /* creation option*/
&dwThreadId); /* thread identifier*/
if (hThread == NULL)
{
Fail("ERROR:%u: Unable to create thread.\n",
GetLastError());
}
/* Duplicate the thread handle.*/
if (!(DuplicateHandle(GetCurrentProcess(), /* source handle process*/
hThread, /* handle to duplicate*/
GetCurrentProcess(), /* target process handle*/
&hDupThread, /* duplicate handle*/
(DWORD)0, /* requested access*/
FALSE, /* handle inheritance*/
DUPLICATE_SAME_ACCESS))) /* optional actions*/
{
Trace("ERROR: %ld :Fail to create the duplicate handle"
" to hThread=0x%lx",
GetLastError(),
hThread);
CloseHandle(hThread);
Fail("");
}
/* Get the priority of the thread.*/
threadPriority = GetThreadPriority(hThread);
if(threadPriority != 0)
{
Trace("ERROR: Thread priority of hThread=0x%lx should be "
"set to normal THREAD_PRIORITY_NORMAL=%d\n",
hThread,
THREAD_PRIORITY_NORMAL);
CloseHandle(hThread);
CloseHandle(hDupThread);
Fail("");
}
/* Get the priority of the duplicated handle, and compare it to
* the priority of the original thread. Should be the same.*/
    duplicatePriority = GetThreadPriority(hDupThread);
if(duplicatePriority != threadPriority)
{
Trace("ERROR: Expected priority of hThread=0x%lx and hDupThread=0x%lx"
" to be the same. Priorities:hThread=\"%d\":hDupThread=\"%d\"\n",
hThread,
hDupThread,
threadPriority,
duplicatePriority);
CloseHandle(hThread);
CloseHandle(hDupThread);
Fail("");
}
/* Set the priority of the duplicate thread.*/
if(!SetThreadPriority (hDupThread,THREAD_PRIORITY_HIGHEST))
{
Trace("ERROR:%u: SetThreadPriority failed on hThread=0x%lx\n",
GetLastError(),
hDupThread);
CloseHandle(hThread);
CloseHandle(hDupThread);
Fail("");
}
/* Get the priority of the original thread, and
* compare it to what the duplicate was set to.*/
finalPriority = GetThreadPriority(hThread);
if (finalPriority != THREAD_PRIORITY_HIGHEST)
{
        Trace("ERROR: Expected priority of hThread=0x%lx and "
              "hDupThread=0x%lx to be set the same. Priorities:"
"hThread=\"%d\":hDupThread=\"%d\".\n",
hThread,
hDupThread,
threadPriority,
duplicatePriority);
CloseHandle(hThread);
CloseHandle(hDupThread);
Fail("");
}
/* Signal the helper thread that it can shut down */
if (!SetEvent(hSyncEvent))
{
Fail("ERROR:%u: Failed to set event.\n",
GetLastError());
}
/* Wait on the original thread.*/
if((WaitForSingleObject(hThread, 100)) != WAIT_OBJECT_0)
{
Trace("ERROR:%u: hThread=0x%lx is in a non-signalled "
"mode, yet created signalled.\n",
GetLastError(),
hThread);
CloseHandle(hThread);
CloseHandle(hDupThread);
Fail("");
}
/* Clean-up thread and Terminate the PAL.*/
CloseHandle(hSyncEvent);
CloseHandle(hThread);
CloseHandle(hDupThread);
PAL_Terminate();
return PASS;
}
/*Thread testing function*/
DWORD PALAPI CreateTestThread_DuplicateHandle_test7(LPVOID lpParam)
{
HANDLE hSyncEvent = (HANDLE)lpParam;
/* Wait until the main thread signals that this helper thread should shut down */
WaitForSingleObject(hSyncEvent, INFINITE);
return (DWORD)0;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test7.c (DuplicateHandle)
**
** Purpose: Tests the PAL implementation of the DuplicateHandle function,
** with a handle from CreateThread. The test will create a thread
** handle and its duplicate. Then get the priorities of the threads,
** set the priority of one and the change should be seen in the
** other.
**
**
**===================================================================*/
#include <palsuite.h>
DWORD PALAPI CreateTestThread_DuplicateHandle_test7(LPVOID lpParam);
PALTEST(threading_DuplicateHandle_test7_paltest_duplicatehandle_test7, "threading/DuplicateHandle/test7/paltest_duplicatehandle_test7")
{
HANDLE hThread;
HANDLE hDupThread;
DWORD dwThreadId = 0;
LPTHREAD_START_ROUTINE lpStartAddress = &CreateTestThread_DuplicateHandle_test7;
HANDLE hSyncEvent;
int threadPriority;
int duplicatePriority;
int finalPriority;
/* Initialize the PAL.*/
if ((PAL_Initialize(argc, argv)) != 0)
{
return (FAIL);
}
LPSECURITY_ATTRIBUTES lpEventAttributes = NULL;
BOOL bManualReset = TRUE;
BOOL bInitialState = FALSE;
hSyncEvent = CreateEvent(lpEventAttributes,
bManualReset,
bInitialState,
NULL);
if (hSyncEvent == NULL)
{
Fail("ERROR:%u: Unable to create sync event.\n",
GetLastError());
}
/* Create a thread.*/
hThread = CreateThread(NULL, /* SD*/
(DWORD)0, /* initial stack size*/
lpStartAddress, /* thread function*/
(VOID*)hSyncEvent,/* thread argument*/
(DWORD)0, /* creation option*/
&dwThreadId); /* thread identifier*/
if (hThread == NULL)
{
Fail("ERROR:%u: Unable to create thread.\n",
GetLastError());
}
/* Duplicate the thread handle.*/
if (!(DuplicateHandle(GetCurrentProcess(), /* source handle process*/
hThread, /* handle to duplicate*/
GetCurrentProcess(), /* target process handle*/
&hDupThread, /* duplicate handle*/
(DWORD)0, /* requested access*/
FALSE, /* handle inheritance*/
DUPLICATE_SAME_ACCESS))) /* optional actions*/
{
Trace("ERROR: %ld :Fail to create the duplicate handle"
" to hThread=0x%lx",
GetLastError(),
hThread);
CloseHandle(hThread);
Fail("");
}
/* Get the priority of the thread.*/
threadPriority = GetThreadPriority(hThread);
if(threadPriority != 0)
{
Trace("ERROR: Thread priority of hThread=0x%lx should be "
"set to normal THREAD_PRIORITY_NORMAL=%d\n",
hThread,
THREAD_PRIORITY_NORMAL);
CloseHandle(hThread);
CloseHandle(hDupThread);
Fail("");
}
/* Get the priority of the duplicated handle, and compare it to
* the priority of the original thread. Should be the same.*/
    duplicatePriority = GetThreadPriority(hDupThread);
if(duplicatePriority != threadPriority)
{
Trace("ERROR: Expected priority of hThread=0x%lx and hDupThread=0x%lx"
" to be the same. Priorities:hThread=\"%d\":hDupThread=\"%d\"\n",
hThread,
hDupThread,
threadPriority,
duplicatePriority);
CloseHandle(hThread);
CloseHandle(hDupThread);
Fail("");
}
/* Set the priority of the duplicate thread.*/
if(!SetThreadPriority (hDupThread,THREAD_PRIORITY_HIGHEST))
{
Trace("ERROR:%u: SetThreadPriority failed on hThread=0x%lx\n",
GetLastError(),
hDupThread);
CloseHandle(hThread);
CloseHandle(hDupThread);
Fail("");
}
/* Get the priority of the original thread, and
* compare it to what the duplicate was set to.*/
finalPriority = GetThreadPriority(hThread);
if (finalPriority != THREAD_PRIORITY_HIGHEST)
{
        Trace("ERROR: Expected priority of hThread=0x%lx and "
              "hDupThread=0x%lx to be set the same. Priorities:"
"hThread=\"%d\":hDupThread=\"%d\".\n",
hThread,
hDupThread,
threadPriority,
duplicatePriority);
CloseHandle(hThread);
CloseHandle(hDupThread);
Fail("");
}
/* Signal the helper thread that it can shut down */
if (!SetEvent(hSyncEvent))
{
Fail("ERROR:%u: Failed to set event.\n",
GetLastError());
}
/* Wait on the original thread.*/
if((WaitForSingleObject(hThread, 100)) != WAIT_OBJECT_0)
{
Trace("ERROR:%u: hThread=0x%lx is in a non-signalled "
"mode, yet created signalled.\n",
GetLastError(),
hThread);
CloseHandle(hThread);
CloseHandle(hDupThread);
Fail("");
}
/* Clean-up thread and Terminate the PAL.*/
CloseHandle(hSyncEvent);
CloseHandle(hThread);
CloseHandle(hDupThread);
PAL_Terminate();
return PASS;
}
/*Thread testing function*/
DWORD PALAPI CreateTestThread_DuplicateHandle_test7(LPVOID lpParam)
{
HANDLE hSyncEvent = (HANDLE)lpParam;
/* Wait until the main thread signals that this helper thread should shut down */
WaitForSingleObject(hSyncEvent, INFINITE);
return (DWORD)0;
}
| -1 |
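Outside of tests, the most common reason to call DuplicateHandle on a thread handle is to turn the pseudo handle returned by GetCurrentThread() into a real handle that other threads can wait on or adjust; both handles then refer to the same thread, which is exactly the property the test above checks through thread priority. A sketch against the Win32 prototypes, with error handling trimmed.

```cpp
#include <windows.h>

// Returns a real handle to the calling thread (the caller must CloseHandle it),
// or NULL on failure.
HANDLE GetRealCurrentThreadHandle(void)
{
    HANDLE hReal = NULL;
    if (!DuplicateHandle(GetCurrentProcess(),  // source process
                         GetCurrentThread(),   // pseudo handle to duplicate
                         GetCurrentProcess(),  // target process
                         &hReal,               // receives the real handle
                         0,                    // ignored with SAME_ACCESS
                         FALSE,                // not inheritable
                         DUPLICATE_SAME_ACCESS))
    {
        return NULL;
    }
    return hReal;
}
```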
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/c_runtime/swscanf/test5/test5.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test5.c
**
** Purpose: Tests swscanf with integer numbers
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../swscanf.h"
PALTEST(c_runtime_swscanf_test5_paltest_swscanf_test5, "c_runtime/swscanf/test5/paltest_swscanf_test5")
{
    int n65535 = 65535; /* Workaround compiler strictness */
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
DoNumTest(convert("1234d"), convert("%i"), 1234);
DoNumTest(convert("1234d"), convert("%2i"), 12);
DoNumTest(convert("-1"), convert("%i"), -1);
DoNumTest(convert("0x1234"), convert("%i"), 0x1234);
DoNumTest(convert("012"), convert("%i"), 10);
DoShortNumTest(convert("-1"), convert("%hi"), n65535);
DoShortNumTest(convert("65536"), convert("%hi"), 0);
DoNumTest(convert("-1"), convert("%li"), -1);
DoNumTest(convert("65536"), convert("%li"), 65536);
DoNumTest(convert("-1"), convert("%Li"), -1);
DoNumTest(convert("65536"), convert("%Li"), 65536);
DoI64NumTest(convert("4294967296"), convert("%I64i"), I64(4294967296));
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test5.c
**
** Purpose: Tests swscanf with integer numbers
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../swscanf.h"
PALTEST(c_runtime_swscanf_test5_paltest_swscanf_test5, "c_runtime/swscanf/test5/paltest_swscanf_test5")
{
    int n65535 = 65535; /* Workaround for compiler strictness */
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
DoNumTest(convert("1234d"), convert("%i"), 1234);
DoNumTest(convert("1234d"), convert("%2i"), 12);
DoNumTest(convert("-1"), convert("%i"), -1);
DoNumTest(convert("0x1234"), convert("%i"), 0x1234);
DoNumTest(convert("012"), convert("%i"), 10);
DoShortNumTest(convert("-1"), convert("%hi"), n65535);
DoShortNumTest(convert("65536"), convert("%hi"), 0);
DoNumTest(convert("-1"), convert("%li"), -1);
DoNumTest(convert("65536"), convert("%li"), 65536);
DoNumTest(convert("-1"), convert("%Li"), -1);
DoNumTest(convert("65536"), convert("%Li"), 65536);
DoI64NumTest(convert("4294967296"), convert("%I64i"), I64(4294967296));
PAL_Terminate();
return PASS;
}
| -1 |
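The expectations in the test above hinge on swscanf's `%i` conversion auto-detecting the base (a leading `0x` means hex, a leading `0` means octal) and on the field width clipping the input. A small standalone C++ sketch, independent of the PAL test harness, reproduces those cases:
#include <cstdio>
#include <cwchar>
int main()
{
    int fromHex = 0, fromOct = 0, clipped = 0;
    std::swscanf(L"0x1234", L"%i", &fromHex);  // base 16: 0x1234 == 4660
    std::swscanf(L"012",    L"%i", &fromOct);  // base 8:  012 == 10
    std::swscanf(L"1234d",  L"%2i", &clipped); // field width 2 reads only "12"
    std::printf("%d %d %d\n", fromHex, fromOct, clipped); // prints: 4660 10 12
    return 0;
}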
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/native/libs/Common/pal_ssl_types.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include <stdint.h>
// Matches managed System.Security.Authentication.SslProtocols
enum
{
PAL_SslProtocol_None = 0,
PAL_SslProtocol_Ssl2 = 12,
PAL_SslProtocol_Ssl3 = 48,
PAL_SslProtocol_Tls10 = 192,
PAL_SslProtocol_Tls11 = 768,
PAL_SslProtocol_Tls12 = 3072,
PAL_SslProtocol_Tls13 = 12288,
};
typedef int32_t PAL_SslProtocol;
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include <stdint.h>
// Matches managed System.Security.Authentication.SslProtocols
enum
{
PAL_SslProtocol_None = 0,
PAL_SslProtocol_Ssl2 = 12,
PAL_SslProtocol_Ssl3 = 48,
PAL_SslProtocol_Tls10 = 192,
PAL_SslProtocol_Tls11 = 768,
PAL_SslProtocol_Tls12 = 3072,
PAL_SslProtocol_Tls13 = 12288,
};
typedef int32_t PAL_SslProtocol;
| -1 |
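Because the PAL_SslProtocol constants above are disjoint bit patterns mirroring the managed flags enum, a set of allowed protocols can be built with bitwise OR and tested with a mask. A minimal sketch — the constants are copied from the header, everything else is illustrative:
#include <cstdint>
#include <cstdio>
typedef int32_t PAL_SslProtocol;
enum
{
    PAL_SslProtocol_Tls11 = 768,
    PAL_SslProtocol_Tls12 = 3072,
    PAL_SslProtocol_Tls13 = 12288,
};
int main()
{
    // Allow only TLS 1.2 and TLS 1.3.
    PAL_SslProtocol allowed = PAL_SslProtocol_Tls12 | PAL_SslProtocol_Tls13;
    std::printf("TLS 1.2: %s\n", (allowed & PAL_SslProtocol_Tls12) ? "allowed" : "blocked");
    std::printf("TLS 1.1: %s\n", (allowed & PAL_SslProtocol_Tls11) ? "allowed" : "blocked");
    return 0;
}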
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/md/heaps/external.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: external.h
//
//
// External types used in MetaData\Heaps subcomponent classes.
// This file is used for precompiled headers, so it has to be included at the beginning of every .cpp in
// this directory.
//
// ======================================================================================
#pragma once
#include "../external.h"
#include "../export.h"
#include <stgpool.h>
#include <metamodelpub.h>
#include <utilcode.h>
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: external.h
//
//
// External types used in MetaData\Heaps subcomponent classes.
// This file is used for precompiled headers, so it has to be included at the beginning of every .cpp in
// this directory.
//
// ======================================================================================
#pragma once
#include "../external.h"
#include "../export.h"
#include <stgpool.h>
#include <metamodelpub.h>
#include <utilcode.h>
| -1 |
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/tests/palsuite/c_runtime/fprintf/test10/test10.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test10.c (fprintf)
**
** Purpose: Tests the octal specifier (%o).
** This test is modeled after the fprintf series.
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../fprintf.h"
/*
* Depends on memcmp, strlen, fopen, fseek and fgets.
*/
PALTEST(c_runtime_fprintf_test10_paltest_fprintf_test10, "c_runtime/fprintf/test10/paltest_fprintf_test10")
{
int neg = -42;
int pos = 42;
INT64 l = 42;
if (PAL_Initialize(argc, argv) != 0)
return(FAIL);
DoNumTest("foo %o", pos, "foo 52");
DoNumTest("foo %lo", 0xFFFF, "foo 177777");
DoNumTest("foo %ho", 0xFFFF, "foo 177777");
DoNumTest("foo %Lo", pos, "foo 52");
DoI64Test("foo %I64o", l, "42", "foo 52", "foo 52");
DoNumTest("foo %3o", pos, "foo 52");
DoNumTest("foo %-3o", pos, "foo 52 ");
DoNumTest("foo %.1o", pos, "foo 52");
DoNumTest("foo %.3o", pos, "foo 052");
DoNumTest("foo %03o", pos, "foo 052");
DoNumTest("foo %#o", pos, "foo 052");
DoNumTest("foo %+o", pos, "foo 52");
DoNumTest("foo % o", pos, "foo 52");
DoNumTest("foo %+o", neg, "foo 37777777726");
DoNumTest("foo % o", neg, "foo 37777777726");
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test10.c (fprintf)
**
** Purpose: Tests the octal specifier (%o).
** This test is modeled after the fprintf series.
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../fprintf.h"
/*
* Depends on memcmp, strlen, fopen, fseek and fgets.
*/
PALTEST(c_runtime_fprintf_test10_paltest_fprintf_test10, "c_runtime/fprintf/test10/paltest_fprintf_test10")
{
int neg = -42;
int pos = 42;
INT64 l = 42;
if (PAL_Initialize(argc, argv) != 0)
return(FAIL);
DoNumTest("foo %o", pos, "foo 52");
DoNumTest("foo %lo", 0xFFFF, "foo 177777");
DoNumTest("foo %ho", 0xFFFF, "foo 177777");
DoNumTest("foo %Lo", pos, "foo 52");
DoI64Test("foo %I64o", l, "42", "foo 52", "foo 52");
DoNumTest("foo %3o", pos, "foo 52");
DoNumTest("foo %-3o", pos, "foo 52 ");
DoNumTest("foo %.1o", pos, "foo 52");
DoNumTest("foo %.3o", pos, "foo 052");
DoNumTest("foo %03o", pos, "foo 052");
DoNumTest("foo %#o", pos, "foo 052");
DoNumTest("foo %+o", pos, "foo 52");
DoNumTest("foo % o", pos, "foo 52");
DoNumTest("foo %+o", neg, "foo 37777777726");
DoNumTest("foo % o", neg, "foo 37777777726");
PAL_Terminate();
return PASS;
}
| -1 |
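The expected strings in the test above capture how the octal conversion interacts with the alternate-form, zero-pad, width, and precision options. A standalone sketch using plain printf (rather than the test's DoNumTest wrappers) produces the same outputs:
#include <cstdio>
int main()
{
    int pos = 42;
    int neg = -42;
    std::printf("[%o]\n", pos);   // [52]
    std::printf("[%#o]\n", pos);  // [052]  alternate form prefixes a zero
    std::printf("[%.3o]\n", pos); // [052]  precision zero-pads to 3 digits
    std::printf("[%03o]\n", pos); // [052]  zero flag pads to the field width
    std::printf("[%-3o]\n", pos); // [52 ]  left-justified in a width of 3
    // %o formats an unsigned value; casting -42 shows the 32-bit two's-complement
    // pattern 37777777726 that the expected output above encodes.
    std::printf("[%o]\n", (unsigned int)neg);
    return 0;
}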
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/nativeaot/libunwind/test/libunwind_01.pass.cpp | #include <libunwind.h>
#include <stdlib.h>
void backtrace(int lower_bound) {
unw_context_t context;
unw_getcontext(&context);
unw_cursor_t cursor;
unw_init_local(&cursor, &context);
int n = 0;
do {
++n;
if (n > 100) {
abort();
}
} while (unw_step(&cursor) > 0);
if (n < lower_bound) {
abort();
}
}
void test1(int i) {
backtrace(i);
}
void test2(int i, int j) {
backtrace(i);
test1(j);
}
void test3(int i, int j, int k) {
backtrace(i);
test2(j, k);
}
int main() {
test1(1);
test2(1, 2);
test3(1, 2, 3);
}
| #include <libunwind.h>
#include <stdlib.h>
void backtrace(int lower_bound) {
unw_context_t context;
unw_getcontext(&context);
unw_cursor_t cursor;
unw_init_local(&cursor, &context);
int n = 0;
do {
++n;
if (n > 100) {
abort();
}
} while (unw_step(&cursor) > 0);
if (n < lower_bound) {
abort();
}
}
void test1(int i) {
backtrace(i);
}
void test2(int i, int j) {
backtrace(i);
test1(j);
}
void test3(int i, int j, int k) {
backtrace(i);
test2(j, k);
}
int main() {
test1(1);
test2(1, 2);
test3(1, 2, 3);
}
| -1 |
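The loop in the test above only counts frames; reading each frame's instruction pointer with unw_get_reg(UNW_REG_IP) is the usual next step. A short sketch using the same libunwind API (the print formatting is illustrative):
#include <libunwind.h>
#include <stdio.h>
void print_backtrace() {
  unw_context_t context;
  unw_getcontext(&context);
  unw_cursor_t cursor;
  unw_init_local(&cursor, &context);
  int frame = 0;
  do {
    unw_word_t ip = 0;
    unw_get_reg(&cursor, UNW_REG_IP, &ip);
    printf("frame %d: ip=0x%llx\n", frame++, (unsigned long long)ip);
  } while (unw_step(&cursor) > 0);
}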
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/pal/src/misc/time.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*++
Module Name:
time.c
Abstract:
Implementation of time related WIN API functions.
--*/
#include "pal/palinternal.h"
#include "pal/dbgmsg.h"
#include "pal/misc.h"
#include <time.h>
#include <sys/time.h>
#include <errno.h>
#include <string.h>
#include <sched.h>
using namespace CorUnix;
SET_DEFAULT_DEBUG_CHANNEL(MISC);
/*++
Function:
GetSystemTime
The GetSystemTime function retrieves the current system date and
time. The system time is expressed in Coordinated Universal Time
(UTC).
Parameters
lpSystemTime
[out] Pointer to a SYSTEMTIME structure to receive the current system date and time.
Return Values
This function does not return a value.
--*/
VOID
PALAPI
GetSystemTime(
OUT LPSYSTEMTIME lpSystemTime)
{
time_t tt;
#if HAVE_GMTIME_R
struct tm ut;
#endif /* HAVE_GMTIME_R */
struct tm *utPtr;
struct timeval timeval;
int timeofday_retval;
PERF_ENTRY(GetSystemTime);
ENTRY("GetSystemTime (lpSystemTime=%p)\n", lpSystemTime);
tt = time(NULL);
/* We can't get millisecond resolution from time(), so we get it from
gettimeofday() */
timeofday_retval = gettimeofday(&timeval,NULL);
#if HAVE_GMTIME_R
utPtr = &ut;
if (gmtime_r(&tt, utPtr) == NULL)
#else /* HAVE_GMTIME_R */
if ((utPtr = gmtime(&tt)) == NULL)
#endif /* HAVE_GMTIME_R */
{
ASSERT("gmtime() failed; errno is %d (%s)\n", errno, strerror(errno));
goto EXIT;
}
lpSystemTime->wYear = 1900 + utPtr->tm_year;
lpSystemTime->wMonth = utPtr->tm_mon + 1;
lpSystemTime->wDayOfWeek = utPtr->tm_wday;
lpSystemTime->wDay = utPtr->tm_mday;
lpSystemTime->wHour = utPtr->tm_hour;
lpSystemTime->wMinute = utPtr->tm_min;
lpSystemTime->wSecond = utPtr->tm_sec;
if(-1 == timeofday_retval)
{
ASSERT("gettimeofday() failed; errno is %d (%s)\n",
errno, strerror(errno));
lpSystemTime->wMilliseconds = 0;
}
else
{
int old_seconds;
int new_seconds;
lpSystemTime->wMilliseconds = timeval.tv_usec/tccMillieSecondsToMicroSeconds;
old_seconds = utPtr->tm_sec;
new_seconds = timeval.tv_sec%60;
/* just in case we reached the next second in the interval between
time() and gettimeofday() */
if( old_seconds!=new_seconds )
{
TRACE("crossed seconds boundary; setting milliseconds to 999\n");
lpSystemTime->wMilliseconds = 999;
}
}
EXIT:
LOGEXIT("GetSystemTime returns void\n");
PERF_EXIT(GetSystemTime);
}
/*++
Function:
GetTickCount
The GetTickCount function retrieves the number of milliseconds that
have elapsed since the system was started. It is limited to the
resolution of the system timer. To obtain the system timer resolution,
use the GetSystemTimeAdjustment function.
Parameters
This function has no parameters.
Return Values
The return value is the number of milliseconds that have elapsed since
the system was started.
In the PAL implementation the return value is the elapsed time since
the start of the epoch.
--*/
DWORD
PALAPI
GetTickCount(
VOID)
{
DWORD retval = 0;
PERF_ENTRY(GetTickCount);
ENTRY("GetTickCount ()\n");
// Get the 64-bit count from GetTickCount64 and truncate the results.
retval = (DWORD) GetTickCount64();
LOGEXIT("GetTickCount returns DWORD %u\n", retval);
PERF_EXIT(GetTickCount);
return retval;
}
BOOL
PALAPI
QueryPerformanceCounter(
OUT LARGE_INTEGER *lpPerformanceCount
)
{
BOOL retval = TRUE;
PERF_ENTRY(QueryPerformanceCounter);
ENTRY("QueryPerformanceCounter()\n");
#if HAVE_CLOCK_GETTIME_NSEC_NP
lpPerformanceCount->QuadPart = (LONGLONG)clock_gettime_nsec_np(CLOCK_UPTIME_RAW);
#elif HAVE_CLOCK_MONOTONIC
struct timespec ts;
int result = clock_gettime(CLOCK_MONOTONIC, &ts);
if (result != 0)
{
ASSERT("clock_gettime(CLOCK_MONOTONIC) failed: %d\n", result);
retval = FALSE;
}
else
{
lpPerformanceCount->QuadPart =
((LONGLONG)(ts.tv_sec) * (LONGLONG)(tccSecondsToNanoSeconds)) + (LONGLONG)(ts.tv_nsec);
}
#else
#error "The PAL requires either mach_absolute_time() or clock_gettime(CLOCK_MONOTONIC) to be supported."
#endif
LOGEXIT("QueryPerformanceCounter\n");
PERF_EXIT(QueryPerformanceCounter);
return retval;
}
BOOL
PALAPI
QueryPerformanceFrequency(
OUT LARGE_INTEGER *lpFrequency
)
{
BOOL retval = TRUE;
PERF_ENTRY(QueryPerformanceFrequency);
ENTRY("QueryPerformanceFrequency()\n");
#if HAVE_CLOCK_GETTIME_NSEC_NP
lpFrequency->QuadPart = (LONGLONG)(tccSecondsToNanoSeconds);
#elif HAVE_CLOCK_MONOTONIC
// clock_gettime() returns a result in terms of nanoseconds rather than a count. This
// means that we need to either always scale the result by the actual resolution (to
// get a count) or we need to say the resolution is in terms of nanoseconds. We prefer
// the latter since it allows the highest throughput and should minimize error propagated
// to the user.
lpFrequency->QuadPart = (LONGLONG)(tccSecondsToNanoSeconds);
#else
#error "The PAL requires either mach_absolute_time() or clock_gettime(CLOCK_MONOTONIC) to be supported."
#endif
LOGEXIT("QueryPerformanceFrequency\n");
PERF_EXIT(QueryPerformanceFrequency);
return retval;
}
/*++
Function:
QueryThreadCycleTime
Puts the execution time (in nanoseconds) for the thread pointed to by ThreadHandle, into the unsigned long
pointed to by CycleTime. ThreadHandle must refer to the current thread. Returns TRUE on success, FALSE on
failure.
--*/
BOOL
PALAPI
QueryThreadCycleTime(
IN HANDLE ThreadHandle,
OUT PULONG64 CycleTime
)
{
ULONG64 calcTime;
FILETIME kernelTime, userTime;
BOOL retval = TRUE;
if(!GetThreadTimesInternal(ThreadHandle, &kernelTime, &userTime))
{
ASSERT("Could not get cycle time for current thread");
retval = FALSE;
goto EXIT;
}
calcTime = ((ULONG64)kernelTime.dwHighDateTime << 32);
calcTime += (ULONG64)kernelTime.dwLowDateTime;
calcTime += ((ULONG64)userTime.dwHighDateTime << 32);
calcTime += (ULONG64)userTime.dwLowDateTime;
*CycleTime = calcTime;
EXIT:
return retval;
}
/*++
Function:
GetTickCount64
Returns a 64-bit tick count with a millisecond resolution. It tries its best
to return monotonically increasing counts and avoid being affected by changes
to the system clock (either due to drift or due to explicit changes to system
time).
--*/
PALAPI
ULONGLONG
GetTickCount64()
{
LONGLONG retval = 0;
#if HAVE_CLOCK_GETTIME_NSEC_NP
return (LONGLONG)clock_gettime_nsec_np(CLOCK_UPTIME_RAW) / (LONGLONG)(tccMillieSecondsToNanoSeconds);
#elif HAVE_CLOCK_MONOTONIC || HAVE_CLOCK_MONOTONIC_COARSE
struct timespec ts;
#if HAVE_CLOCK_MONOTONIC_COARSE
// CLOCK_MONOTONIC_COARSE has enough precision for GetTickCount but
// doesn't have the same overhead as CLOCK_MONOTONIC. This allows
// overall higher throughput. See dotnet/coreclr#2257 for more details.
const clockid_t clockType = CLOCK_MONOTONIC_COARSE;
#else
const clockid_t clockType = CLOCK_MONOTONIC;
#endif
int result = clock_gettime(clockType, &ts);
if (result != 0)
{
#if HAVE_CLOCK_MONOTONIC_COARSE
ASSERT("clock_gettime(CLOCK_MONOTONIC_COARSE) failed: %d\n", result);
#else
ASSERT("clock_gettime(CLOCK_MONOTONIC) failed: %d\n", result);
#endif
retval = FALSE;
}
else
{
retval = ((LONGLONG)(ts.tv_sec) * (LONGLONG)(tccSecondsToMillieSeconds)) + ((LONGLONG)(ts.tv_nsec) / (LONGLONG)(tccMillieSecondsToNanoSeconds));
}
#else
#error "The PAL requires either mach_absolute_time() or clock_gettime(CLOCK_MONOTONIC) to be supported."
#endif
return (ULONGLONG)(retval);
}
/*++
Function:
PAL_nanosleep
Sleeps for the time specified in timeInNs.
Returns 0 on successful completion of the operation.
--*/
PALAPI
INT
PAL_nanosleep(
IN long timeInNs
)
{
struct timespec req;
struct timespec rem;
int result;
req.tv_sec = 0;
req.tv_nsec = timeInNs;
do
{
// Sleep for the requested time.
result = nanosleep(&req, &rem);
// Save the remaining time (used if the loop runs another iteration).
req = rem;
}
while(result == -1 && errno == EINTR);
return result;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*++
Module Name:
time.c
Abstract:
Implementation of time related WIN API functions.
--*/
#include "pal/palinternal.h"
#include "pal/dbgmsg.h"
#include "pal/misc.h"
#include <time.h>
#include <sys/time.h>
#include <errno.h>
#include <string.h>
#include <sched.h>
using namespace CorUnix;
SET_DEFAULT_DEBUG_CHANNEL(MISC);
/*++
Function:
GetSystemTime
The GetSystemTime function retrieves the current system date and
time. The system time is expressed in Coordinated Universal Time
(UTC).
Parameters
lpSystemTime
[out] Pointer to a SYSTEMTIME structure to receive the current system date and time.
Return Values
This function does not return a value.
--*/
VOID
PALAPI
GetSystemTime(
OUT LPSYSTEMTIME lpSystemTime)
{
time_t tt;
#if HAVE_GMTIME_R
struct tm ut;
#endif /* HAVE_GMTIME_R */
struct tm *utPtr;
struct timeval timeval;
int timeofday_retval;
PERF_ENTRY(GetSystemTime);
ENTRY("GetSystemTime (lpSystemTime=%p)\n", lpSystemTime);
tt = time(NULL);
/* We can't get millisecond resolution from time(), so we get it from
gettimeofday() */
timeofday_retval = gettimeofday(&timeval,NULL);
#if HAVE_GMTIME_R
utPtr = &ut;
if (gmtime_r(&tt, utPtr) == NULL)
#else /* HAVE_GMTIME_R */
if ((utPtr = gmtime(&tt)) == NULL)
#endif /* HAVE_GMTIME_R */
{
ASSERT("gmtime() failed; errno is %d (%s)\n", errno, strerror(errno));
goto EXIT;
}
lpSystemTime->wYear = 1900 + utPtr->tm_year;
lpSystemTime->wMonth = utPtr->tm_mon + 1;
lpSystemTime->wDayOfWeek = utPtr->tm_wday;
lpSystemTime->wDay = utPtr->tm_mday;
lpSystemTime->wHour = utPtr->tm_hour;
lpSystemTime->wMinute = utPtr->tm_min;
lpSystemTime->wSecond = utPtr->tm_sec;
if(-1 == timeofday_retval)
{
ASSERT("gettimeofday() failed; errno is %d (%s)\n",
errno, strerror(errno));
lpSystemTime->wMilliseconds = 0;
}
else
{
int old_seconds;
int new_seconds;
lpSystemTime->wMilliseconds = timeval.tv_usec/tccMillieSecondsToMicroSeconds;
old_seconds = utPtr->tm_sec;
new_seconds = timeval.tv_sec%60;
/* just in case we reached the next second in the interval between
time() and gettimeofday() */
if( old_seconds!=new_seconds )
{
TRACE("crossed seconds boundary; setting milliseconds to 999\n");
lpSystemTime->wMilliseconds = 999;
}
}
EXIT:
LOGEXIT("GetSystemTime returns void\n");
PERF_EXIT(GetSystemTime);
}
/*++
Function:
GetTickCount
The GetTickCount function retrieves the number of milliseconds that
have elapsed since the system was started. It is limited to the
resolution of the system timer. To obtain the system timer resolution,
use the GetSystemTimeAdjustment function.
Parameters
This function has no parameters.
Return Values
The return value is the number of milliseconds that have elapsed since
the system was started.
In the PAL implementation the return value is the elapsed time since
the start of the epoch.
--*/
DWORD
PALAPI
GetTickCount(
VOID)
{
DWORD retval = 0;
PERF_ENTRY(GetTickCount);
ENTRY("GetTickCount ()\n");
// Get the 64-bit count from GetTickCount64 and truncate the results.
retval = (DWORD) GetTickCount64();
LOGEXIT("GetTickCount returns DWORD %u\n", retval);
PERF_EXIT(GetTickCount);
return retval;
}
BOOL
PALAPI
QueryPerformanceCounter(
OUT LARGE_INTEGER *lpPerformanceCount
)
{
BOOL retval = TRUE;
PERF_ENTRY(QueryPerformanceCounter);
ENTRY("QueryPerformanceCounter()\n");
#if HAVE_CLOCK_GETTIME_NSEC_NP
lpPerformanceCount->QuadPart = (LONGLONG)clock_gettime_nsec_np(CLOCK_UPTIME_RAW);
#elif HAVE_CLOCK_MONOTONIC
struct timespec ts;
int result = clock_gettime(CLOCK_MONOTONIC, &ts);
if (result != 0)
{
ASSERT("clock_gettime(CLOCK_MONOTONIC) failed: %d\n", result);
retval = FALSE;
}
else
{
lpPerformanceCount->QuadPart =
((LONGLONG)(ts.tv_sec) * (LONGLONG)(tccSecondsToNanoSeconds)) + (LONGLONG)(ts.tv_nsec);
}
#else
#error "The PAL requires either mach_absolute_time() or clock_gettime(CLOCK_MONOTONIC) to be supported."
#endif
LOGEXIT("QueryPerformanceCounter\n");
PERF_EXIT(QueryPerformanceCounter);
return retval;
}
BOOL
PALAPI
QueryPerformanceFrequency(
OUT LARGE_INTEGER *lpFrequency
)
{
BOOL retval = TRUE;
PERF_ENTRY(QueryPerformanceFrequency);
ENTRY("QueryPerformanceFrequency()\n");
#if HAVE_CLOCK_GETTIME_NSEC_NP
lpFrequency->QuadPart = (LONGLONG)(tccSecondsToNanoSeconds);
#elif HAVE_CLOCK_MONOTONIC
// clock_gettime() returns a result in terms of nanoseconds rather than a count. This
// means that we need to either always scale the result by the actual resolution (to
// get a count) or we need to say the resolution is in terms of nanoseconds. We prefer
// the latter since it allows the highest throughput and should minimize error propagated
// to the user.
lpFrequency->QuadPart = (LONGLONG)(tccSecondsToNanoSeconds);
#else
#error "The PAL requires either mach_absolute_time() or clock_gettime(CLOCK_MONOTONIC) to be supported."
#endif
LOGEXIT("QueryPerformanceFrequency\n");
PERF_EXIT(QueryPerformanceFrequency);
return retval;
}
/*++
Function:
QueryThreadCycleTime
Puts the execution time (in nanoseconds) for the thread pointed to by ThreadHandle, into the unsigned long
pointed to by CycleTime. ThreadHandle must refer to the current thread. Returns TRUE on success, FALSE on
failure.
--*/
BOOL
PALAPI
QueryThreadCycleTime(
IN HANDLE ThreadHandle,
OUT PULONG64 CycleTime
)
{
ULONG64 calcTime;
FILETIME kernelTime, userTime;
BOOL retval = TRUE;
if(!GetThreadTimesInternal(ThreadHandle, &kernelTime, &userTime))
{
ASSERT("Could not get cycle time for current thread");
retval = FALSE;
goto EXIT;
}
calcTime = ((ULONG64)kernelTime.dwHighDateTime << 32);
calcTime += (ULONG64)kernelTime.dwLowDateTime;
calcTime += ((ULONG64)userTime.dwHighDateTime << 32);
calcTime += (ULONG64)userTime.dwLowDateTime;
*CycleTime = calcTime;
EXIT:
return retval;
}
/*++
Function:
GetTickCount64
Returns a 64-bit tick count with a millisecond resolution. It tries its best
to return monotonically increasing counts and avoid being affected by changes
to the system clock (either due to drift or due to explicit changes to system
time).
--*/
PALAPI
ULONGLONG
GetTickCount64()
{
LONGLONG retval = 0;
#if HAVE_CLOCK_GETTIME_NSEC_NP
return (LONGLONG)clock_gettime_nsec_np(CLOCK_UPTIME_RAW) / (LONGLONG)(tccMillieSecondsToNanoSeconds);
#elif HAVE_CLOCK_MONOTONIC || HAVE_CLOCK_MONOTONIC_COARSE
struct timespec ts;
#if HAVE_CLOCK_MONOTONIC_COARSE
// CLOCK_MONOTONIC_COARSE has enough precision for GetTickCount but
// doesn't have the same overhead as CLOCK_MONOTONIC. This allows
// overall higher throughput. See dotnet/coreclr#2257 for more details.
const clockid_t clockType = CLOCK_MONOTONIC_COARSE;
#else
const clockid_t clockType = CLOCK_MONOTONIC;
#endif
int result = clock_gettime(clockType, &ts);
if (result != 0)
{
#if HAVE_CLOCK_MONOTONIC_COARSE
ASSERT("clock_gettime(CLOCK_MONOTONIC_COARSE) failed: %d\n", result);
#else
ASSERT("clock_gettime(CLOCK_MONOTONIC) failed: %d\n", result);
#endif
retval = FALSE;
}
else
{
retval = ((LONGLONG)(ts.tv_sec) * (LONGLONG)(tccSecondsToMillieSeconds)) + ((LONGLONG)(ts.tv_nsec) / (LONGLONG)(tccMillieSecondsToNanoSeconds));
}
#else
#error "The PAL requires either mach_absolute_time() or clock_gettime(CLOCK_MONOTONIC) to be supported."
#endif
return (ULONGLONG)(retval);
}
/*++
Function:
PAL_nanosleep
Sleeps for the time specified in timeInNs.
Returns 0 on successful completion of the operation.
--*/
PALAPI
INT
PAL_nanosleep(
IN long timeInNs
)
{
struct timespec req;
struct timespec rem;
int result;
req.tv_sec = 0;
req.tv_nsec = timeInNs;
do
{
// Sleep for the requested time.
result = nanosleep(&req, &rem);
// Save the remaining time (used if the loop runs another iteration).
req = rem;
}
while(result == -1 && errno == EINTR);
return result;
}
| -1 |
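In the PAL implementation above, QueryPerformanceCounter reports nanoseconds and QueryPerformanceFrequency reports a constant nanoseconds-per-second, so elapsed time is just the counter difference divided by the frequency. A standalone POSIX sketch of the same technique, using clock_gettime(CLOCK_MONOTONIC) directly rather than the PAL entry points:
#include <stdio.h>
#include <time.h>
static long long MonotonicNanos()
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (long long)ts.tv_sec * 1000000000LL + (long long)ts.tv_nsec;
}
int main()
{
    const long long frequency = 1000000000LL; // counts (nanoseconds) per second
    long long start = MonotonicNanos();
    // ... work being timed ...
    long long end = MonotonicNanos();
    printf("elapsed: %.3f ms\n", 1000.0 * (double)(end - start) / (double)frequency);
    return 0;
}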
dotnet/runtime | 65,916 | Fix GC hole with multi-reg local var stores | Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or a subsequent slot was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| BruceForstall | "2022-02-26T02:06:33Z" | "2022-03-01T05:47:41Z" | d9eafd0c55ff7c3d7804c7629baf271703df91a6 | 8686d06e5387cf50f08f451cd697331eb5bd830a | Fix GC hole with multi-reg local var stores. Change #64857 exposed an existing problem where when generating code
for a multi-reg GT_STORE_LCL_VAR, if the first register slot was not
enregistered, but the second or subsequent slots was, and those non-first
slots contained GC pointers, we wouldn't properly add those GC pointers
to the GC tracking sets. This led to cases where the register lifetimes
would be killed in the GC info before the actual lifetime was complete.
The primary fix is to make `gtHasReg()` handle the `IsMultiRegLclVar()`
case. As a side-effect, this fixes some LSRA dumps that weren't displaying
multiple registers properly.
There are about 50 SPMI asm diffs on win-arm64 where register lifetimes
get extended, fixing GC holes.
I also made `GetMultiRegCount()` handle the `IsMultiRegLclVar()` case.
I made a number of cleanup changes along the way:
1. Fixed two cases of calling `gcInfo.gcMarkRegSetNpt` with regNumber, not regMaskTP
2. Marked some functions `const`
3. Improved some comments
4. Changed "ith" to "i'th" in comments which still doesn't read great,
but at least I'm not left trying to parse "ith" as an English word.
5. Use `OperIsScalarLocal()` more broadly
6. Renamed `gtDispRegCount` to `gtDispMultiRegCount` to make it clear
it only applies to the multi-reg case.
Fixes #65476.
| ./src/coreclr/nativeaot/Runtime/allocheap.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#include "CommonTypes.h"
#include "CommonMacros.h"
#include "daccess.h"
#include "DebugMacrosExt.h"
#include "PalRedhawkCommon.h"
#include "PalRedhawk.h"
#include "rhassert.h"
#include "slist.h"
#include "holder.h"
#include "Crst.h"
#include "Range.h"
#ifdef FEATURE_RWX_MEMORY
#include "memaccessmgr.h"
#endif
#include "allocheap.h"
#include "CommonMacros.inl"
#include "slist.inl"
using namespace rh::util;
//-------------------------------------------------------------------------------------------------
AllocHeap::AllocHeap()
: m_blockList(),
m_rwProtectType(PAGE_READWRITE),
m_roProtectType(PAGE_READWRITE),
#ifdef FEATURE_RWX_MEMORY
m_pAccessMgr(NULL),
m_hCurPageRW(),
#endif // FEATURE_RWX_MEMORY
m_pNextFree(NULL),
m_pFreeCommitEnd(NULL),
m_pFreeReserveEnd(NULL),
m_pbInitialMem(NULL),
m_fShouldFreeInitialMem(false),
m_lock(CrstAllocHeap)
COMMA_INDEBUG(m_fIsInit(false))
{
ASSERT(!_UseAccessManager());
}
#ifdef FEATURE_RWX_MEMORY
//-------------------------------------------------------------------------------------------------
AllocHeap::AllocHeap(
uint32_t rwProtectType,
uint32_t roProtectType,
MemAccessMgr* pAccessMgr)
: m_blockList(),
m_rwProtectType(rwProtectType),
m_roProtectType(roProtectType == 0 ? rwProtectType : roProtectType),
m_pAccessMgr(pAccessMgr),
m_hCurPageRW(),
m_pNextFree(NULL),
m_pFreeCommitEnd(NULL),
m_pFreeReserveEnd(NULL),
m_pbInitialMem(NULL),
m_fShouldFreeInitialMem(false),
m_lock(CrstAllocHeap)
COMMA_INDEBUG(m_fIsInit(false))
{
ASSERT(!_UseAccessManager() || (m_rwProtectType != m_roProtectType && m_pAccessMgr != NULL));
}
#endif // FEATURE_RWX_MEMORY
//-------------------------------------------------------------------------------------------------
bool AllocHeap::Init()
{
ASSERT(!m_fIsInit);
INDEBUG(m_fIsInit = true;)
return true;
}
//-------------------------------------------------------------------------------------------------
// This is for using pre-allocated memory on heap construction.
// Should never use this more than once, and should always follow construction of heap.
bool AllocHeap::Init(
uint8_t * pbInitialMem,
uintptr_t cbInitialMemCommit,
uintptr_t cbInitialMemReserve,
bool fShouldFreeInitialMem)
{
ASSERT(!m_fIsInit);
#ifdef FEATURE_RWX_MEMORY
// Manage the committed portion of memory
if (_UseAccessManager())
{
m_pAccessMgr->ManageMemoryRange(MemRange(pbInitialMem, cbInitialMemCommit), true);
}
#endif // FEATURE_RWX_MEMORY
BlockListElem *pBlock = new (nothrow) BlockListElem(pbInitialMem, cbInitialMemReserve);
if (pBlock == NULL)
return false;
m_blockList.PushHead(pBlock);
if (!_UpdateMemPtrs(pbInitialMem,
pbInitialMem + cbInitialMemCommit,
pbInitialMem + cbInitialMemReserve))
{
return false;
}
m_pbInitialMem = pbInitialMem;
m_fShouldFreeInitialMem = fShouldFreeInitialMem;
INDEBUG(m_fIsInit = true;)
return true;
}
//-------------------------------------------------------------------------------------------------
AllocHeap::~AllocHeap()
{
while (!m_blockList.IsEmpty())
{
BlockListElem *pCur = m_blockList.PopHead();
if (pCur->GetStart() != m_pbInitialMem || m_fShouldFreeInitialMem)
PalVirtualFree(pCur->GetStart(), pCur->GetLength(), MEM_RELEASE);
delete pCur;
}
}
//-------------------------------------------------------------------------------------------------
uint8_t * AllocHeap::_Alloc(
uintptr_t cbMem,
uintptr_t alignment
WRITE_ACCESS_HOLDER_ARG
)
{
#ifndef FEATURE_RWX_MEMORY
const void* pRWAccessHolder = NULL;
#endif // FEATURE_RWX_MEMORY
ASSERT((alignment & (alignment - 1)) == 0); // Power of 2 only.
ASSERT(alignment <= OS_PAGE_SIZE); // Can't handle this right now.
ASSERT((m_rwProtectType == m_roProtectType) == (pRWAccessHolder == NULL));
ASSERT(!_UseAccessManager() || pRWAccessHolder != NULL);
if (_UseAccessManager() && pRWAccessHolder == NULL)
return NULL;
CrstHolder lock(&m_lock);
uint8_t * pbMem = _AllocFromCurBlock(cbMem, alignment PASS_WRITE_ACCESS_HOLDER_ARG);
if (pbMem != NULL)
return pbMem;
// Must allocate new block
if (!_AllocNewBlock(cbMem))
return NULL;
pbMem = _AllocFromCurBlock(cbMem, alignment PASS_WRITE_ACCESS_HOLDER_ARG);
ASSERT_MSG(pbMem != NULL, "AllocHeap::Alloc: failed to alloc mem after new block alloc");
return pbMem;
}
//-------------------------------------------------------------------------------------------------
uint8_t * AllocHeap::Alloc(
uintptr_t cbMem
WRITE_ACCESS_HOLDER_ARG)
{
return _Alloc(cbMem, 1 PASS_WRITE_ACCESS_HOLDER_ARG);
}
//-------------------------------------------------------------------------------------------------
uint8_t * AllocHeap::AllocAligned(
uintptr_t cbMem,
uintptr_t alignment
WRITE_ACCESS_HOLDER_ARG)
{
return _Alloc(cbMem, alignment PASS_WRITE_ACCESS_HOLDER_ARG);
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::Contains(void* pvMem, uintptr_t cbMem)
{
MemRange range(pvMem, cbMem);
for (BlockList::Iterator it = m_blockList.Begin(); it != m_blockList.End(); ++it)
{
if (it->Contains(range))
{
return true;
}
}
return false;
}
#ifdef FEATURE_RWX_MEMORY
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_AcquireWriteAccess(
uint8_t* pvMem,
uintptr_t cbMem,
WriteAccessHolder* pHolder)
{
ASSERT(!_UseAccessManager() || m_pAccessMgr != NULL);
if (_UseAccessManager())
return m_pAccessMgr->AcquireWriteAccess(MemRange(pvMem, cbMem), m_hCurPageRW, pHolder);
else
return true;
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::AcquireWriteAccess(
void* pvMem,
uintptr_t cbMem,
WriteAccessHolder* pHolder)
{
return _AcquireWriteAccess(static_cast<uint8_t*>(pvMem), cbMem, pHolder);
}
#endif // FEATURE_RWX_MEMORY
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_UpdateMemPtrs(uint8_t* pNextFree, uint8_t* pFreeCommitEnd, uint8_t* pFreeReserveEnd)
{
ASSERT(MemRange(pNextFree, pFreeReserveEnd).Contains(MemRange(pNextFree, pFreeCommitEnd)));
ASSERT(ALIGN_DOWN(pFreeCommitEnd, OS_PAGE_SIZE) == pFreeCommitEnd);
ASSERT(ALIGN_DOWN(pFreeReserveEnd, OS_PAGE_SIZE) == pFreeReserveEnd);
#ifdef FEATURE_RWX_MEMORY
// See if we need to update current allocation holder or protect committed pages.
if (_UseAccessManager())
{
if (pFreeCommitEnd - pNextFree > 0)
{
#ifndef STRESS_MEMACCESSMGR
// Create or update the alloc cache, used to speed up new allocations.
            // If there is available committed memory and either m_pNextFree is
// being updated past a page boundary or the current cache is empty,
// then update the cache.
if (ALIGN_DOWN(m_pNextFree, OS_PAGE_SIZE) != ALIGN_DOWN(pNextFree, OS_PAGE_SIZE) ||
m_hCurPageRW.GetRange().GetLength() == 0)
{
// Update current alloc page write access holder.
if (!_AcquireWriteAccess(ALIGN_DOWN(pNextFree, OS_PAGE_SIZE),
OS_PAGE_SIZE,
&m_hCurPageRW))
{
return false;
}
}
#endif // STRESS_MEMACCESSMGR
}
else
{ // No available committed memory. Release the cache.
m_hCurPageRW.Release();
}
}
#endif // FEATURE_RWX_MEMORY
m_pNextFree = pNextFree;
m_pFreeCommitEnd = pFreeCommitEnd;
m_pFreeReserveEnd = pFreeReserveEnd;
return true;
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_UpdateMemPtrs(uint8_t* pNextFree, uint8_t* pFreeCommitEnd)
{
return _UpdateMemPtrs(pNextFree, pFreeCommitEnd, m_pFreeReserveEnd);
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_UpdateMemPtrs(uint8_t* pNextFree)
{
return _UpdateMemPtrs(pNextFree, m_pFreeCommitEnd);
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_AllocNewBlock(uintptr_t cbMem)
{
    cbMem = ALIGN_UP(max(cbMem, s_minBlockSize), OS_PAGE_SIZE);
uint8_t * pbMem = reinterpret_cast<uint8_t*>
(PalVirtualAlloc(NULL, cbMem, MEM_COMMIT, m_roProtectType));
if (pbMem == NULL)
return false;
BlockListElem *pBlockListElem = new (nothrow) BlockListElem(pbMem, cbMem);
if (pBlockListElem == NULL)
{
PalVirtualFree(pbMem, 0, MEM_RELEASE);
return false;
}
// Add to the list. While there is no race for writers (we hold the lock) we have the
// possibility of simultaneous readers, and using the interlocked version creates a
// memory barrier to make sure any reader sees a consistent list.
m_blockList.PushHeadInterlocked(pBlockListElem);
return _UpdateMemPtrs(pbMem, pbMem + cbMem, pbMem + cbMem);
}
//-------------------------------------------------------------------------------------------------
uint8_t * AllocHeap::_AllocFromCurBlock(
uintptr_t cbMem,
uintptr_t alignment
WRITE_ACCESS_HOLDER_ARG)
{
uint8_t * pbMem = NULL;
cbMem += (uint8_t *)ALIGN_UP(m_pNextFree, alignment) - m_pNextFree;
if (m_pNextFree + cbMem <= m_pFreeCommitEnd ||
_CommitFromCurBlock(cbMem))
{
ASSERT(cbMem + m_pNextFree <= m_pFreeCommitEnd);
#ifdef FEATURE_RWX_MEMORY
if (pRWAccessHolder != NULL)
{
if (!_AcquireWriteAccess(m_pNextFree, cbMem, pRWAccessHolder))
return NULL;
}
#endif // FEATURE_RWX_MEMORY
pbMem = ALIGN_UP(m_pNextFree, alignment);
if (!_UpdateMemPtrs(m_pNextFree + cbMem))
return NULL;
}
return pbMem;
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_CommitFromCurBlock(uintptr_t cbMem)
{
ASSERT(m_pFreeCommitEnd < m_pNextFree + cbMem);
if (m_pNextFree + cbMem <= m_pFreeReserveEnd)
{
uintptr_t cbMemToCommit = ALIGN_UP(cbMem, OS_PAGE_SIZE);
#ifdef FEATURE_RWX_MEMORY
if (_UseAccessManager())
{
if (!m_pAccessMgr->ManageMemoryRange(MemRange(m_pFreeCommitEnd, cbMemToCommit), false))
return false;
}
else
{
uint32_t oldProtectType;
if (!PalVirtualProtect(m_pFreeCommitEnd, cbMemToCommit, m_roProtectType, &oldProtectType))
return false;
}
#endif // FEATURE_RWX_MEMORY
return _UpdateMemPtrs(m_pNextFree, m_pFreeCommitEnd + cbMemToCommit);
}
return false;
}
//-------------------------------------------------------------------------------------------------
void * __cdecl operator new(size_t n, AllocHeap * alloc)
{
return alloc->Alloc(n);
}
//-------------------------------------------------------------------------------------------------
void * __cdecl operator new[](size_t n, AllocHeap * alloc)
{
return alloc->Alloc(n);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#include "CommonTypes.h"
#include "CommonMacros.h"
#include "daccess.h"
#include "DebugMacrosExt.h"
#include "PalRedhawkCommon.h"
#include "PalRedhawk.h"
#include "rhassert.h"
#include "slist.h"
#include "holder.h"
#include "Crst.h"
#include "Range.h"
#ifdef FEATURE_RWX_MEMORY
#include "memaccessmgr.h"
#endif
#include "allocheap.h"
#include "CommonMacros.inl"
#include "slist.inl"
using namespace rh::util;
//-------------------------------------------------------------------------------------------------
AllocHeap::AllocHeap()
: m_blockList(),
m_rwProtectType(PAGE_READWRITE),
m_roProtectType(PAGE_READWRITE),
#ifdef FEATURE_RWX_MEMORY
m_pAccessMgr(NULL),
m_hCurPageRW(),
#endif // FEATURE_RWX_MEMORY
m_pNextFree(NULL),
m_pFreeCommitEnd(NULL),
m_pFreeReserveEnd(NULL),
m_pbInitialMem(NULL),
m_fShouldFreeInitialMem(false),
m_lock(CrstAllocHeap)
COMMA_INDEBUG(m_fIsInit(false))
{
ASSERT(!_UseAccessManager());
}
#ifdef FEATURE_RWX_MEMORY
//-------------------------------------------------------------------------------------------------
AllocHeap::AllocHeap(
uint32_t rwProtectType,
uint32_t roProtectType,
MemAccessMgr* pAccessMgr)
: m_blockList(),
m_rwProtectType(rwProtectType),
m_roProtectType(roProtectType == 0 ? rwProtectType : roProtectType),
m_pAccessMgr(pAccessMgr),
m_hCurPageRW(),
m_pNextFree(NULL),
m_pFreeCommitEnd(NULL),
m_pFreeReserveEnd(NULL),
m_pbInitialMem(NULL),
m_fShouldFreeInitialMem(false),
m_lock(CrstAllocHeap)
COMMA_INDEBUG(m_fIsInit(false))
{
ASSERT(!_UseAccessManager() || (m_rwProtectType != m_roProtectType && m_pAccessMgr != NULL));
}
#endif // FEATURE_RWX_MEMORY
//-------------------------------------------------------------------------------------------------
bool AllocHeap::Init()
{
ASSERT(!m_fIsInit);
INDEBUG(m_fIsInit = true;)
return true;
}
//-------------------------------------------------------------------------------------------------
// This is for using pre-allocated memory on heap construction.
// Should never use this more than once, and should always follow construction of heap.
bool AllocHeap::Init(
uint8_t * pbInitialMem,
uintptr_t cbInitialMemCommit,
uintptr_t cbInitialMemReserve,
bool fShouldFreeInitialMem)
{
ASSERT(!m_fIsInit);
#ifdef FEATURE_RWX_MEMORY
// Manage the committed portion of memory
if (_UseAccessManager())
{
m_pAccessMgr->ManageMemoryRange(MemRange(pbInitialMem, cbInitialMemCommit), true);
}
#endif // FEATURE_RWX_MEMORY
BlockListElem *pBlock = new (nothrow) BlockListElem(pbInitialMem, cbInitialMemReserve);
if (pBlock == NULL)
return false;
m_blockList.PushHead(pBlock);
if (!_UpdateMemPtrs(pbInitialMem,
pbInitialMem + cbInitialMemCommit,
pbInitialMem + cbInitialMemReserve))
{
return false;
}
m_pbInitialMem = pbInitialMem;
m_fShouldFreeInitialMem = fShouldFreeInitialMem;
INDEBUG(m_fIsInit = true;)
return true;
}
//-------------------------------------------------------------------------------------------------
AllocHeap::~AllocHeap()
{
while (!m_blockList.IsEmpty())
{
BlockListElem *pCur = m_blockList.PopHead();
if (pCur->GetStart() != m_pbInitialMem || m_fShouldFreeInitialMem)
PalVirtualFree(pCur->GetStart(), pCur->GetLength(), MEM_RELEASE);
delete pCur;
}
}
//-------------------------------------------------------------------------------------------------
uint8_t * AllocHeap::_Alloc(
uintptr_t cbMem,
uintptr_t alignment
WRITE_ACCESS_HOLDER_ARG
)
{
#ifndef FEATURE_RWX_MEMORY
const void* pRWAccessHolder = NULL;
#endif // FEATURE_RWX_MEMORY
ASSERT((alignment & (alignment - 1)) == 0); // Power of 2 only.
ASSERT(alignment <= OS_PAGE_SIZE); // Can't handle this right now.
ASSERT((m_rwProtectType == m_roProtectType) == (pRWAccessHolder == NULL));
ASSERT(!_UseAccessManager() || pRWAccessHolder != NULL);
if (_UseAccessManager() && pRWAccessHolder == NULL)
return NULL;
CrstHolder lock(&m_lock);
uint8_t * pbMem = _AllocFromCurBlock(cbMem, alignment PASS_WRITE_ACCESS_HOLDER_ARG);
if (pbMem != NULL)
return pbMem;
// Must allocate new block
if (!_AllocNewBlock(cbMem))
return NULL;
pbMem = _AllocFromCurBlock(cbMem, alignment PASS_WRITE_ACCESS_HOLDER_ARG);
ASSERT_MSG(pbMem != NULL, "AllocHeap::Alloc: failed to alloc mem after new block alloc");
return pbMem;
}
//-------------------------------------------------------------------------------------------------
uint8_t * AllocHeap::Alloc(
uintptr_t cbMem
WRITE_ACCESS_HOLDER_ARG)
{
return _Alloc(cbMem, 1 PASS_WRITE_ACCESS_HOLDER_ARG);
}
//-------------------------------------------------------------------------------------------------
uint8_t * AllocHeap::AllocAligned(
uintptr_t cbMem,
uintptr_t alignment
WRITE_ACCESS_HOLDER_ARG)
{
return _Alloc(cbMem, alignment PASS_WRITE_ACCESS_HOLDER_ARG);
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::Contains(void* pvMem, uintptr_t cbMem)
{
MemRange range(pvMem, cbMem);
for (BlockList::Iterator it = m_blockList.Begin(); it != m_blockList.End(); ++it)
{
if (it->Contains(range))
{
return true;
}
}
return false;
}
#ifdef FEATURE_RWX_MEMORY
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_AcquireWriteAccess(
uint8_t* pvMem,
uintptr_t cbMem,
WriteAccessHolder* pHolder)
{
ASSERT(!_UseAccessManager() || m_pAccessMgr != NULL);
if (_UseAccessManager())
return m_pAccessMgr->AcquireWriteAccess(MemRange(pvMem, cbMem), m_hCurPageRW, pHolder);
else
return true;
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::AcquireWriteAccess(
void* pvMem,
uintptr_t cbMem,
WriteAccessHolder* pHolder)
{
return _AcquireWriteAccess(static_cast<uint8_t*>(pvMem), cbMem, pHolder);
}
#endif // FEATURE_RWX_MEMORY
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_UpdateMemPtrs(uint8_t* pNextFree, uint8_t* pFreeCommitEnd, uint8_t* pFreeReserveEnd)
{
ASSERT(MemRange(pNextFree, pFreeReserveEnd).Contains(MemRange(pNextFree, pFreeCommitEnd)));
ASSERT(ALIGN_DOWN(pFreeCommitEnd, OS_PAGE_SIZE) == pFreeCommitEnd);
ASSERT(ALIGN_DOWN(pFreeReserveEnd, OS_PAGE_SIZE) == pFreeReserveEnd);
#ifdef FEATURE_RWX_MEMORY
// See if we need to update current allocation holder or protect committed pages.
if (_UseAccessManager())
{
if (pFreeCommitEnd - pNextFree > 0)
{
#ifndef STRESS_MEMACCESSMGR
// Create or update the alloc cache, used to speed up new allocations.
// If there is available committed memory and either m_pNextFree is
// being updated past a page boundary or the current cache is empty,
// then update the cache.
if (ALIGN_DOWN(m_pNextFree, OS_PAGE_SIZE) != ALIGN_DOWN(pNextFree, OS_PAGE_SIZE) ||
m_hCurPageRW.GetRange().GetLength() == 0)
{
// Update current alloc page write access holder.
if (!_AcquireWriteAccess(ALIGN_DOWN(pNextFree, OS_PAGE_SIZE),
OS_PAGE_SIZE,
&m_hCurPageRW))
{
return false;
}
}
#endif // STRESS_MEMACCESSMGR
}
else
{ // No available committed memory. Release the cache.
m_hCurPageRW.Release();
}
}
#endif // FEATURE_RWX_MEMORY
m_pNextFree = pNextFree;
m_pFreeCommitEnd = pFreeCommitEnd;
m_pFreeReserveEnd = pFreeReserveEnd;
return true;
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_UpdateMemPtrs(uint8_t* pNextFree, uint8_t* pFreeCommitEnd)
{
return _UpdateMemPtrs(pNextFree, pFreeCommitEnd, m_pFreeReserveEnd);
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_UpdateMemPtrs(uint8_t* pNextFree)
{
return _UpdateMemPtrs(pNextFree, m_pFreeCommitEnd);
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_AllocNewBlock(uintptr_t cbMem)
{
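// Round the request up to at least the minimum block size, then to a whole number of OS pages.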
cbMem = ALIGN_UP(max(cbMem, s_minBlockSize), OS_PAGE_SIZE);
uint8_t * pbMem = reinterpret_cast<uint8_t*>
(PalVirtualAlloc(NULL, cbMem, MEM_COMMIT, m_roProtectType));
if (pbMem == NULL)
return false;
BlockListElem *pBlockListElem = new (nothrow) BlockListElem(pbMem, cbMem);
if (pBlockListElem == NULL)
{
PalVirtualFree(pbMem, 0, MEM_RELEASE);
return false;
}
// Add to the list. While there is no race for writers (we hold the lock) we have the
// possibility of simultaneous readers, and using the interlocked version creates a
// memory barrier to make sure any reader sees a consistent list.
m_blockList.PushHeadInterlocked(pBlockListElem);
return _UpdateMemPtrs(pbMem, pbMem + cbMem, pbMem + cbMem);
}
//-------------------------------------------------------------------------------------------------
uint8_t * AllocHeap::_AllocFromCurBlock(
uintptr_t cbMem,
uintptr_t alignment
WRITE_ACCESS_HOLDER_ARG)
{
uint8_t * pbMem = NULL;
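// Pad the request by the gap needed to bring m_pNextFree up to the requested alignment, so the
// fit check below accounts for both the alignment padding and the allocation itself.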
cbMem += (uint8_t *)ALIGN_UP(m_pNextFree, alignment) - m_pNextFree;
if (m_pNextFree + cbMem <= m_pFreeCommitEnd ||
_CommitFromCurBlock(cbMem))
{
ASSERT(cbMem + m_pNextFree <= m_pFreeCommitEnd);
#ifdef FEATURE_RWX_MEMORY
if (pRWAccessHolder != NULL)
{
if (!_AcquireWriteAccess(m_pNextFree, cbMem, pRWAccessHolder))
return NULL;
}
#endif // FEATURE_RWX_MEMORY
pbMem = ALIGN_UP(m_pNextFree, alignment);
if (!_UpdateMemPtrs(m_pNextFree + cbMem))
return NULL;
}
return pbMem;
}
//-------------------------------------------------------------------------------------------------
bool AllocHeap::_CommitFromCurBlock(uintptr_t cbMem)
{
ASSERT(m_pFreeCommitEnd < m_pNextFree + cbMem);
if (m_pNextFree + cbMem <= m_pFreeReserveEnd)
{
uintptr_t cbMemToCommit = ALIGN_UP(cbMem, OS_PAGE_SIZE);
#ifdef FEATURE_RWX_MEMORY
if (_UseAccessManager())
{
if (!m_pAccessMgr->ManageMemoryRange(MemRange(m_pFreeCommitEnd, cbMemToCommit), false))
return false;
}
else
{
uint32_t oldProtectType;
if (!PalVirtualProtect(m_pFreeCommitEnd, cbMemToCommit, m_roProtectType, &oldProtectType))
return false;
}
#endif // FEATURE_RWX_MEMORY
return _UpdateMemPtrs(m_pNextFree, m_pFreeCommitEnd + cbMemToCommit);
}
return false;
}
//-------------------------------------------------------------------------------------------------
void * __cdecl operator new(size_t n, AllocHeap * alloc)
{
return alloc->Alloc(n);
}
//-------------------------------------------------------------------------------------------------
void * __cdecl operator new[](size_t n, AllocHeap * alloc)
{
return alloc->Alloc(n);
}
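//-------------------------------------------------------------------------------------------------
// Illustrative sketch, not part of the original allocator: how the placement forms of operator new
// above might be used. 'SampleNode' and 'g_exampleHeap' are hypothetical names, and a successful
// call to one of the Init overloads is assumed.
//
//     struct SampleNode { int m_value; SampleNode * m_pNext; };
//
//     AllocHeap g_exampleHeap;
//
//     SampleNode * AllocateSampleNode(int value)
//     {
//         // Memory comes from the heap's current block and is only released when the AllocHeap
//         // itself is destroyed. The placement operator new returns NULL on failure, so a real
//         // caller should check the result before using the object.
//         SampleNode * pNode = new (&g_exampleHeap) SampleNode();
//         if (pNode != NULL)
//             pNode->m_value = value;
//         return pNode;
//     }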